[Debian-ha-commits] [pcs] 03/05: Imported Upstream version 0.9.149

Christoph Berg myon at debian.org
Thu Mar 10 19:21:48 UTC 2016


This is an automated email from the git hooks/post-receive script.

myon pushed a commit to branch master
in repository pcs.

commit 40284db47c3732f4f77592c0c98e2a7b7b8b4854
Author: Christoph Berg <myon at debian.org>
Date:   Thu Mar 10 20:07:02 2016 +0100

    Imported Upstream version 0.9.149
---
 Makefile                                           |  45 +-
 README                                             |  13 -
 README.md                                          |  15 -
 newversion.py                                      |   3 +-
 pcs/acl.py                                         | 401 +++++----
 pcs/cluster.py                                     | 197 ++---
 pcs/error_codes.py                                 |  23 +
 pcs/errors.py                                      |  38 +
 pcs/library_acl.py                                 | 135 +++
 pcs/pcs.8                                          |  50 +-
 pcs/pcs.py                                         |   2 +
 pcs/prop.py                                        | 107 ++-
 pcs/resource.py                                    |  14 +-
 pcs/settings.py                                    |   2 +-
 ....x86_64-linux-gnu.debian => settings.py.debian} |  10 +-
 pcs/settings.py.i386-linux-gnu.debian              |  23 -
 pcs/status.py                                      |  14 +-
 pcs/test/Makefile                                  |   1 +
 pcs/test/library_test_tools.py                     |  93 ++
 pcs/test/pcs_test_assertions.py                    |  75 ++
 pcs/test/pcs_test_functions.py                     |  20 +-
 pcs/test/test_acl.py                               |  89 +-
 pcs/test/test_cluster.py                           |  19 +-
 pcs/test/test_library_acl.py                       | 214 +++++
 pcs/test/test_properties.py                        | 210 ++++-
 pcs/test/test_resource.py                          | 175 ++--
 pcs/test/test_stonith.py                           |   8 +-
 pcs/test/test_utils.py                             | 323 +++++--
 pcs/usage.py                                       |  61 +-
 pcs/utils.py                                       | 313 +++++--
 pcsd/auth.rb                                       |  50 +-
 pcsd/bootstrap.rb                                  |   2 +-
 pcsd/cfgsync.rb                                    |  20 +-
 pcsd/cluster_entity.rb                             |   2 +-
 pcsd/fenceagent.rb                                 |   8 +-
 pcsd/pcs.rb                                        | 264 +++---
 pcsd/pcsd                                          |   2 +
 pcsd/pcsd-cli.rb                                   |  42 +-
 pcsd/pcsd.conf                                     |   5 +
 pcsd/pcsd.rb                                       | 958 +++++++++++++--------
 pcsd/public/js/nodes-ember.js                      | 178 +++-
 pcsd/public/js/pcsd.js                             | 435 +++++++---
 pcsd/remote.rb                                     | 756 +++++++---------
 pcsd/resource.rb                                   |  16 +-
 pcsd/session.rb                                    |  71 ++
 ....x86_64-linux-gnu.debian => settings.rb.debian} |   4 +-
 pcsd/settings.rb.i386-linux-gnu.debian             |  24 -
 pcsd/test/test_all_suite.rb                        |   1 +
 pcsd/test/test_auth.rb                             |  36 +-
 pcsd/test/test_session.rb                          |  71 ++
 pcsd/views/_configure.erb                          |  44 +-
 pcsd/views/_dialogs.erb                            |  21 +-
 pcsd/views/_permissions_cluster.erb                |   2 +-
 pcsd/views/login.erb                               |  22 +-
 pcsd/views/main.erb                                |  37 +
 pcsd/views/manage.erb                              |  36 -
 pcsd/views/permissions.erb                         |   1 +
 setup.py                                           |   2 +-
 58 files changed, 3804 insertions(+), 1999 deletions(-)

diff --git a/Makefile b/Makefile
index 2d3ff51..a3d5687 100644
--- a/Makefile
+++ b/Makefile
@@ -1,18 +1,17 @@
 # Compatibility with GNU/Linux [i.e. Debian] based distros
 UNAME_OS_GNU := $(shell if uname -o | grep -q "GNU/Linux" ; then echo true; else echo false; fi)
-UNAME_KERNEL_DEBIAN := $(shell if uname -v | grep -q "Debian\|Ubuntu" ; then echo true; else echo false; fi)
+DISTRO_DEBIAN := $(shell if [ -e /etc/debian_version ] ; then echo true; else echo false; fi)
 IS_DEBIAN=false
-UNAME_DEBIAN_VER_8=false
+DISTRO_DEBIAN_VER_8=false
 
 ifeq ($(UNAME_OS_GNU),true)
-  ifeq ($(UNAME_KERNEL_DEBIAN),true)
+  ifeq ($(DISTRO_DEBIAN),true)
     IS_DEBIAN=true
-    UNAME_DEBIAN_VER_8 := $(shell if grep -q -i "8" /etc/debian_version ; then echo true; else echo false; fi)
-    settings_x86_64 := $(shell if uname -m | grep -q -i "x86_64" ; then echo true; else echo false; fi)
-    settings_i386=false
-    ifeq ($(settings_x86_64),false)
-      settings_i386 := $(shell if uname -m | grep -q -i "i386" ; then echo true; else echo false; fi)
-    endif
+    DISTRO_DEBIAN_VER_8 := $(shell if grep -q -i "^8\|jessie" /etc/debian_version ; then echo true; else echo false; fi)
+    # dpkg-architecture is in the optional dpkg-dev package, unfortunately.
+    #DEB_HOST_MULTIARCH := $(shell dpkg-architecture -qDEB_HOST_MULTIARCH)
+    # TODO: Use lsb_architecture to get the multiarch tuple if/when it becomes available in distributions.
+    DEB_HOST_MULTIARCH := $(shell dpkg -L libc6 | sed -nr 's|^/etc/ld\.so\.conf\.d/(.*)\.conf$$|\1|p')
   endif
 endif
 
@@ -66,18 +65,10 @@ ifndef initdir
 endif
 
 ifndef install_settings
-  install_settings=false
-else
-  ifeq ($(install_settings),true)
-    ifeq ($(settings_x86_64),true)
-      settings_file=settings.py.x86_64-linux-gnu.debian
-      settings_file_pcsd=settings.rb.x86_64-linux-gnu.debian
-    else
-      ifeq ($(settings_i386),true)
-        settings_file=settings.py.i386-linux-gnu.debian
-        settings_file_pcsd=settings.rb.i386-linux-gnu.debian
-      endif
-    endif
+  ifeq ($(IS_DEBIAN),true)
+    install_settings=true
+  else
+    install_settings=false
   endif
 endif
 
@@ -91,10 +82,13 @@ install: bash_completion
 ifeq ($(IS_DEBIAN),true)
   ifeq ($(install_settings),true)
 	rm -f  ${DESTDIR}${PYTHON_SITELIB}/pcs/settings.py
-	install -m755 pcs/${settings_file} ${DESTDIR}${PYTHON_SITELIB}/pcs/settings.py
+	tmp_settings=`mktemp`; \
+	        sed s/DEB_HOST_MULTIARCH/${DEB_HOST_MULTIARCH}/g pcs/settings.py.debian > $$tmp_settings; \
+	        install -m644 $$tmp_settings ${DESTDIR}${PYTHON_SITELIB}/pcs/settings.py; \
+	        rm -f $$tmp_settings
+	python -m compileall -fl ${DESTDIR}${PYTHON_SITELIB}/pcs/settings.py
   endif
 endif
-	
 
 install_pcsd:
 ifeq ($(BUILD_GEMS),true)
@@ -109,7 +103,10 @@ ifeq ($(IS_DEBIAN),true)
 	install  pcsd/pcsd.pam.debian ${DESTDIR}/etc/pam.d/pcsd
   ifeq ($(install_settings),true)
 	rm -f  ${DESTDIR}/usr/share/pcsd/settings.rb
-	install -m755 pcsd/${settings_file_pcsd} ${DESTDIR}/usr/share/pcsd/settings.rb
+	tmp_settings_pcsd=`mktemp`; \
+	        sed s/DEB_HOST_MULTIARCH/${DEB_HOST_MULTIARCH}/g pcsd/settings.rb.debian > $$tmp_settings_pcsd; \
+	        install -m644 $$tmp_settings_pcsd ${DESTDIR}/usr/share/pcsd/settings.rb; \
+	        rm -f $$tmp_settings_pcsd
   endif
   ifeq ($(IS_SYSTEMCTL),true)
 	install -d ${DESTDIR}/${systemddir}/system/
diff --git a/README b/README
index a42b03a..0674d96 100644
--- a/README
+++ b/README
@@ -8,13 +8,6 @@ To install pcs run the following in terminal
 # cd pcs-0.9.143
 # make install
 
-If you are using Debian or Debian-based distribution (such as Ubuntu), run
-the following instead:
-
-# tar -xzvf pcs-0.9.143.tar.gz
-# cd pcs-0.9.143
-# make install install_settings=true
-
 This will install pcs into /usr/sbin/pcs
 
 To create a cluster run the following commands on all nodes (replacing node1,
@@ -42,12 +35,6 @@ Fedora, and development packages installed)
 # cd pcsd ; make get_gems ; cd ..
 # make install_pcsd
 
-If you are using Debian or Debian-based distribution (such as Ubuntu), run
-the following instead:
-
-# cd pcsd ; make get_gems ; cd ..
-# make install_pcsd install_settings=true
-
 If you are using GNU/Linux its now time to:
 # systemctl daemon-reload
 
diff --git a/README.md b/README.md
index bc8f6f0..b0d6cad 100644
--- a/README.md
+++ b/README.md
@@ -15,14 +15,6 @@
    # make install
    ```
 
-   If you are using Debian or Debian-based distribution (such as Ubuntu), run the following instead:
-
-   ```shell
-   # tar -xzvf pcs-0.9.143.tar.gz
-   # cd pcs-0.9.143
-   # make install install_settings=true
-   ```
-
    This will install pcs into `/usr/sbin/pcs`.
 
 <br />
@@ -76,13 +68,6 @@
    # make install_pcsd
    ```
 
-   If you are using Debian or Debian-based distribution (such as Ubuntu), run the following instead:
-
-   ```shell
-   # cd pcsd ; make get_gems ; cd ..
-   # make install_pcsd install_settings=true
-   ```
-
    If you are using GNU/Linux its now time to:
 
    ```shell
diff --git a/newversion.py b/newversion.py
index 8007f52..09fb547 100644
--- a/newversion.py
+++ b/newversion.py
@@ -26,8 +26,7 @@ new_version = ".".join(pcs_version_split)
 
 print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' setup.py"))
 print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/settings.py"))
-print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/settings.py.i386-linux-gnu.debian"))
-print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/settings.py.x86_64-linux-gnu.debian"))
+print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/settings.py.debian"))
 print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcsd/bootstrap.rb"))
 
 manpage_head = '.TH PCS "8" "{date}" "pcs {version}" "System Administration Utilities"'.format(
diff --git a/pcs/acl.py b/pcs/acl.py
index bebd7d2..e347c27 100644
--- a/pcs/acl.py
+++ b/pcs/acl.py
@@ -8,7 +8,18 @@ import sys
 import usage
 import utils
 import prop
-
+from errors import CmdLineInputError
+from errors import ReportItem
+from errors import ReportItemSeverity
+from errors import error_codes
+from library_acl import LibraryError
+from library_acl import create_role
+from library_acl import provide_role
+from library_acl import add_permissions_to_role
+
+def exit_on_cmdline_input_error(usage_name):
+    usage.acl([usage_name])
+    sys.exit(1)
 
 def acl_cmd(argv):
     if len(argv) == 0:
@@ -24,8 +35,6 @@ def acl_cmd(argv):
         usage.acl(argv)
     elif (sub_cmd == "show"):
         acl_show(argv)
-#    elif (sub_cmd == "grant"):
-#        acl_grant(argv)
     elif (sub_cmd == "enable"):
         acl_enable(argv)
     elif (sub_cmd == "disable"):
@@ -63,147 +72,30 @@ def acl_enable(argv):
 def acl_disable(argv):
     prop.set_property(["enable-acl=false"])
 
-def acl_grant(argv):
-    print("Not yet implemented")
-
 def acl_role(argv):
     if len(argv) < 2:
         usage.acl(["role"])
         sys.exit(1)
 
-    dom = utils.get_cib_dom()
-    dom, acls = get_acls(dom)
-
     command = argv.pop(0)
     if command == "create":
-        role_name = argv.pop(0)
-        if argv and argv[0].startswith('description=') and len(argv[0]) > 12:
-            description = argv.pop(0)[12:]
-        else:
-            description = ""
-        id_valid, id_error = utils.validate_xml_id(role_name, 'ACL role')
-        if not id_valid:
-            utils.err(id_error)
-        if utils.dom_get_element_with_id(dom, "acl_role", role_name):
-            utils.err("role %s already exists" % role_name)
-        if utils.does_id_exist(dom,role_name):
-            utils.err(role_name + " already exists")
-
-        element = dom.createElement("acl_role")
-        element.setAttribute("id",role_name)
-        if description != "":
-            element.setAttribute("description", description)
-        acls.appendChild(element)
+        try:
+            run_create_role(argv)
+        except CmdLineInputError as e:
+            exit_on_cmdline_input_error('role create')
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
 
-        if not add_permissions_to_role(element, argv):
-            usage.acl(["role create"])
-            sys.exit(1)
-        utils.replace_cib_configuration(dom)
 
     elif command == "delete":
-        if len(argv) < 1:
-            usage.acl(["role delete"])
-            sys.exit(1)
-
-        role_id = argv.pop(0)
-        found = False
-        for elem in dom.getElementsByTagName("acl_role"):
-            if elem.getAttribute("id") == role_id:
-                found = True
-                elem.parentNode.removeChild(elem)
-                break
-        if not found:
-            utils.err("unable to find acl role: %s" % role_id)
-
-        # Remove any references to this role in acl_target or acl_group
-        for elem in dom.getElementsByTagName("role"):
-            if elem.getAttribute("id") == role_id:
-                user_group = elem.parentNode
-                user_group.removeChild(elem)
-                if "--autodelete" in utils.pcs_options:
-                    if not user_group.getElementsByTagName("role"):
-                        user_group.parentNode.removeChild(user_group)
-
-        utils.replace_cib_configuration(dom)
+        run_role_delete(argv)
     elif command == "assign":
-        if len(argv) < 2:
-            usage.acl(["role assign"])
-            sys.exit(1)
-
-        if len(argv) == 2:
-            role_id = argv[0]
-            ug_id = argv[1]
-        elif len(argv) > 2 and argv[1] == "to":
-            role_id = argv[0]
-            ug_id = argv[2]
-        else:
-            usage.acl(["role assign"])
-            sys.exit(1)
-
-        found = False
-        for role in dom.getElementsByTagName("acl_role"):
-            if role.getAttribute("id") == role_id:
-                found = True
-                break
-
-        if not found:
-            utils.err("cannot find role: %s" % role_id)
-
-        found = False
-        for ug in dom.getElementsByTagName("acl_target") + dom.getElementsByTagName("acl_group"):
-            if ug.getAttribute("id") == ug_id:
-                found = True
-                break
-
-        if not found:
-            utils.err("cannot find user or group: %s" % ug_id)
-
-        for current_role in ug.getElementsByTagName("role"):
-            if current_role.getAttribute("id") == role_id:
-                utils.err(role_id + " is already assigned to " + ug_id)
-
-        new_role = dom.createElement("role")
-        new_role.setAttribute("id", role_id)
-        ug.appendChild(new_role)
-        utils.replace_cib_configuration(dom)
+        run_role_assign(argv)
     elif command == "unassign":
-        if len(argv) < 2:
-            usage.acl(["role unassign"])
-            sys.exit(1)
-
-        role_id = argv.pop(0)
-        if len(argv) > 1 and argv[0] == "from":
-            ug_id = argv[1]
-        else:
-            ug_id = argv[0]
-
-        found = False
-        for ug in dom.getElementsByTagName("acl_target") + dom.getElementsByTagName("acl_group"):
-            if ug.getAttribute("id") == ug_id:
-                found = True
-                break
-
-        if not found:
-            utils.err("cannot find user or group: %s" % ug_id)
-
-        found = False
-        for current_role in ug.getElementsByTagName("role"):
-            if current_role.getAttribute("id") == role_id:
-                found = True
-                current_role.parentNode.removeChild(current_role)
-                break
-
-        if not found:
-            utils.err("cannot find role: %s, assigned to user/group: %s" % (role_id, ug_id))
-
-        if "--autodelete" in utils.pcs_options:
-            if not ug.getElementsByTagName("role"):
-                ug.parentNode.removeChild(ug)
-
-        utils.replace_cib_configuration(dom)
-
+        run_role_unassign(argv)
     else:
-        utils.err("Unknown pcs acl role command: '" + command + "' (try create or delete)")
+        usage.acl(["role"])
+        sys.exit(1)
 
 def acl_target(argv,group=False):
     if len(argv) < 2:
@@ -215,7 +107,7 @@ def acl_target(argv,group=False):
             sys.exit(1)
 
     dom = utils.get_cib_dom()
-    dom, acls = get_acls(dom)
+    acls = utils.get_acls(dom)
 
     command = argv.pop(0)
     tug_id = argv.pop(0)
@@ -274,47 +166,17 @@ def acl_permission(argv):
         usage.acl(["permission"])
         sys.exit(1)
 
-    dom = utils.get_cib_dom()
-    dom, acls = get_acls(dom)
-
     command = argv.pop(0)
     if command == "add":
-        if len(argv) < 4:
-            usage.acl(["permission add"])
-            sys.exit(1)
-        role_id = argv.pop(0)
-        found = False
-        for role in dom.getElementsByTagName("acl_role"):
-            if role.getAttribute("id") == role_id:
-                found = True
-                break
-        if found == False:
-            acl_role(["create", role_id] + argv) 
-            return
-
-        if not argv:
-            usage.acl(["permission add"])
-            sys.exit(1)
-        if not add_permissions_to_role(role, argv):
-            usage.acl(["permission add"])
-            sys.exit(1)
-        utils.replace_cib_configuration(dom)
+        try:
+            run_permission_add(argv)
+        except CmdLineInputError as e:
+            exit_on_cmdline_input_error('permission add')
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
 
     elif command == "delete":
-        if len(argv) < 1:
-            usage.acl(["permission delete"])
-            sys.exit(1)
-
-        perm_id = argv.pop(0)
-        found = False
-        for elem in dom.getElementsByTagName("acl_permission"):
-            if elem.getAttribute("id") == perm_id:
-                elem.parentNode.removeChild(elem)
-                found = True
-        if not found:
-            utils.err("Unable to find permission with id: %s" % perm_id)
-
-        utils.replace_cib_configuration(dom)
+        run_permission_delete(argv)
 
     else:
         usage.acl(["permission"])
@@ -350,39 +212,172 @@ def print_roles(dom):
             perm_name += " (" + perm.getAttribute("id") + ")"
             print(perm_name)
 
-def get_acls(dom):        
-    acls = dom.getElementsByTagName("acls")
-    if len(acls) == 0:
-        acls = dom.createElement("acls")
-        conf = dom.getElementsByTagName("configuration")
-        if len(conf) == 0:
-            utils.err("Unable to get configuration section of cib")
-        conf[0].appendChild(acls)
+def argv_to_permission_info_list(argv):
+    if len(argv) % 3 != 0:
+        raise CmdLineInputError()
+
+    permission_info_list = list(zip(
+        [permission.lower() for permission in argv[::3]],
+        [scope_type.lower() for scope_type in argv[1::3]],
+        argv[2::3]
+    ))
+
+    for permission, scope_type, scope in permission_info_list:
+        if(
+            permission not in ['read', 'write', 'deny']
+            or
+            scope_type not in ['xpath', 'id']
+        ):
+            raise CmdLineInputError()
+
+    return permission_info_list
+
+def run_create_role(argv):
+    if len(argv) < 1:
+        raise CmdLineInputError()
+    role_id = argv.pop(0)
+    description = ""
+    desc_key = 'description='
+    if argv and argv[0].startswith(desc_key) and len(argv[0]) > len(desc_key):
+        description = argv.pop(0)[len(desc_key):]
+    permission_info_list = argv_to_permission_info_list(argv)
+
+    dom = utils.get_cib_dom()
+    create_role(dom, role_id, description)
+    add_permissions_to_role(dom, role_id, permission_info_list)
+    utils.replace_cib_configuration(dom)
+
+def run_role_delete(argv):
+    if len(argv) < 1:
+        usage.acl(["role delete"])
+        sys.exit(1)
+
+    role_id = argv.pop(0)
+    dom = utils.get_cib_dom()
+    found = False
+    for elem in dom.getElementsByTagName("acl_role"):
+        if elem.getAttribute("id") == role_id:
+            found = True
+            elem.parentNode.removeChild(elem)
+            break
+    if not found:
+        utils.err("unable to find acl role: %s" % role_id)
+
+    # Remove any references to this role in acl_target or acl_group
+    for elem in dom.getElementsByTagName("role"):
+        if elem.getAttribute("id") == role_id:
+            user_group = elem.parentNode
+            user_group.removeChild(elem)
+            if "--autodelete" in utils.pcs_options:
+                if not user_group.getElementsByTagName("role"):
+                    user_group.parentNode.removeChild(user_group)
+
+    utils.replace_cib_configuration(dom)
+
+def run_role_assign(argv):
+    if len(argv) < 2:
+        usage.acl(["role assign"])
+        sys.exit(1)
+
+    if len(argv) == 2:
+        role_id = argv[0]
+        ug_id = argv[1]
+    elif len(argv) > 2 and argv[1] == "to":
+        role_id = argv[0]
+        ug_id = argv[2]
     else:
-        acls = acls[0]
-    return (dom,acls)
-
-def add_permissions_to_role(role_element, argv):
-    dom = role_element.ownerDocument
-    role_id = role_element.getAttribute("id")
-    while argv:
-        if len(argv) < 3:
-            return False
-        rwd = argv.pop(0).lower()
-        if not rwd in ["read", "write", "deny"]:
-            return False
-        se = dom.createElement("acl_permission")
-        se.setAttribute("id", utils.find_unique_id(dom, role_id + "-" + rwd))
-        se.setAttribute("kind", rwd)
-        xp_id = argv.pop(0).lower()
-        if xp_id == "xpath":
-            xpath_query = argv.pop(0)
-            se.setAttribute("xpath", xpath_query)
-        elif xp_id == "id":
-            acl_ref = argv.pop(0)
-            se.setAttribute("reference", acl_ref)
-        else:
-            return False
-        role_element.appendChild(se)
-    return True
+        usage.acl(["role assign"])
+        sys.exit(1)
+
+    dom = utils.get_cib_dom()
+    found = False
+    for role in dom.getElementsByTagName("acl_role"):
+        if role.getAttribute("id") == role_id:
+            found = True
+            break
+
+    if not found:
+        utils.err("cannot find role: %s" % role_id)
+
+    found = False
+    for ug in dom.getElementsByTagName("acl_target") + dom.getElementsByTagName("acl_group"):
+        if ug.getAttribute("id") == ug_id:
+            found = True
+            break
+
+    if not found:
+        utils.err("cannot find user or group: %s" % ug_id)
+
+    for current_role in ug.getElementsByTagName("role"):
+        if current_role.getAttribute("id") == role_id:
+            utils.err(role_id + " is already assigned to " + ug_id)
+
+    new_role = dom.createElement("role")
+    new_role.setAttribute("id", role_id)
+    ug.appendChild(new_role)
+    utils.replace_cib_configuration(dom)
+
+def run_role_unassign(argv):
+    if len(argv) < 2:
+        usage.acl(["role unassign"])
+        sys.exit(1)
+
+    role_id = argv.pop(0)
+    if len(argv) > 1 and argv[0] == "from":
+        ug_id = argv[1]
+    else:
+        ug_id = argv[0]
+
+    dom = utils.get_cib_dom()
+    found = False
+    for ug in dom.getElementsByTagName("acl_target") + dom.getElementsByTagName("acl_group"):
+        if ug.getAttribute("id") == ug_id:
+            found = True
+            break
+
+    if not found:
+        utils.err("cannot find user or group: %s" % ug_id)
+
+    found = False
+    for current_role in ug.getElementsByTagName("role"):
+        if current_role.getAttribute("id") == role_id:
+            found = True
+            current_role.parentNode.removeChild(current_role)
+            break
+
+    if not found:
+        utils.err("cannot find role: %s, assigned to user/group: %s" % (role_id, ug_id))
+
+    if "--autodelete" in utils.pcs_options:
+        if not ug.getElementsByTagName("role"):
+            ug.parentNode.removeChild(ug)
+
+    utils.replace_cib_configuration(dom)
+
+def run_permission_add(argv):
+    if len(argv) < 4:
+        raise CmdLineInputError()
+    role_id = argv.pop(0)
+    permission_info_list = argv_to_permission_info_list(argv)
+
+    dom = utils.get_cib_dom()
+    provide_role(dom, role_id)
+    add_permissions_to_role(dom, role_id, permission_info_list)
+    utils.replace_cib_configuration(dom)
+
+def run_permission_delete(argv):
+    dom = utils.get_cib_dom()
+    if len(argv) < 1:
+        usage.acl(["permission delete"])
+        sys.exit(1)
+
+    perm_id = argv.pop(0)
+    found = False
+    for elem in dom.getElementsByTagName("acl_permission"):
+        if elem.getAttribute("id") == perm_id:
+            elem.parentNode.removeChild(elem)
+            found = True
+    if not found:
+        utils.err("Unable to find permission with id: %s" % perm_id)
 
+    utils.replace_cib_configuration(dom)
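
For reference, a minimal sketch (plain Python, hypothetical argv values) of
the three-token grouping performed by the new argv_to_permission_info_list():

    # each permission is given as: <permission> <scope type> <scope>
    argv = ["read", "xpath", "/cib", "deny", "id", "dummy"]
    permission_info_list = list(zip(
        [permission.lower() for permission in argv[::3]],   # read/write/deny
        [scope_type.lower() for scope_type in argv[1::3]],  # xpath/id
        argv[2::3],                                         # query or element id
    ))
    # -> [("read", "xpath", "/cib"), ("deny", "id", "dummy")]
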
diff --git a/pcs/cluster.py b/pcs/cluster.py
index e7c408f..826f8d6 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -30,7 +30,8 @@ import prop
 import resource
 import stonith
 import constraint
-
+from errors import ReportItem
+from errors import error_codes
 
 pcs_dir = os.path.dirname(os.path.realpath(__file__))
 
@@ -359,7 +360,7 @@ def cluster_setup(argv):
         )
     if udpu_rrp and "rrp_mode" not in options["transport_options"]:
         options["transport_options"]["rrp_mode"] = "passive"
-    cluster_setup_print_messages(messages)
+    utils.process_library_reports(messages)
 
     # prepare config file
     if is_rhel6:
@@ -377,7 +378,7 @@ def cluster_setup(argv):
             options["totem_options"],
             options["quorum_options"]
         )
-    cluster_setup_print_messages(messages)
+    utils.process_library_reports(messages)
 
     # setup on the local node
     if "--local" in utils.pcs_options:
@@ -496,37 +497,37 @@ def cluster_setup_parse_options_corosync(options):
     if "--transport" in options:
         transport = options["--transport"]
         if transport not in ("udp", "udpu"):
-            messages.append({
-                "text": "unknown transport '{0}'".format(transport),
-                "type": "error",
-                "forceable": True,
-            })
+            messages.append(ReportItem.error(
+                error_codes.UNKNOWN_TRANSPORT,
+                "unknown transport '{transport}'",
+                info={'transport': transport},
+                forceable=True,
+            ))
     parsed["transport_options"]["transport"] = transport
 
     if transport == "udpu" and ("--addr0" in options or "--addr1" in options):
-        messages.append({
-            "text": "--addr0 and --addr1 can only be used with --transport=udp",
-            "type": "error",
-            "forceable": False,
-        })
-
+        messages.append(ReportItem.error(
+            error_codes.NON_UDP_TRANSPORT_ADDR_MISMATCH,
+            '--addr0 and --addr1 can only be used with --transport=udp',
+        ))
     rrpmode = None
     if "--rrpmode" in options or "--addr0" in options:
         rrpmode = "passive"
         if "--rrpmode" in options:
             rrpmode = options["--rrpmode"]
         if rrpmode not in ("passive", "active"):
-            messages.append({
-                "text": "{0} is an unknown RRP mode".format(rrpmode),
-                "type": "error",
-                "forceable": True,
-            })
+            messages.append(ReportItem.error(
+                error_codes.UNKNOWN_RRP_MODE,
+                '{rrpmode} is an unknown RRP mode',
+                info={'rrpmode': rrpmode},
+                forceable=True,
+            ))
         if rrpmode == "active":
-            messages.append({
-                "text": "using a RRP mode of 'active' is not supported or tested",
-                "type": "error",
-                "forceable": True,
-            })
+            messages.append(ReportItem.error(
+                error_codes.RRP_ACTIVE_NOT_SUPPORTED,
+                "using a RRP mode of 'active' is not supported or tested",
+                forceable=True,
+            ))
     if rrpmode:
         parsed["transport_options"]["rrp_mode"] = rrpmode
 
@@ -583,14 +584,20 @@ def cluster_setup_parse_options_corosync(options):
     for opt_name in (
         "--wait_for_all", "--auto_tie_breaker", "--last_man_standing"
     ):
-        if opt_name in options and options[opt_name] not in ("0", "1"):
-            messages.append({
-                "text": "'{0}' is not a valid value for {1}, use 0 or 1".format(
-                    options[opt_name], opt_name
-                ),
-                "type": "error",
-                "forceable": False,
-            })
+        allowed_values = ('0', '1')
+        if opt_name in options and options[opt_name] not in allowed_values:
+            messages.append(ReportItem.error(
+                error_codes.INVALID_OPTION_VALUE,
+                "'{option_value}' is not a valid value for {option_name}, "
+                    +"use {allowed_values}"
+                ,
+                info={
+                    'option_name': opt_name,
+                    'option_value': options[opt_name],
+                    'allowed_values_raw': allowed_values,
+                    'allowed_values': ' or '.join(allowed_values),
+                },
+            ))
 
     return parsed, messages
 
@@ -613,41 +620,38 @@ def cluster_setup_parse_options_cman(options):
         if "--broadcast1" not in options:
             ring_missing_broadcast = "1"
         if ring_missing_broadcast:
-            messages.append({
-                "text": (
-                    "Enabling broadcast for ring {0} as CMAN does not support "
-                    + "broadcast in only one ring"
-                ).format(ring_missing_broadcast),
-                "type": "warning",
-                "forceable": False,
-            })
+            messages.append(ReportItem.warning(
+                error_codes.CMAN_BROADCAST_ALL_RINGS,
+                'Enabling broadcast for ring {ring_missing_broadcast}'
+                    +' as CMAN does not support broadcast in only one ring'
+                ,
+                info={'ring_missing_broadcast': ring_missing_broadcast}
+            ))
     else:
         transport = "udp"
         if "--transport" in options:
             transport = options["--transport"]
             if transport not in ("udp", "udpu"):
-                messages.append({
-                    "text": "unknown transport '{0}'".format(transport),
-                    "type": "error",
-                    "forceable": True,
-                })
+                messages.append(ReportItem.error(
+                    error_codes.UNKNOWN_TRANSPORT,
+                    "unknown transport '{transport}'",
+                    info={'transport': transport},
+                    forceable=True,
+                ))
     parsed["transport_options"]["transport"] = transport
 
     if transport == "udpu":
-        messages.append({
-            "text": (
-                "Using udpu transport on a CMAN cluster, "
+        messages.append(ReportItem.warning(
+            error_codes.CMAN_UDPU_RESTART_REQUIRED,
+            "Using udpu transport on a CMAN cluster, "
                 + "cluster restart is required after node add or remove"
-            ),
-            "type": "warning",
-            "forceable": False,
-        })
+            ,
+        ))
     if transport == "udpu" and ("--addr0" in options or "--addr1" in options):
-        messages.append({
-            "text": "--addr0 and --addr1 can only be used with --transport=udp",
-            "type": "error",
-            "forceable": False,
-        })
+        messages.append(ReportItem.error(
+            error_codes.NON_UDP_TRANSPORT_ADDR_MISMATCH,
+            '--addr0 and --addr1 can only be used with --transport=udp',
+        ))
 
     rrpmode = None
     if "--rrpmode" in options or "--addr0" in options:
@@ -655,17 +659,18 @@ def cluster_setup_parse_options_cman(options):
         if "--rrpmode" in options:
             rrpmode = options["--rrpmode"]
         if rrpmode not in ("passive", "active"):
-            messages.append({
-                "text": "{0} is an unknown RRP mode".format(rrpmode),
-                "type": "error",
-                "forceable": True,
-            })
+            messages.append(ReportItem.error(
+                error_codes.UNKNOWN_RRP_MODE,
+                '{rrpmode} is an unknown RRP mode',
+                info={'rrpmode': rrpmode},
+                forceable=True,
+            ))
         if rrpmode == "active":
-            messages.append({
-                "text": "using a RRP mode of 'active' is not supported or tested",
-                "type": "error",
-                "forceable": True,
-            })
+            messages.append(ReportItem.error(
+                error_codes.RRP_ACTIVE_NOT_SUPPORTED,
+                "using a RRP mode of 'active' is not supported or tested",
+                forceable=True,
+            ))
     if rrpmode:
         parsed["transport_options"]["rrp_mode"] = rrpmode
 
@@ -708,14 +713,11 @@ def cluster_setup_parse_options_cman(options):
     )
     for opt_name in ignored_options_names:
         if opt_name in options:
-            text = "{0} ignored as it is not supported on CMAN clusters".format(
-                opt_name
-            )
-            messages.append({
-                "text": text,
-                "type": "warning",
-                "forceable": False,
-            })
+            messages.append(ReportItem.warning(
+                error_codes.IGNORED_CMAN_UNSUPPORTED_OPTION,
+                '{option_name} ignored as it is not supported on CMAN clusters',
+                info={'option_name': opt_name}
+            ))
 
     return parsed, messages
 
@@ -928,15 +930,12 @@ def cluster_setup_create_cluster_conf(
         output, retval = utils.run(cmd_prefix + cmd_item["cmd"])
         if retval != 0:
             if output:
-                messages.append({
-                    "text": output,
-                    "type": "plain",
-                })
-            messages.append({
-                "text": cmd_item["err"],
-                "type": "error",
-                "forceable": False,
-            })
+                messages.append(
+                    ReportItem.info(error_codes.COMMON_INFO, output)
+                )
+            messages.append(
+                ReportItem.error(error_codes.COMMON_ERROR, cmd_item["err"])
+            )
             conf_temp.close()
             return "", messages
     conf_temp.seek(0)
@@ -944,28 +943,6 @@ def cluster_setup_create_cluster_conf(
     conf_temp.close()
     return cluster_conf, messages
 
-def cluster_setup_print_messages(messages):
-    critical_error = False
-    for msg in messages:
-        if msg["type"] == "error":
-            if msg["forceable"] and "--force" in utils.pcs_options:
-                # Let the user know what may be wrong even when --force is used,
-                # as it may be used for override early errors hiding later
-                # errors otherwise.
-                print("Warning: " + msg["text"])
-                continue
-            text = msg["text"]
-            if msg["forceable"]:
-                text += ", use --force to override"
-            critical_error = True
-            utils.err(text, False)
-        elif msg["type"] == "warning":
-            print("Warning: " + msg["text"])
-        else:
-            print(msg["text"])
-    if critical_error:
-        sys.exit(1)
-
 def get_local_network():
     args = ["/sbin/ip", "route"]
     p = subprocess.Popen(args, stdout=subprocess.PIPE)
@@ -1577,7 +1554,7 @@ def cluster_uidgid_rhel6(argv, silent_list = False):
         if not found and not silent_list:
             print("No uidgids configured in cluster.conf")
         return
-    
+
     command = argv.pop(0)
     uid=""
     gid=""
@@ -1609,7 +1586,7 @@ def cluster_uidgid_rhel6(argv, silent_list = False):
         # If we make a change, we sync out the changes to all nodes unless we're using -f
         if not utils.usefile:
             sync_nodes(utils.getNodesFromCorosyncConf(), utils.getCorosyncConf())
-         
+
     else:
         usage.cluster(["uidgid"])
         exit(1)
@@ -1664,7 +1641,7 @@ def cluster_uidgid(argv, silent_list = False):
             retval = utils.remove_uid_gid_file(uid,gid)
             if retval == False:
                 utils.err("no uidgid files with uid=%s and gid=%s found" % (uid,gid))
-         
+
     else:
         usage.cluster(["uidgid"])
         exit(1)
@@ -1728,7 +1705,7 @@ def cluster_verify(argv):
         nofilename = False
     elif len(argv) > 1:
         usage.cluster("verify")
-    
+
     options = []
     if "-V" in utils.pcs_options:
         options.append("-V")
diff --git a/pcs/error_codes.py b/pcs/error_codes.py
new file mode 100644
index 0000000..9c08f1e
--- /dev/null
+++ b/pcs/error_codes.py
@@ -0,0 +1,23 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+ACL_ROLE_ALREADY_EXISTS = 'ACL_ROLE_ALREADY_EXISTS'
+ACL_ROLE_NOT_FOUND = 'ACL_ROLE_NOT_FOUND'
+BAD_ACL_PERMISSION = 'BAD_ACL_PERMISSION'
+BAD_ACL_SCOPE_TYPE = 'BAD_ACL_SCOPE_TYPE'
+CMAN_BROADCAST_ALL_RINGS = 'CMAN_BROADCAST_ALL_RINGS'
+CMAN_UDPU_RESTART_REQUIRED = 'CMAN_UDPU_RESTART_REQUIRED'
+COMMON_ERROR = 'COMMON_ERROR'
+COMMON_INFO = 'COMMON_INFO'
+ID_ALREADY_EXISTS = 'ID_ALREADY_EXISTS'
+ID_IS_NOT_VALID = 'ID_IS_NOT_VALID'
+ID_NOT_FOUND = 'ID_NOT_FOUND'
+IGNORED_CMAN_UNSUPPORTED_OPTION = 'IGNORED_CMAN_UNSUPPORTED_OPTION'
+INVALID_OPTION_VALUE = 'INVALID_OPTION_VALUE'
+NON_UDP_TRANSPORT_ADDR_MISMATCH = 'NON_UDP_TRANSPORT_ADDR_MISMATCH'
+RRP_ACTIVE_NOT_SUPPORTED = 'RRP_ACTIVE_NOT_SUPPORTED'
+UNKNOWN_COMMAND = 'UNKNOWN_COMMAND'
+UNKNOWN_RRP_MODE = 'UNKNOWN_RRP_MODE'
+UNKNOWN_TRANSPORT = 'UNKNOWN_TRANSPORT'
diff --git a/pcs/errors.py b/pcs/errors.py
new file mode 100644
index 0000000..8c93cb0
--- /dev/null
+++ b/pcs/errors.py
@@ -0,0 +1,38 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import usage
+import error_codes
+
+class CmdLineInputError(Exception):
+    pass
+
+class ReportItemSeverity(object):
+    ERROR = 'ERROR'
+    WARNING = 'WARNING'
+    INFO = 'INFO'
+
+class ReportItem(object):
+    @classmethod
+    def error(cls, code, message_pattern, **kwargs):
+        return cls(code, ReportItemSeverity.ERROR, message_pattern, **kwargs)
+
+    @classmethod
+    def warning(cls, code, message_pattern, **kwargs):
+        return cls(code, ReportItemSeverity.WARNING, message_pattern, **kwargs)
+
+    @classmethod
+    def info(cls, code, message_pattern, **kwargs):
+        return cls(code, ReportItemSeverity.INFO, message_pattern, **kwargs)
+
+    def __init__(
+        self, code, severity, message_pattern, forceable=False, info=None
+    ):
+        self.code = code
+        self.severity = severity
+        self.forceable = forceable
+        self.message_pattern = message_pattern
+        self.info = info if info else dict()
+        self.message = self.message_pattern.format(**self.info)
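
As a usage illustration of the new ReportItem factories (hypothetical
transport value, illustrative only):

    from errors import ReportItem, ReportItemSeverity
    import error_codes

    item = ReportItem.error(
        error_codes.UNKNOWN_TRANSPORT,
        "unknown transport '{transport}'",
        info={"transport": "sctp"},
        forceable=True,
    )
    assert item.severity == ReportItemSeverity.ERROR
    assert item.message == "unknown transport 'sctp'"
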
diff --git a/pcs/library_acl.py b/pcs/library_acl.py
new file mode 100644
index 0000000..36de4ec
--- /dev/null
+++ b/pcs/library_acl.py
@@ -0,0 +1,135 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import utils
+from errors import ReportItem
+from errors import ReportItemSeverity
+from errors import error_codes
+
+class LibraryError(Exception):
+    pass
+
+class AclRoleNotFound(LibraryError):
+    pass
+
+def __validate_role_id_for_create(dom, role_id):
+    id_valid, message = utils.validate_xml_id(role_id, 'ACL role')
+    if not id_valid:
+        raise LibraryError(ReportItem.error(
+            error_codes.ID_IS_NOT_VALID,
+            message,
+            info={'id': role_id}
+        ))
+    if utils.dom_get_element_with_id(dom, "acl_role", role_id):
+        raise LibraryError(ReportItem.error(
+            error_codes.ACL_ROLE_ALREADY_EXISTS,
+            'role {id} already exists',
+            info={'id': role_id}
+        ))
+    if utils.does_id_exist(dom, role_id):
+        raise LibraryError(ReportItem.error(
+            error_codes.ID_ALREADY_EXISTS,
+            '{id} already exists',
+            info={'id': role_id}
+        ))
+
+def __validate_permissions(dom, permission_info_list):
+    report = []
+    allowed_permissions = ["read", "write", "deny"]
+    allowed_scopes = ["xpath", "id"]
+    for permission, scope_type, scope in permission_info_list:
+        if not permission in allowed_permissions:
+            report.append(ReportItem.error(
+                error_codes.BAD_ACL_PERMISSION,
+                'bad permission "{permission}, expected {allowed_values}',
+                info={
+                    'permission': permission,
+                    'allowed_values_raw': allowed_permissions,
+                    'allowed_values': ' or '.join(allowed_permissions)
+                },
+            ))
+
+        if not scope_type in allowed_scopes:
+            report.append(ReportItem.error(
+                error_codes.BAD_ACL_SCOPE_TYPE,
+                'bad scope type "{scope_type}, expected {allowed_values}',
+                info={
+                    'scope_type': scope_type,
+                    'allowed_values_raw': allowed_scopes,
+                    'allowed_values': ' or '.join(allowed_scopes)
+                },
+            ))
+
+        if scope_type == 'id' and not utils.does_id_exist(dom, scope):
+            report.append(ReportItem.error(
+                error_codes.ID_NOT_FOUND,
+                'id "{id}" does not exist.',
+                info={'id': scope},
+            ))
+
+    if report:
+        raise LibraryError(*report)
+
+def __find_role(dom, role_id):
+    for role in dom.getElementsByTagName("acl_role"):
+        if role.getAttribute("id") == role_id:
+            return role
+
+    raise AclRoleNotFound(ReportItem.error(
+        error_codes.ACL_ROLE_NOT_FOUND,
+        'role id "{role_id}" does not exist.',
+        info={'role_id': role_id},
+    ))
+
+def create_role(dom, role_id, description=''):
+    """
+    role_id id of desired role
+    description role description
+    """
+    __validate_role_id_for_create(dom, role_id)
+    role = dom.createElement("acl_role")
+    role.setAttribute("id",role_id)
+    if description != "":
+        role.setAttribute("description", description)
+    acls = utils.get_acls(dom)
+    acls.appendChild(role)
+
+def provide_role(dom, role_id):
+    """
+    role_id id of desired role
+    description role description
+    """
+    try:
+        __find_role(dom, role_id)
+    except AclRoleNotFound:
+        create_role(dom, role_id)
+
+def add_permissions_to_role(dom, role_id, permission_info_list):
+    """
+    dom document node
+    role_id value of attribute id, which exists in dom
+    permission_info_list list of tuples,
+        each contains (permission, scope_type, scope)
+    """
+    __validate_permissions(dom, permission_info_list)
+
+    area_type_attribute_map = {
+        'xpath': 'xpath',
+        'id': 'reference',
+    }
+    for permission, scope_type, scope in permission_info_list:
+        se = dom.createElement("acl_permission")
+        se.setAttribute(
+            "id",
+            utils.find_unique_id(dom, role_id + "-" + permission)
+        )
+        se.setAttribute("kind", permission)
+        se.setAttribute(area_type_attribute_map[scope_type], scope)
+        __find_role(dom, role_id).appendChild(se)
+
+def remove_permissions_referencing(dom, reference):
+    for permission in dom.getElementsByTagName("acl_permission"):
+        if permission.getAttribute("reference") == reference:
+            permission.parentNode.removeChild(permission)
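
A minimal sketch of driving the new library functions the way run_permission_add
does (assumes a writable CIB and the hypothetical role id "operator"):

    import utils
    from library_acl import LibraryError, provide_role, add_permissions_to_role

    dom = utils.get_cib_dom()
    try:
        # create the role if it is missing, then attach one read permission
        provide_role(dom, "operator")
        add_permissions_to_role(dom, "operator", [("read", "xpath", "/cib")])
        utils.replace_cib_configuration(dom)
    except LibraryError as e:
        utils.process_library_reports(e.args)
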
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 1f15b18..86ecdd9 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "December 2015" "pcs 0.9.148" "System Administration Utilities"
+.TH PCS "8" "February 2016" "pcs 0.9.149" "System Administration Utilities"
 .SH NAME
 pcs \- pacemaker/corosync configuration system
 .SH SYNOPSIS
@@ -61,8 +61,8 @@ Show list of all available resources, optionally filtered by specified type, sta
 describe <standard:provider:type|type>
 Show options for the specified resource
 .TP
-create <resource id> <standard:provider:type|type> [resource options] [op <operation action> <operation options> [<operation action> <operation options>]...] [meta <meta options>...] [\fB\-\-clone\fR <clone options> | \fB\-\-master\fR <master options> | \fB\-\-group\fR <group name> [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>]] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
-Create specified resource.  If \fB\-\-clone\fR is used a clone resource is created if \fB\-\-master\fR is specified a master/slave resource is created.  If \fB\-\-group\fR is specified the resource is added to the group named.  You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resource relatively to some resource already existing in the group.  If \fB\-\-disabled\fR is specified the resource is not started automatically.  If \fB\-\-wait\fR is specified, [...]
+create <resource id> <standard:provider:type|type> [resource options] [op <operation action> <operation options> [<operation action> <operation options>]...] [meta <meta options>...] [\fB\-\-clone\fR <clone options> | \fB\-\-master\fR <master options> | \fB\-\-group\fR <group id> [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>]] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
+Create specified resource.  If \fB\-\-clone\fR is used a clone resource is created.  If \fB\-\-master\fR is specified a master/slave resource is created.  If \fB\-\-group\fR is specified the resource is added to the group named.  You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resource relatively to some resource already existing in the group.  If \fB\-\-disabled\fR is specified the resource is not started automatically.  If \fB\-\-wait\fR is specifie [...]
 
 Example: Create a new resource called 'VirtualIP' with IP address 192.168.0.99, netmask of 32, monitored everything 30 seconds, on eth2: pcs resource create VirtualIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 nic=eth2 op monitor interval=30s
 .TP
@@ -129,22 +129,22 @@ Set default values for operations, if no options are passed, lists currently con
 meta <resource id | group id | master id | clone id> <meta options> [\fB\-\-wait\fR[=n]]
 Add specified options to the specified resource, group, master/slave or clone.  Meta options should be in the format of name=value, options may be removed by setting an option without a value.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the changes to take effect and then return 0 if the changes have been processed or 1 otherwise.  If 'n' is not specified it defaults to 60 minutes.  Example: pcs resource meta TestResource failure\-timeout=50 stickiness=
 .TP
-group add <group name> <resource id> [resource id] ... [resource id] [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>] [\fB\-\-wait\fR[=n]]
+group add <group id> <resource id> [resource id] ... [resource id] [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>] [\fB\-\-wait\fR[=n]]
 Add the specified resource to the group, creating the group if it does not exist.  If the resource is present in another group it is moved to the new group.  You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resources relatively to some resource already existing in the group.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error. [...]
 .TP
-group remove <group name> <resource id> [resource id] ... [resource id] [\fB\-\-wait\fR[=n]]
+group remove <group id> <resource id> [resource id] ... [resource id] [\fB\-\-wait\fR[=n]]
 Remove the specified resource(s) from the group, removing the group if it no resources remain.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
-ungroup <group name> [resource id] ... [resource id] [\fB\-\-wait\fR[=n]]
+ungroup <group id> [resource id] ... [resource id] [\fB\-\-wait\fR[=n]]
 Remove the group (Note: this does not remove any resources from the cluster) or if resources are specified, remove the specified resources from the group.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and the return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 clone <resource id | group id> [clone options]... [\fB\-\-wait\fR[=n]]
 Setup up the specified resource or group as a clone.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting clone instances if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
-unclone <resource id | group name> [\fB\-\-wait\fR[=n]]
+unclone <resource id | group id> [\fB\-\-wait\fR[=n]]
 Remove the clone which contains the specified group or resource (the resource or group will not be removed).  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including stopping clone instances if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
-master [<master/slave name>] <resource id | group name> [options] [\fB\-\-wait\fR[=n]]
+master [<master/slave id>] <resource id | group id> [options] [\fB\-\-wait\fR[=n]]
 Configure a resource or group as a multi\-state (master/slave) resource.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting and promoting resource instances if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.  Note: to remove a master you must remove the resource/group it contains.
 .TP
 manage <resource id> ... [resource n]
@@ -353,34 +353,34 @@ Enable access control lists
 disable
 Disable access control lists
 .TP
-role create <role name> [description=<description>] [((read | write | deny) (xpath <query> | id <id>))...]
-Create a role with the name and (optional) description specified.
+role create <role id> [description=<description>] [((read | write | deny) (xpath <query> | id <id>))...]
+Create a role with the id and (optional) description specified.
 Each role can also have an unlimited number of permissions
 (read/write/deny) applied to either an xpath query or the id
 of a specific element in the cib
 .TP
-role delete <role name>
+role delete <role id>
 Delete the role specified and remove it from any users/groups it was assigned to
 .TP
-role assign <role name> [to] <username/group>
+role assign <role id> [to] <username/group>
 Assign a role to a user or group already created with 'pcs acl user/group create'
 .TP
-role unassign <role name> [from] <username/group>
+role unassign <role id> [from] <username/group>
 Remove a role from the specified user
 .TP
-user create <username> <role name> [<role name>]...
+user create <username> <role id> [<role id>]...
 Create an ACL for the user specified and assign roles to the user
 .TP
 user delete <username>
 Remove the user specified (and roles assigned will be unassigned for the specified user)
 .TP
-group create <group> <role name> [<role name>]...
+group create <group> <role id> [<role id>]...
 Create an ACL for the group specified and assign roles to the group
 .TP
 group delete <group>
 Remove the group specified (and roles assigned will be unassigned for the specified group)
 .TP
-permission add <role name> ((read | write | deny) (xpath <query> | id <id>))...
+permission add <role id> ((read | write | deny) (xpath <query> | id <id>))...
 Add the listed permissions to the role specified
 .TP
 permission delete <permission id>
@@ -390,7 +390,7 @@ Remove the permission id specified (permission id's are listed in parenthesis af
 list|show [<property> | \fB\-\-all\fR | \fB\-\-defaults\fR]
 List property settings (default: lists configured properties).  If \fB\-\-defaults\fR is specified will show all property defaults, if \fB\-\-all\fR is specified, current configured properties will be shown with unset properties and their defaults.  Run 'man pengine' and 'man crmd' to get a description of the properties.
 .TP
-set [\fB\-\-force\fR] [\fB\-\-node\fR <nodename>] <property>=[<value>]
+set [\fB\-\-force\fR | \fB\-\-node\fR <nodename>] <property>=[<value>] [<property>=[<value>] ...]
 Set specific pacemaker properties (if the value is blank then the property is removed from the configuration).  If a property is not recognized by pcs the property will not be created unless the \fB\-\-force\fR is used. If \fB\-\-node\fR is used a node attribute is set on the specified node.  Run 'man pengine' and 'man crmd' to get a description of the properties.
 .TP
 unset [\fB\-\-node\fR <nodename>] <property>
@@ -430,11 +430,11 @@ where duration options and date spec options are: hours, monthdays, weekdays, ye
 location show [resources|nodes [node id|resource id]...] [\fB\-\-full\fR]
 List all the current location constraints, if 'resources' is specified location constraints are displayed per resource (default), if 'nodes' is specified location constraints are displayed per node.  If specific nodes or resources are specified then we only show information about them.  If \fB\-\-full\fR is specified show the internal constraint id's as well.
 .TP
-location add <id> <resource name> <node> <score> [resource-discovery=<option>]
-Add a location constraint with the appropriate id, resource name, node name and score. (For more advanced pacemaker usage)
+location add <id> <resource id> <node> <score> [resource-discovery=<option>]
+Add a location constraint with the appropriate id, resource id, node name and score. (For more advanced pacemaker usage)
 .TP
-location remove <id> [<resource name> <node> <score>]
-Remove a location constraint with the appropriate id, resource name, node name and score. (For more advanced pacemaker usage)
+location remove <id> [<resource id> <node> <score>]
+Remove a location constraint with the appropriate id, resource id, node name and score. (For more advanced pacemaker usage)
 .TP
 order show [\fB\-\-full\fR]
 List all current ordering constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
@@ -491,8 +491,8 @@ rule remove <rule id>
 Remove a rule if a rule id is specified, if rule is last rule in its constraint, the constraint will be removed
 .SS "status"
 .TP
-[status] [\fB\-\-full\fR]
-View all information about the cluster and resources (\fB\-\-full\fR provides more details)
+[status] [\fB\-\-full\fR | \fB\-\-hide-inactive\fR]
+View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide-inactive\fR hides inactive resources)
 .TP
 resources
 View current status of cluster resources
@@ -509,8 +509,8 @@ View current membership information as seen by corosync
 nodes [corosync|both|config]
 View current status of nodes from pacemaker. If 'corosync' is specified, print nodes currently configured in corosync, if 'both' is specified, print nodes from both corosync & pacemaker.  If 'config' is specified, print nodes from corosync & pacemaker configuration.
 .TP
-pcsd <node> ...
-Show the current status of pcsd on the specified nodes
+pcsd [<node>] ...
+Show the current status of pcsd on the specified nodes. When no nodes are specified, the status of all nodes is displayed.
 .TP
 xml
 View xml version of status (output from crm_mon \fB\-r\fR \fB\-1\fR \fB\-X\fR)
diff --git a/pcs/pcs.py b/pcs/pcs.py
index f41e334..bda6c0b 100755
--- a/pcs/pcs.py
+++ b/pcs/pcs.py
@@ -87,6 +87,8 @@ def main(argv):
             "miss_count_const=", "fail_recv_const=",
             "corosync_conf=", "cluster_conf=",
             "remote",
+            # in pcs status - do not display inactive resources
+            "hide-inactive",
         ]
         # pull out negative number arguments and add them back after getopt
         prev_arg = ""
diff --git a/pcs/prop.py b/pcs/prop.py
index 592e4d8..8a60611 100644
--- a/pcs/prop.py
+++ b/pcs/prop.py
@@ -4,44 +4,85 @@ from __future__ import print_function
 from __future__ import unicode_literals
 
 import sys
+import json
 from xml.dom.minidom import parseString
-import xml.etree.ElementTree as ET
 
 import usage
 import utils
-import settings
 
 def property_cmd(argv):
     if len(argv) == 0:
         argv = ["list"]
 
     sub_cmd = argv.pop(0)
-    if (sub_cmd == "help"):
+    if sub_cmd == "help":
         usage.property(argv)
-    elif (sub_cmd == "set"):
+    elif sub_cmd == "set":
         set_property(argv)
-    elif (sub_cmd == "unset"):
+    elif sub_cmd == "unset":
         unset_property(argv)
-    elif (sub_cmd == "list" or sub_cmd == "show"):
+    elif sub_cmd == "list" or sub_cmd == "show":
         list_property(argv)
+    elif sub_cmd == "get_cluster_properties_definition":
+        print(json.dumps(utils.get_cluster_properties_definition()))
     else:
         usage.property()
         sys.exit(1)
 
+
 def set_property(argv):
+    if not argv:
+        usage.property(['set'])
+        sys.exit(1)
+
+    prop_def_dict = utils.get_cluster_properties_definition()
+    nodes_attr = "--node" in utils.pcs_options
+    failed = False
+    forced = "--force" in utils.pcs_options
+    properties = {}
     for arg in argv:
         args = arg.split('=')
-        if (len(args) != 2):
-            print("Invalid Property: " + arg)
-            continue
-        if "--node" in utils.pcs_options:
-            utils.set_node_attribute(args[0], args[1], utils.pcs_options["--node"])
-        elif ("--force" in utils.pcs_options) or utils.is_valid_property(args[0]):
-            if not args[0]:
-                utils.err("property name cannot be empty")
-            utils.set_cib_property(args[0],args[1])
+        if len(args) != 2:
+            utils.err("invalid property format: '{0}'".format(arg), False)
+            failed = True
+        elif not args[0]:
+            utils.err("empty property name: '{0}'".format(arg), False)
+            failed = True
+        elif nodes_attr or forced or args[1].strip() == "":
+            properties[args[0]] = args[1]
         else:
-            utils.err("unknown cluster property: '%s', (use --force to override)" % args[0])
+            try:
+                if utils.is_valid_cluster_property(
+                    prop_def_dict, args[0], args[1]
+                ):
+                    properties[args[0]] = args[1]
+                else:
+                    utils.err(
+                        "invalid value of property: '{0}', (use --force to "
+                        "override)".format(arg),
+                        False
+                    )
+                    failed = True
+            except utils.UnknownPropertyException:
+                utils.err(
+                    "unknown cluster property: '{0}', (use --force to "
+                    "override)".format(args[0]),
+                    False
+                )
+                failed = True
+
+    if failed:
+        sys.exit(1)
+
+    if nodes_attr:
+        for prop, value in properties.items():
+            utils.set_node_attribute(prop, value, utils.pcs_options["--node"])
+    else:
+        cib_dom = utils.get_cib_dom()
+        for prop, value in properties.items():
+            utils.set_cib_property(prop, value, cib_dom)
+        utils.replace_cib_configuration(cib_dom)
+
 
 def unset_property(argv):
     if len(argv) < 1:
@@ -52,8 +93,10 @@ def unset_property(argv):
         for arg in argv:
             utils.set_node_attribute(arg, "",utils.pcs_options["--node"])
     else:
+        cib_dom = utils.get_cib_dom()
         for arg in argv:
-            utils.set_cib_property(arg, "")
+            utils.set_cib_property(arg, "", cib_dom)
+        utils.replace_cib_configuration(cib_dom)
 
 def list_property(argv):
     print_all = False
@@ -66,7 +109,7 @@ def list_property(argv):
         properties = get_default_properties()
     else:
         properties = {}
-        
+
     if "--defaults" not in utils.pcs_options:
         properties = get_set_properties(
             None if print_all else argv[0],
@@ -87,32 +130,10 @@ def list_property(argv):
             print(" ".join(line_parts))
 
 def get_default_properties():
-    (output, retVal) = utils.run([settings.pengine_binary, "metadata"])
-    if retVal != 0:
-        utils.err("unable to get pengine metadata\n"+output)
-    pe_root = ET.fromstring(output)
-
-    (output, retVal) = utils.run([settings.crmd_binary, "metadata"])
-    if retVal != 0:
-        utils.err("unable to get crmd metadata\n"+output)
-    crmd_root = ET.fromstring(output)
-    
-    (output, retVal) = utils.run([settings.cib_binary, "metadata"])
-    if retVal != 0:
-        utils.err("unable to get cib metadata\n"+output)
-    cib_root = ET.fromstring(output)
-
     parameters = {}
-    for root in [pe_root, crmd_root, cib_root]:
-        for param in root.getiterator('parameter'):
-            name = param.attrib["name"]
-            content = param.find("content")
-            if content is not None:
-                default = content.attrib["default"]
-            else:
-                default = ""
-
-            parameters[name] =  default
+    prop_def_dict = utils.get_cluster_properties_definition()
+    for name, prop in prop_def_dict.items():
+        parameters[name] = prop["default"]
     return parameters
 
 def get_set_properties(prop_name=None, defaults=None):
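
The rewritten set_property above validates everything before it writes anything: every name=value pair is checked (format, empty name, and value validity against the cluster properties definition), and only if none failed is a single in-memory CIB mutated and pushed once. A minimal sketch of that two-phase pattern; validate_pair, set_in_cib and push_cib are hypothetical stand-ins for the utils calls used in the patch, not real pcs API:

    # Sketch only of the validate-then-apply flow in set_property.
    def set_properties(pairs, validate_pair, set_in_cib, push_cib):
        properties = {}
        failed = False
        for name, value in pairs:
            if validate_pair(name, value):
                properties[name] = value
            else:
                failed = True  # keep going so all errors are reported at once
        if failed:
            raise SystemExit(1)  # nothing has been written to the CIB yet
        for name, value in properties.items():
            set_in_cib(name, value)  # mutate one in-memory CIB DOM
        push_cib()  # single replace_cib_configuration call at the end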
diff --git a/pcs/resource.py b/pcs/resource.py
index 89e7ac9..022732b 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -16,6 +16,7 @@ import usage
 import utils
 import constraint
 import stonith
+import library_acl as lib_acl
 
 
 PACEMAKER_WAIT_TIMEOUT_STATUS = 62
@@ -886,7 +887,7 @@ def resource_update(res_id,args):
         resource.appendChild(instance_attributes)
     else:
         instance_attributes = instance_attributes[0]
-    
+
     params = utils.convert_args_to_tuples(ra_values)
     if not "--force" in utils.pcs_options and (resource.getAttribute("class") == "ocf" or resource.getAttribute("class") == "stonith"):
         resClass = resource.getAttribute("class")
@@ -926,7 +927,7 @@ def resource_update(res_id,args):
         resource.appendChild(meta_attributes)
     else:
         meta_attributes = meta_attributes[0]
-    
+
     meta_attrs = utils.convert_args_to_tuples(meta_values)
     for (key,val) in meta_attrs:
         meta_found = False
@@ -1277,7 +1278,7 @@ def get_full_ra_type(ra_type, return_string = False):
         else:
             ra_type = "ocf:heartbeat:" + ra_type
 
-    
+
     if return_string:
         return ra_type
 
@@ -1591,7 +1592,7 @@ def resource_master_create(dom, argv, update=False, master_id=None):
         # element in the group, we remove the group
         if resource.parentNode.tagName == "group" and resource.parentNode.getElementsByTagName("primitive").length <= 1:
             resource.parentNode.parentNode.removeChild(resource.parentNode)
-        
+
         master_element = dom.createElement("master")
         if master_id_autogenerated:
             master_element.setAttribute(
@@ -1823,6 +1824,7 @@ def remove_resource_references(
         resource_id, output, constraints_element, dom
     )
     stonith.stonith_level_rm_device(dom, resource_id)
+    lib_acl.remove_permissions_referencing(dom, resource_id)
     return dom
 
 # This removes a resource from a group, but keeps it in the config
@@ -2553,7 +2555,7 @@ def resource_history(args):
             resources[res_id] = {}
         for rsc_op in res.getElementsByTagName("lrm_rsc_op"):
             resources[res_id][rsc_op.getAttribute("call-id")] = [res_id, rsc_op]
-    
+
     for res in sorted(resources):
         print("Resource: %s" % res)
         for cid in sorted(resources[res]):
@@ -2618,7 +2620,7 @@ def resource_relocate_set_stickiness(cib_dom, resources=None):
                 meta_attributes = utils.dom_prepare_child_element(
                     res_or_child,
                     "meta_attributes",
-                    res_or_child.getAttribute("id") + "-"
+                    res_or_child.getAttribute("id") + "-meta_attributes"
                 )
                 utils.dom_update_nv_pair(
                     meta_attributes,
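
Note the one functional addition to remove_resource_references above: deleting a resource now also drops ACL permissions that reference it. A hedged sketch of what remove_permissions_referencing does, inferred from the RemovePermissionForReferenceTest cases further down (the real implementation is in pcs/library_acl.py, which this excerpt does not show):

    # Sketch only: remove every acl_permission whose "reference" attribute
    # points at the deleted id; emptied roles are kept, per the tests below.
    def remove_permissions_referencing(dom, reference):
        for permission in dom.getElementsByTagName("acl_permission"):
            if permission.getAttribute("reference") == reference:
                permission.parentNode.removeChild(permission)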
diff --git a/pcs/settings.py b/pcs/settings.py
index 4cdc010..e329e3f 100644
--- a/pcs/settings.py
+++ b/pcs/settings.py
@@ -8,7 +8,7 @@ pengine_binary = "/usr/libexec/pacemaker/pengine"
 crmd_binary = "/usr/libexec/pacemaker/crmd"
 cib_binary = "/usr/libexec/pacemaker/cib"
 stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.148"
+pcs_version = "0.9.149"
 crm_report = pacemaker_binaries + "crm_report"
 crm_verify = pacemaker_binaries + "crm_verify"
 pcsd_cert_location = "/var/lib/pcsd/pcsd.crt"
diff --git a/pcs/settings.py.x86_64-linux-gnu.debian b/pcs/settings.py.debian
similarity index 73%
rename from pcs/settings.py.x86_64-linux-gnu.debian
rename to pcs/settings.py.debian
index cf2c256..3e3e6cf 100644
--- a/pcs/settings.py.x86_64-linux-gnu.debian
+++ b/pcs/settings.py.debian
@@ -4,11 +4,11 @@ ccs_binaries = "/usr/sbin/"
 corosync_conf_file = "/etc/corosync/corosync.conf"
 cluster_conf_file = "/etc/cluster/cluster.conf"
 fence_agent_binaries = "/usr/sbin/"
-pengine_binary = "/usr/lib/x86_64-linux-gnu/pacemaker/pengine"
-crmd_binary = "/usr/lib/x86_64-linux-gnu/pacemaker/crmd"
-cib_binary = "/usr/lib/x86_64-linux-gnu/pacemaker/cib"
-stonithd_binary = "/usr/lib/x86_64-linux-gnu/pacemaker/stonithd"
-pcs_version = "0.9.148"
+pengine_binary = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/pengine"
+crmd_binary = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/crmd"
+cib_binary = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/cib"
+stonithd_binary = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/stonithd"
+pcs_version = "0.9.149"
 crm_report = pacemaker_binaries + "crm_report"
 crm_verify = pacemaker_binaries + "crm_verify"
 pcsd_cert_location = "/var/lib/pcsd/pcsd.crt"
diff --git a/pcs/settings.py.i386-linux-gnu.debian b/pcs/settings.py.i386-linux-gnu.debian
deleted file mode 100644
index c13d92b..0000000
--- a/pcs/settings.py.i386-linux-gnu.debian
+++ /dev/null
@@ -1,23 +0,0 @@
-pacemaker_binaries = "/usr/sbin/"
-corosync_binaries = "/usr/sbin/"
-ccs_binaries = "/usr/sbin/"
-corosync_conf_file = "/etc/corosync/corosync.conf"
-cluster_conf_file = "/etc/cluster/cluster.conf"
-fence_agent_binaries = "/usr/sbin/"
-pengine_binary = "/usr/lib/i386-linux-gnu/pacemaker/pengine"
-crmd_binary = "/usr/lib/i386-linux-gnu/pacemaker/crmd"
-cib_binary = "/usr/lib/i386-linux-gnu/pacemaker/cib"
-stonithd_binary = "/usr/lib/i386-linux-gnu/pacemaker/stonithd"
-pcs_version = "0.9.148"
-crm_report = pacemaker_binaries + "crm_report"
-crm_verify = pacemaker_binaries + "crm_verify"
-pcsd_cert_location = "/var/lib/pcsd/pcsd.crt"
-pcsd_key_location = "/var/lib/pcsd/pcsd.key"
-pcsd_tokens_location = "/var/lib/pcsd/tokens"
-pcsd_users_conf_location = "/var/lib/pcsd/pcs_users.conf"
-pcsd_settings_conf_location = "/var/lib/pcsd/pcs_settings.conf"
-pcsd_exec_location = "/usr/share/pcsd/"
-corosync_uidgid_dir = "/etc/corosync/uidgid.d/"
-cib_dir = "/var/lib/pacemaker/cib/"
-pacemaker_uname = "hacluster"
-pacemaker_gname = "haclient"
diff --git a/pcs/status.py b/pcs/status.py
index 6c41db5..25817b0 100644
--- a/pcs/status.py
+++ b/pcs/status.py
@@ -40,10 +40,18 @@ def status_cmd(argv):
         sys.exit(1)
 
 def full_status():
+    if "--hide-inactive" in utils.pcs_options and "--full" in utils.pcs_options:
+        utils.err("you cannot specify both --hide-inactive and --full")
+
+    monitor_command = ["crm_mon", "--one-shot"]
+    if "--hide-inactive" not in utils.pcs_options:
+        monitor_command.append('--inactive')
     if "--full" in utils.pcs_options:
-        (output, retval) = utils.run(["crm_mon", "-1", "-r", "-R", "-A", "-f"])
-    else:
-        (output, retval) = utils.run(["crm_mon", "-1", "-r"])
+        monitor_command.extend(
+            ["--show-detail", "--show-node-attributes", "--failcounts"]
+        )
+
+    output, retval = utils.run(monitor_command)
 
     if (retval != 0):
         utils.err("cluster is not currently running on this node")
diff --git a/pcs/test/Makefile b/pcs/test/Makefile
index 4a9bb35..34334d2 100644
--- a/pcs/test/Makefile
+++ b/pcs/test/Makefile
@@ -13,3 +13,4 @@ test:
 	$(PYTHON) test_properties.py ${pyunit_flags}
 	$(PYTHON) test_acl.py ${pyunit_flags}
 	$(PYTHON) test_node.py ${pyunit_flags}
+	$(PYTHON) test_library_acl.py ${pyunit_flags}
diff --git a/pcs/test/library_test_tools.py b/pcs/test/library_test_tools.py
new file mode 100644
index 0000000..9b6bcfc
--- /dev/null
+++ b/pcs/test/library_test_tools.py
@@ -0,0 +1,93 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import xml.dom.minidom
+from lxml.doctestcompare import LXMLOutputChecker
+from doctest import Example
+
+from library_acl import LibraryError
+
+class LibraryAssertionMixin(object):
+    def __find_report_info(self, report_info_list, report_item):
+        for report_info in report_info_list:
+            if(
+                report_item.severity == report_info[0]
+                and
+                report_item.code == report_info[1]
+                and
+                #checks only presence and match of expected in info,
+                #extra info is ignored
+                all(
+                    (k in report_item.info and report_item.info[k]==v)
+                    for k,v in report_info[2].iteritems()
+                )
+            ):
+                return report_info
+        raise AssertionError(
+            'Unexpected report given: {0}'
+            .format(repr((
+                report_item.severity, report_item.code, repr(report_item.info)
+            )))
+        )
+
+    def __check_error(self, e, report_info_list):
+        for report_item in e.args:
+            report_info_list.remove(
+                self.__find_report_info(report_info_list, report_item)
+            )
+
+        if report_info_list:
+            raise AssertionError(
+                'Expected report items were not present in LibraryError: '
+                + repr(report_info_list)
+            )
+
+    def assert_raise_library_error(self, callableObj, *report_info_list):
+        if not report_info_list:
+            raise AssertionError(
+                'Raising LibraryError expected, but no report item specified.'
+                +' Please specify the report items you expect in LibraryError'
+            )
+
+        try:
+            callableObj()
+            raise AssertionError('LibraryError not raised')
+        except LibraryError as e:
+            self.__check_error(e, list(report_info_list))
+
+    def assert_cib_equal(self, expected_cib, got_cib=None):
+        got_cib = got_cib if got_cib else self.cib
+        got_xml = got_cib.dom.toxml()
+        expected_xml = expected_cib.dom.toxml()
+
+        checker = LXMLOutputChecker()
+        if checker.check_output(expected_xml, got_xml, 0):
+            return
+
+        raise AssertionError(checker.output_difference(
+            Example("", expected_xml),
+            got_xml,
+            0
+        ))
+
+class CibManipulation(object):
+    def __init__(self, file_name):
+        self.dom = xml.dom.minidom.parse(file_name)
+
+    def __append_to_child(self, element, xml_string):
+        element.appendChild(
+            xml.dom.minidom.parseString(xml_string).firstChild
+        )
+
+    def append_to_first_tag_name(self, tag_name, xml_string):
+        self.__append_to_child(
+            self.dom.getElementsByTagName(tag_name)[0], xml_string
+        )
+        return self
+
+def get_cib_manipulation_creator(file_name):
+    def create_cib_manipulation():
+        return CibManipulation(file_name)
+    return create_cib_manipulation
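
In the mixin above, each expected report passed to assert_raise_library_error is a (severity, code, info) triple; a LibraryError report item matches when severity and code are equal and every expected info key/value is present on the item, with extra keys ignored. Restating that matching rule in isolation (this is a paraphrase of __find_report_info, not pcs code):

    # Paraphrase of the matching rule in __find_report_info above.
    def matches(report_item, expected):
        severity, code, info = expected
        return (
            report_item.severity == severity
            and report_item.code == code
            and all(
                key in report_item.info and report_item.info[key] == value
                for key, value in info.items()
            )
        )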
diff --git a/pcs/test/pcs_test_assertions.py b/pcs/test/pcs_test_assertions.py
new file mode 100644
index 0000000..8e6fbc9
--- /dev/null
+++ b/pcs/test/pcs_test_assertions.py
@@ -0,0 +1,75 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import difflib
+
+def prepare_diff(first, second):
+    return ''.join(
+        difflib.Differ().compare(first.splitlines(1), second.splitlines(1))
+    )
+
+
+class AssertPcsMixin(object):
+    def assert_pcs_success(self, command, stdout_full=None, stdout_start=None):
+        full = stdout_full
+        if stdout_start is None and stdout_full is None:
+            full = ''
+
+        self.assert_pcs_result(
+            command,
+            stdout_full=full,
+            stdout_start=stdout_start
+        )
+
+    def assert_pcs_fail(self, command, stdout_full=None, stdout_start=None):
+        self.assert_pcs_result(
+            command,
+            stdout_full=stdout_full,
+            stdout_start=stdout_start,
+            returncode=1
+        )
+
+    def assert_pcs_result(
+        self, command, stdout_full=None, stdout_start=None, returncode=0
+    ):
+        msg = 'Please specify exactly one: stdout_start or stdout_full'
+        if stdout_start is None and stdout_full is None:
+            raise Exception(msg +', none specified')
+
+        if stdout_start is not None and stdout_full is not None:
+            raise Exception(msg +', both specified')
+
+        stdout, pcs_returncode = self.pcs_runner.run(command)
+        self.assertEqual(
+            returncode, pcs_returncode, (
+                'Expected return code "{0}", but was "{1}"'
+                +'\ncommand:\n{2}\nstdout:\n{3}'
+            ).format(returncode, pcs_returncode, command, stdout)
+        )
+        if stdout_start:
+            expected_start = '\n'.join(stdout_start)+'\n' \
+                if isinstance(stdout_start, list) else stdout_start
+
+            if not stdout.startswith(expected_start):
+                self.assertTrue(
+                    False,
+                    'Stdout does not start as expected\ncommand:\n'+command
+                    +'\ndiff is (expected 2nd):\n'
+                    +prepare_diff(stdout[:len(expected_start)], expected_start)
+                    +'\nFull stdout:'+stdout
+                )
+        else:
+            expected_full = '\n'.join(stdout_full)+'\n' \
+                if isinstance(stdout_full, list) else stdout_full
+
+            # unicode vs non-unicode handling is not addressed here
+            if stdout != expected_full:
+                self.assertEqual(
+                    stdout, expected_full,
+                    'Stdout is not as expected\ncommand:\n'+command
+                    +'\ndiff is (expected 2nd):\n'
+                    +prepare_diff(stdout, expected_full)
+                    +'\nFull stdout:'+stdout
+                )
diff --git a/pcs/test/pcs_test_functions.py b/pcs/test/pcs_test_functions.py
index 9722e2f..3e84455 100644
--- a/pcs/test/pcs_test_functions.py
+++ b/pcs/test/pcs_test_functions.py
@@ -8,6 +8,7 @@ import sys
 import difflib
 import subprocess
 import re
+import xml.dom.minidom
 parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.insert(0,parentdir)
 
@@ -16,8 +17,22 @@ import utils
 
 pcs_location = "../pcs.py"
 
-# Run pcs with -f on specified file
+class PcsRunner(object):
+    testfile = 'temp.xml'
+    def __init__(self, testfile='temp.xml'):
+        self.testfile = testfile
+
+
+    def run(self, args):
+        return pcs(self.testfile, args)
+
+
 def pcs(testfile, args = ""):
+    """Run pcs with -f on specified file
+    Return tuple with:
+        shell stdoutdata
+        shell returncode
+    """
     if args == "":
         args = testfile
         testfile = "temp.xml"
@@ -64,3 +79,6 @@ def isMinimumPacemakerVersion(cmajor,cminor,crev):
         return True
     return False
 
+
+def get_child_elements(el):
+    return [e for e in el.childNodes if e.nodeType == xml.dom.minidom.Node.ELEMENT_NODE]
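
PcsRunner pins the -f test CIB file so that AssertPcsMixin (added above) can run commands without threading the file name through every call. A minimal sketch of how the two compose, mirroring the test classes converted below:

    # Sketch: how the tests below combine PcsRunner and AssertPcsMixin.
    import shutil
    import unittest
    from pcs_test_functions import PcsRunner
    from pcs_test_assertions import AssertPcsMixin

    class ExampleTest(unittest.TestCase, AssertPcsMixin):
        def setUp(self):
            shutil.copy("empty.xml", "temp.xml")  # fresh CIB for each test
            self.pcs_runner = PcsRunner("temp.xml")

        def test_acl_disabled_by_default(self):
            self.assert_pcs_success(
                "acl show",
                "ACLs are disabled, run 'pcs acl enable' to enable\n\n"
            )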
diff --git a/pcs/test/test_acl.py b/pcs/test/test_acl.py
index c8677b2..9227f42 100644
--- a/pcs/test/test_acl.py
+++ b/pcs/test/test_acl.py
@@ -12,33 +12,40 @@ sys.path.insert(0,parentdir)
 
 import utils
 from pcs_test_functions import pcs, ac, isMinimumPacemakerVersion
+from pcs_test_functions import PcsRunner
+from pcs_test_assertions import AssertPcsMixin
 
 
 old_cib = "empty.xml"
 empty_cib = "empty-1.2.xml"
 temp_cib = "temp.xml"
 
-class ACLTest(unittest.TestCase):
+class ACLTest(unittest.TestCase, AssertPcsMixin):
+    pcs_runner = None
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
         shutil.copy("corosync.conf.orig", "corosync.conf")
+        self.pcs_runner = PcsRunner(temp_cib)
 
     def testAutoUpgradeofCIB(self):
         old_temp_cib = temp_cib + "-old"
         shutil.copy(old_cib, old_temp_cib)
+        self.pcs_runner.testfile = old_temp_cib
 
-        o,r = pcs(old_temp_cib, "acl show")
-        ac(o,"ACLs are disabled, run 'pcs acl enable' to enable\n\n")
-        assert r == 0
+        self.assert_pcs_success(
+            'acl show',
+            "ACLs are disabled, run 'pcs acl enable' to enable\n\n"
+        )
 
         with open(old_temp_cib) as myfile:
             data = myfile.read()
             assert data.find("pacemaker-1.2") != -1
             assert data.find("pacemaker-2.") == -1
 
-        o,r = pcs(old_temp_cib, "acl role create test_role read xpath my_xpath")
-        ac(o,"Cluster CIB has been upgraded to latest version\n")
-        assert r == 0
+        self.assert_pcs_success(
+            'acl role create test_role read xpath my_xpath',
+            "Cluster CIB has been upgraded to latest version\n"
+        )
 
         with open(old_temp_cib) as myfile:
             data = myfile.read()
@@ -731,17 +738,15 @@ Role: role4
         self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
         self.assertEqual(1, r)
 
-        o, r = pcs(
-            "acl permission add role1 read id dummy readX xpath //resources"
+        self.assert_pcs_fail(
+          "acl permission add role1 read id dummy readX xpath //resources",
+          stdout_start='\nUsage: pcs acl permission add...'
         )
-        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
-        self.assertEqual(1, r)
 
-        o, r = pcs(
-            "acl permission add role1 read id dummy read xpathX //resources"
+        self.assert_pcs_fail(
+          "acl permission add role1 read id dummy read xpathX //resources",
+          stdout_start='\nUsage: pcs acl permission add...'
         )
-        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
-        self.assertEqual(1, r)
 
         o, r = pcs("acl")
         ac(o, """\
@@ -754,6 +759,60 @@ Role: role4
 """)
         self.assertEqual(0, r)
 
+    def test_can_add_permission_for_existing_id(self):
+        self.assert_pcs_success('acl role create role1')
+        self.assert_pcs_success('acl role create role2')
+        self.assert_pcs_success("acl permission add role1 read id role2")
+
+    def test_can_add_permission_for_existing_xpath(self):
+        self.assert_pcs_success('acl role create role1')
+        self.assert_pcs_success("acl permission add role1 read xpath //nodes")
+
+    def test_can_not_add_permission_for_nonexisting_id(self):
+        self.assert_pcs_success('acl role create role1')
+        self.assert_pcs_fail(
+            'acl permission add role1 read id non-existent-id',
+            'Error: id "non-existent-id" does not exist.\n'
+        )
+
+    def test_can_not_add_permission_for_nonexisting_id_in_later_part(self):
+        self.assert_pcs_success('acl role create role1')
+        self.assert_pcs_success('acl role create role2')
+        self.assert_pcs_fail(
+            'acl permission add role1 read id role2 read id no-existent-id',
+            'Error: id "no-existent-id" does not exist.\n'
+        )
+
+    def test_can_not_add_permission_for_nonexisting_role_with_bad_id(self):
+        self.assert_pcs_success('acl role create role1')
+        self.assert_pcs_fail(
+            'acl permission add #bad-name read id role1',
+            "Error: invalid ACL role '#bad-name'"
+            +", '#' is not a valid first character for a ACL role\n"
+        )
+
+    def test_can_create_role_with_permission_for_existing_id(self):
+        self.assert_pcs_success('acl role create role2')
+        self.assert_pcs_success('acl role create role1 read id role2')
+
+    def test_can_not_create_role_with_permission_for_nonexisting_id(self):
+        self.assert_pcs_fail(
+            'acl role create role1 read id non-existent-id',
+            'Error: id "non-existent-id" does not exist.\n'
+        )
+
+    def test_can_not_create_role_with_bad_name(self):
+        self.assert_pcs_fail(
+            'acl role create #bad-name',
+            "Error: invalid ACL role '#bad-name'"
+            +", '#' is not a valid first character for a ACL role\n"
+        )
+
+    def test_fail_on_unknown_role_method(self):
+        self.assert_pcs_fail(
+            'acl role unknown whatever',
+            stdout_start="\nUsage: pcs acl role..."
+        )
 
 if __name__ == "__main__":
     if isMinimumPacemakerVersion(1,1,11):
diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
index 805fc80..8ec303d 100644
--- a/pcs/test/test_cluster.py
+++ b/pcs/test/test_cluster.py
@@ -12,21 +12,24 @@ sys.path.insert(0, parentdir)
 
 import utils
 from pcs_test_functions import pcs, ac, isMinimumPacemakerVersion
+from pcs_test_functions import PcsRunner
+from pcs_test_assertions import AssertPcsMixin
 
 
 empty_cib = "empty-withnodes.xml"
 temp_cib = "temp.xml"
 
-class ClusterTest(unittest.TestCase):
+class ClusterTest(unittest.TestCase, AssertPcsMixin):
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
+        self.pcs_runner = PcsRunner(temp_cib)
         if os.path.exists("corosync.conf.tmp"):
             os.unlink("corosync.conf.tmp")
         if os.path.exists("cluster.conf.tmp"):
             os.unlink("cluster.conf.tmp")
 
     def testNodeStandby(self):
-        output, returnVal = pcs(temp_cib, "cluster standby rh7-1") 
+        output, returnVal = pcs(temp_cib, "cluster standby rh7-1")
         ac(output, "")
         assert returnVal == 0
 
@@ -44,7 +47,7 @@ class ClusterTest(unittest.TestCase):
         ac(output, "")
         assert returnVal == 0
 
-        output, returnVal = pcs(temp_cib, "cluster standby nonexistant-node") 
+        output, returnVal = pcs(temp_cib, "cluster standby nonexistant-node")
         assert returnVal == 1
         assert output == "Error: node 'nonexistant-node' does not appear to exist in configuration\n"
 
@@ -2470,6 +2473,16 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
         ac(o,"Cluster CIB has been upgraded to latest version\n")
         assert r == 0
 
+    def test_can_not_setup_cluster_for_unknown_transport_type(self):
+        self.assert_pcs_fail(
+            'cluster setup --local --corosync_conf=corosync.conf.tmp'
+                +" --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2"
+                +" --transport=unknown"
+            ,
+            "Error: unknown transport 'unknown', use --force to override\n"
+        )
+
+
 if __name__ == "__main__":
     unittest.main()
 
diff --git a/pcs/test/test_library_acl.py b/pcs/test/test_library_acl.py
new file mode 100644
index 0000000..63eea0a
--- /dev/null
+++ b/pcs/test/test_library_acl.py
@@ -0,0 +1,214 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import unittest
+import os.path
+import sys
+
+currentdir = os.path.dirname(os.path.abspath(__file__))
+sys.path.insert(0, os.path.dirname(currentdir))
+
+import library_acl as lib
+from errors import error_codes
+from errors import ReportItemSeverity as severities
+from library_test_tools import LibraryAssertionMixin
+from library_test_tools import get_cib_manipulation_creator
+
+
+class LibraryAclTest(unittest.TestCase, LibraryAssertionMixin):
+    def setUp(self):
+        self.create_cib = get_cib_manipulation_creator(
+            os.path.join(currentdir, "empty.xml")
+        )
+        self.cib = self.create_cib()
+
+    def fixture_add_role(self, role_id):
+        self.cib.append_to_first_tag_name(
+            'configuration',
+            '<acls><acl_role id="{0}"/></acls>'.format(role_id)
+        )
+
+class CreateRoleTest(LibraryAclTest):
+    def test_create_for_new_role_id(self):
+        role_id = 'new-id'
+        lib.create_role(self.cib.dom, role_id)
+
+        self.assert_cib_equal(
+            self.create_cib().append_to_first_tag_name(
+                'configuration',
+                '<acls><acl_role id="{0}"/></acls>'.format(role_id)
+            )
+        )
+
+    def test_refuse_invalid_id(self):
+        self.assert_raise_library_error(
+            lambda: lib.create_role(self.cib.dom, '#invalid'),
+            (
+                severities.ERROR,
+                error_codes.ID_IS_NOT_VALID,
+                {'id': '#invalid'},
+            ),
+        )
+
+    def test_refuse_existing_role_id(self):
+        role_id = 'role1'
+        self.fixture_add_role(role_id)
+        self.assert_raise_library_error(
+            lambda: lib.create_role(self.cib.dom, role_id),
+            (
+                severities.ERROR,
+                error_codes.ACL_ROLE_ALREADY_EXISTS,
+                {'id': role_id},
+            ),
+        )
+
+    def test_refuse_existing_non_role_id(self):
+        self.cib.append_to_first_tag_name(
+            'nodes',
+            '<node id="node-id" uname="node-hostname"/>'
+        )
+
+        self.assert_raise_library_error(
+            lambda: lib.create_role(self.cib.dom, 'node-id'),
+            (
+                severities.ERROR,
+                error_codes.ID_ALREADY_EXISTS,
+                {'id': 'node-id'},
+            ),
+        )
+
+class AddPermissionsToRoleTest(LibraryAclTest):
+    def test_add_for_correct_permissions(self):
+        role_id = 'role1'
+        self.fixture_add_role(role_id)
+
+        lib.add_permissions_to_role(
+            self.cib.dom, role_id, [('read', 'xpath', '/whatever')]
+        )
+
+        self.assert_cib_equal(
+            self.create_cib().append_to_first_tag_name('configuration', '''
+              <acls>
+                <acl_role id="{0}">
+                  <acl_permission id="{0}-read" kind="read" xpath="/whatever"/>
+                </acl_role>
+              </acls>
+            '''.format(role_id))
+        )
+
+
+    def test_refuse_add_for_nonexistent_role_id(self):
+        role_id = 'role1'
+        self.assert_raise_library_error(
+            lambda: lib.add_permissions_to_role(
+                self.cib.dom, role_id, [('read', 'xpath', '/whatever')]
+            ),
+            (
+                severities.ERROR,
+                error_codes.ACL_ROLE_NOT_FOUND,
+                {'role_id': role_id},
+            ),
+        )
+
+    def test_refuse_bad_permission_and_bad_scope_type(self):
+        role_id = 'role1'
+        self.fixture_add_role(role_id)
+
+        self.assert_raise_library_error(
+            lambda: lib.add_permissions_to_role(
+                self.cib.dom, role_id, [('readX', 'xpathX', '/whatever')]
+            ),
+            (
+                severities.ERROR,
+                error_codes.BAD_ACL_PERMISSION,
+                {'permission': 'readX'},
+            ),
+            (
+                severities.ERROR,
+                error_codes.BAD_ACL_SCOPE_TYPE,
+                {'scope_type': 'xpathX'},
+            ),
+        )
+
+    def test_refuse_pointing_to_nonexistent_id(self):
+        role_id = 'role1'
+        self.fixture_add_role(role_id)
+
+        self.assert_raise_library_error(
+            lambda: lib.add_permissions_to_role(
+                self.cib.dom, role_id, [('read', 'id', 'non-existent')]
+            ),
+            (
+                severities.ERROR,
+                error_codes.ID_NOT_FOUND,
+                {'id': 'non-existent'}
+            ),
+        )
+
+class ProvideRoleTest(LibraryAclTest):
+    def test_add_role_for_nonexisting_id(self):
+        role_id = 'new-id'
+        lib.provide_role(self.cib.dom, role_id)
+
+        self.assert_cib_equal(
+            self.create_cib().append_to_first_tag_name('configuration', '''
+              <acls>
+                <acl_role id="{0}"/>
+              </acls>
+            '''.format(role_id))
+        )
+
+    def test_add_role_for_existing_role_id(self):
+        self.fixture_add_role('role1')
+
+        role_id = 'role1'
+        lib.provide_role(self.cib.dom, role_id)
+
+        self.assert_cib_equal(
+            self.create_cib().append_to_first_tag_name('configuration', '''
+              <acls>
+                <acl_role id="{0}"/>
+              </acls>
+            '''.format(role_id))
+        )
+
+class RemovePermissionForReferenceTest(LibraryAclTest):
+    def test_has_no_effect_when_id_not_referenced(self):
+        lib.remove_permissions_referencing(self.cib.dom, 'dummy')
+        self.assert_cib_equal(self.create_cib())
+
+    def test_remove_all_references(self):
+        self.cib.append_to_first_tag_name('configuration', '''
+            <acls>
+              <acl_role id="role1">
+                <acl_permission id="role1-read" kind="read" reference="dummy"/>
+                <acl_permission id="role1-read" kind="read" reference="dummy2"/>
+              </acl_role>
+              <acl_role id="role2">
+                <acl_permission id="role2-read" kind="read" reference="dummy"/>
+              </acl_role>
+            </acls>
+        ''')
+
+        lib.remove_permissions_referencing(self.cib.dom, 'dummy')
+
+        self.assert_cib_equal(
+            self.create_cib().append_to_first_tag_name('configuration', '''
+              <acls>
+                <acl_role id="role1">
+                  <acl_permission
+                    id="role1-read"
+                    kind="read"
+                    reference="dummy2"
+                  />
+                </acl_role>
+                <acl_role id="role2"/>
+              </acls>
+            ''')
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
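
The two ProvideRoleTest cases above pin down that provide_role is idempotent: it creates the role only when no role with that id exists. A hedged sketch of that behaviour (the real implementation lives in pcs/library_acl.py, not shown in this excerpt):

    # Sketch only: create the ACL role unless one with this id already exists.
    def provide_role(dom, role_id):
        for role in dom.getElementsByTagName("acl_role"):
            if role.getAttribute("id") == role_id:
                return  # role already present, nothing to do
        create_role(dom, role_id)  # validates the id and appends <acl_role/>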
diff --git a/pcs/test/test_properties.py b/pcs/test/test_properties.py
index aeab52b..fe2d436 100644
--- a/pcs/test/test_properties.py
+++ b/pcs/test/test_properties.py
@@ -117,21 +117,213 @@ class PropertyTest(unittest.TestCase):
         assert r==0
 
     def testBadProperties(self):
-        o,r = pcs("property set xxxx=zzzz")
-        assert r==1
+        o,r = pcs(temp_cib, "property set xxxx=zzzz")
+        self.assertEqual(r, 1)
         ac(o,"Error: unknown cluster property: 'xxxx', (use --force to override)\n")
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, "Cluster Properties:\n")
 
-        output, returnVal = pcs("property set =5678 --force")
-        ac(output, "Error: property name cannot be empty\n")
-        assert returnVal == 1
+        output, returnVal = pcs(temp_cib, "property set =5678 --force")
+        ac(output, "Error: empty property name: '=5678'\n")
+        self.assertEqual(returnVal, 1)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, "Cluster Properties:\n")
+
+        output, returnVal = pcs(temp_cib, "property set =5678")
+        ac(output, "Error: empty property name: '=5678'\n")
+        self.assertEqual(returnVal, 1)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, "Cluster Properties:\n")
+
+        output, returnVal = pcs(temp_cib, "property set bad_format")
+        ac(output, "Error: invalid property format: 'bad_format'\n")
+        self.assertEqual(returnVal, 1)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, "Cluster Properties:\n")
 
-        o,r = pcs("property unset zzzzz")
-        assert r==1
+        output, returnVal = pcs(temp_cib, "property set bad_format --force")
+        ac(output, "Error: invalid property format: 'bad_format'\n")
+        self.assertEqual(returnVal, 1)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, "Cluster Properties:\n")
+
+        o,r = pcs(temp_cib, "property unset zzzzz")
+        self.assertEqual(r, 1)
         ac(o,"Error: can't remove property: 'zzzzz' that doesn't exist\n")
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, "Cluster Properties:\n")
 
-        o,r = pcs("property unset zzzz --force")
-        assert r==0
+        o,r = pcs(temp_cib, "property unset zzzz --force")
+        self.assertEqual(r, 0)
         ac(o,"")
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, "Cluster Properties:\n")
+
+    def test_set_property_validation_enum(self):
+        output, returnVal = pcs(
+            temp_cib, "property set no-quorum-policy=freeze"
+        )
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ no-quorum-policy: freeze
+"""
+        )
+
+        output, returnVal = pcs(
+            temp_cib, "property set no-quorum-policy=freeze --force"
+        )
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ no-quorum-policy: freeze
+"""
+        )
+
+        output, returnVal = pcs(
+            temp_cib, "property set no-quorum-policy=not_valid_value"
+        )
+        ac(
+            output,
+            "Error: invalid value of property: "
+            "'no-quorum-policy=not_valid_value', (use --force to override)\n"
+        )
+        self.assertEqual(returnVal, 1)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ no-quorum-policy: freeze
+"""
+        )
+
+        output, returnVal = pcs(
+            temp_cib, "property set no-quorum-policy=not_valid_value --force"
+        )
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ no-quorum-policy: not_valid_value
+"""
+        )
+
+    def test_set_property_validation_boolean(self):
+        output, returnVal = pcs(temp_cib, "property set enable-acl=TRUE")
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ enable-acl: TRUE
+"""
+        )
+
+        output, returnVal = pcs(temp_cib, "property set enable-acl=no")
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ enable-acl: no
+"""
+        )
+
+        output, returnVal = pcs(
+            temp_cib, "property set enable-acl=TRUE --force"
+        )
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ enable-acl: TRUE
+"""
+        )
+
+        output, returnVal = pcs(
+            temp_cib, "property set enable-acl=not_valid_value"
+        )
+        ac(
+            output,
+            "Error: invalid value of property: "
+            "'enable-acl=not_valid_value', (use --force to override)\n"
+        )
+        self.assertEqual(returnVal, 1)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ enable-acl: TRUE
+"""
+        )
+
+        output, returnVal = pcs(
+            temp_cib, "property set enable-acl=not_valid_value --force"
+        )
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ enable-acl: not_valid_value
+"""
+        )
+
+    def test_set_property_validation_integer(self):
+        output, returnVal = pcs(
+            temp_cib, "property set default-resource-stickiness=0"
+        )
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ default-resource-stickiness: 0
+"""
+        )
+
+
+        output, returnVal = pcs(
+            temp_cib, "property set default-resource-stickiness=-10"
+        )
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ default-resource-stickiness: -10
+"""
+        )
+
+        output, returnVal = pcs(
+            temp_cib, "property set default-resource-stickiness=0 --force"
+        )
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ default-resource-stickiness: 0
+"""
+        )
+
+        output, returnVal = pcs(
+            temp_cib, "property set default-resource-stickiness=0.1"
+        )
+        ac(
+            output,
+            "Error: invalid value of property: "
+            "'default-resource-stickiness=0.1', (use --force to override)\n"
+        )
+        self.assertEqual(returnVal, 1)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ default-resource-stickiness: 0
+"""
+        )
+
+        output, returnVal = pcs(
+            temp_cib, "property set default-resource-stickiness=0.1 --force"
+        )
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+        o, _ = pcs(temp_cib, "property list")
+        ac(o, """Cluster Properties:
+ default-resource-stickiness: 0.1
+"""
+        )
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index 730450d..25e1167 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -14,6 +14,8 @@ sys.path.insert(0, parentdir)
 import utils
 from pcs_test_functions import pcs, ac
 import resource
+from pcs_test_functions import PcsRunner
+from pcs_test_assertions import AssertPcsMixin
 
 
 empty_cib = "empty.xml"
@@ -92,13 +94,13 @@ class ResourceTest(unittest.TestCase):
         ac(o,"Error: Unable to create resource 'ipaddr3', it is not installed on this system (use --force to override)\n")
 
     def testEmpty(self):
-        output, returnVal = pcs(temp_cib, "resource") 
+        output, returnVal = pcs(temp_cib, "resource")
         assert returnVal == 0, 'Unable to list resources'
         assert output == "NO resources configured\n", "Bad output"
 
 
     def testDescribe(self):
-        output, returnVal = pcs(temp_cib, "resource describe bad_resource") 
+        output, returnVal = pcs(temp_cib, "resource describe bad_resource")
         assert returnVal == 1
         assert output == "Error: Unable to find resource: bad_resource\n"
 
@@ -156,33 +158,33 @@ the health of a system via IPMI.
 
     def testAddResources(self):
         line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 1
         assert output == "Error: unable to create resource/fence device 'ClusterIP', 'ClusterIP' already exists on this system\n",[output]
-    
+
         line = "resource create --no-default-ops ClusterIP2 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
         line = "resource create --no-default-ops ClusterIP3 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
         line = "resource create --no-default-ops ClusterIP4  ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
         line = "resource create --no-default-ops ClusterIP5 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
         line = "resource create --no-default-ops ClusterIP6  ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=31s start interval=32s op stop interval=33s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
         line = "resource create --no-default-ops ClusterIP7 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s --disabled"
@@ -309,17 +311,17 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
 
     def testAddBadResources(self):
         line = "resource create --no-default-ops bad_resource idontexist test=bad"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 1
         assert output == "Error: Unable to create resource 'idontexist', it is not installed on this system (use --force to override)\n",[output]
 
         line = "resource create --no-default-ops bad_resource2 idontexist2 test4=bad3 --force"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = "resource show --full"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         ac(output, """\
  Resource: bad_resource2 (class=ocf provider=heartbeat type=idontexist2)
@@ -334,20 +336,20 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
     def testDeleteResources(self):
 # Verify deleting resources works
         line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource delete'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 1
         assert output.startswith("\nUsage: pcs resource")
 
         line = "resource delete ClusterIP"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == "Deleting Resource - ClusterIP\n"
-        
+
         output, returnVal = pcs(temp_cib, "resource show ClusterIP")
         assert returnVal == 1
         assert output == "Error: unable to find resource 'ClusterIP'\n"
@@ -362,7 +364,7 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
 
     def testResourceShow(self):
         line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
@@ -376,12 +378,12 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
 
     def testResourceUpdate(self):
         line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource update'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 1
         assert output.startswith("\nUsage: pcs resource")
 
@@ -391,7 +393,7 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
 
     def testAddOperation(self):
         line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         ac(output,"")
         assert returnVal == 0
 
@@ -404,17 +406,17 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
         assert o == "Error: remove_operation has been deprecated, please use 'op remove'\n"
 
         line = 'resource op add'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 1
         assert output.startswith("\nUsage: pcs resource")
 
         line = 'resource op remove'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 1
         assert output.startswith("\nUsage: pcs resource")
 
         line = 'resource op add ClusterIP monitor interval=31s'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         ac(output, """\
 Error: operation monitor already specified for ClusterIP, use --force to override:
 monitor interval=30s (ClusterIP-monitor-interval-30s)
@@ -428,7 +430,7 @@ monitor interval=30s (ClusterIP-monitor-interval-30s)
         assert output == ""
 
         line = 'resource op add ClusterIP monitor interval=31s'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         ac(output, """\
 Error: operation monitor with interval 31s already specified for ClusterIP:
 monitor interval=31s (ClusterIP-monitor-interval-31s)
@@ -436,7 +438,7 @@ monitor interval=31s (ClusterIP-monitor-interval-31s)
         assert returnVal == 1
 
         line = 'resource op add ClusterIP monitor interval=31'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         ac(output, """\
 Error: operation monitor with interval 31s already specified for ClusterIP:
 monitor interval=31s (ClusterIP-monitor-interval-31s)
@@ -659,37 +661,37 @@ monitor interval=60s (state-monitor-interval-60s)
 
     def testRemoveOperation(self):
         line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource op add ClusterIP monitor interval=31s --force'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource op add ClusterIP monitor interval=32s --force'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource op remove ClusterIP-monitor-interval-32s-xxxxx'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 1
         assert output == "Error: unable to find operation id: ClusterIP-monitor-interval-32s-xxxxx\n"
 
         line = 'resource op remove ClusterIP-monitor-interval-32s'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource op remove ClusterIP monitor interval=30s'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource op remove ClusterIP monitor interval=30s'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 1
         assert output == 'Error: Unable to find operation matching: monitor interval=30s\n'
 
@@ -702,7 +704,7 @@ monitor interval=60s (state-monitor-interval-60s)
         assert returnVal == 0
 
         line = 'resource op remove ClusterIP monitor interval=31s'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
@@ -714,12 +716,12 @@ monitor interval=60s (state-monitor-interval-60s)
         assert returnVal == 0
 
         line = 'resource op add ClusterIP monitor interval=31s'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource op add ClusterIP monitor interval=32s --force'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
@@ -750,37 +752,37 @@ monitor interval=60s (state-monitor-interval-60s)
 
     def testUpdateOperation(self):
         line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert output == ""
         assert returnVal == 0
 
         line = 'resource update ClusterIP op monitor interval=32s'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource update ClusterIP op monitor interval=33s start interval=30s timeout=180s'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource update ClusterIP op monitor interval=33s start interval=30s timeout=180s'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource update ClusterIP op'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource update ClusterIP op monitor'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         assert output == ""
 
         line = 'resource show ClusterIP --full'
-        output, returnVal = pcs(temp_cib, line) 
+        output, returnVal = pcs(temp_cib, line)
         ac(output, """\
  Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
   Attributes: ip=192.168.0.99 cidr_netmask=32
@@ -978,7 +980,7 @@ monitor interval=20 (A-monitor-interval-20)
         o,r = pcs(temp_cib, "resource delete AGroup")
         ac(o,"Removing group: AGroup (and all resources within group)\nStopping all resources in group: AGroup...\nDeleting Resource - A1\nDeleting Resource - A2\nDeleting Resource (and group) - A3\n")
         assert r == 0
-        
+
         o,r = pcs(temp_cib, "resource show")
         assert r == 0
         ac(o,"NO resources configured\n")
@@ -1977,10 +1979,20 @@ Deleting Resource (and group and M/S) - dummylarge
         o,r = pcs(temp_cib, "resource unmanage D1")
         ac(o,"")
 
+        os.system("CIB_file="+temp_cib+" crm_resource --resource AG --set-parameter is-managed --meta --parameter-value false --force > /dev/null")
+
         o,r = pcs(temp_cib, "resource --full")
-        ac(o," Group: AG\n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Meta Attrs: is-managed=false \n   Operations: monitor interval=60s (D1-monitor-interval-60s)\n  Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n   Meta Attrs: is-managed=false \n   Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
+        ac(o,"""\
+ Group: AG
+  Meta Attrs: is-managed=false 
+  Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+   Meta Attrs: is-managed=false 
+   Operations: monitor interval=60s (D1-monitor-interval-60s)
+  Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+   Meta Attrs: is-managed=false 
+   Operations: monitor interval=60s (D2-monitor-interval-60s)
+""")
 
-        os.system("CIB_file="+temp_cib+" crm_resource --resource AG --set-parameter is-managed --meta --parameter-value false")
 
         o,r = pcs(temp_cib, "resource manage AG")
         ac(o,"")
@@ -4360,31 +4372,68 @@ Resource Utilization:
         ac(expected_out, output)
         self.assertEqual(0, returnVal)
 
-def test_resource_utilization_set_invalid(self):
+    def test_resource_utilization_set_invalid(self):
         output, returnVal = pcs(temp_large_cib, "resource utilization dummy0")
         expected_out = """\
 Error: Unable to find a resource: dummy0
 """
         ac(expected_out, output)
         self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_large_cib, "resource utilization dummy0 test=10"
         )
         expected_out = """\
 Error: Unable to find a resource: dummy0
 """
         ac(expected_out, output)
         self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_large_cib, "resource utilization dummy1 test1=10 test=int"
         )
         expected_out = """\
 Error: Value of utilization attribute must be integer: 'test=int'
 """
         ac(expected_out, output)
         self.assertEqual(1, returnVal)
+
+class ResourcesReferencedFromAclTest(unittest.TestCase, AssertPcsMixin):
+    def setUp(self):
+        shutil.copy('empty-1.2.xml', temp_cib)
+        self.pcs_runner = PcsRunner(temp_cib)
+
+    def test_remove_referenced_primitive_resource(self):
+        self.assert_pcs_success('resource create dummy Dummy')
+        self.assert_pcs_success('acl role create read-dummy read id dummy')
+        self.assert_pcs_success('resource delete dummy', [
+            'Deleting Resource - dummy'
+        ])
+
+    def test_remove_group_with_referenced_primitive_resource(self):
+        self.assert_pcs_success('resource create dummy1 Dummy')
+        self.assert_pcs_success('resource create dummy2 Dummy')
+        self.assert_pcs_success('resource group add dummy-group dummy1 dummy2')
+        self.assert_pcs_success('acl role create read-dummy read id dummy2')
+        self.assert_pcs_success('resource delete dummy-group', [
+            'Removing group: dummy-group (and all resources within group)',
+            'Stopping all resources in group: dummy-group...',
+            'Deleting Resource - dummy1',
+            'Deleting Resource (and group) - dummy2',
+        ])
+
+    def test_remove_referenced_group(self):
+        self.assert_pcs_success('resource create dummy1 Dummy')
+        self.assert_pcs_success('resource create dummy2 Dummy')
+        self.assert_pcs_success('resource group add dummy-group dummy1 dummy2')
+        self.assert_pcs_success('acl role create acl-role-a read id dummy-group')
+        self.assert_pcs_success('resource delete dummy-group', [
+            'Removing group: dummy-group (and all resources within group)',
+            'Stopping all resources in group: dummy-group...',
+            'Deleting Resource - dummy1',
+            'Deleting Resource (and group) - dummy2',
+        ])
+
 
 if __name__ == "__main__":
     unittest.main()
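
The new ACL regression tests above drive the pcs CLI against a temporary CIB
and compare stdout line by line.  A minimal sketch of that pattern, with
hypothetical helper bodies (the real PcsRunner and AssertPcsMixin ship with
the pcs test suite), could look like this:

    import subprocess

    class PcsRunner(object):
        """Run pcs against a fixed CIB file and capture its output (sketch)."""
        def __init__(self, cib_file):
            self.cib_file = cib_file

        def run(self, command):
            proc = subprocess.Popen(
                ["pcs", "-f", self.cib_file] + command.split(),
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )
            output, dummy = proc.communicate()
            return output.decode("utf-8"), proc.returncode

    class AssertPcsMixin(object):
        # assumes setUp() created self.pcs_runner
        def assert_pcs_success(self, command, expected_lines=None):
            output, returncode = self.pcs_runner.run(command)
            self.assertEqual(0, returncode, output)
            if expected_lines is not None:
                self.assertEqual(expected_lines, output.splitlines())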
diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
index 66310ff..7382edc 100644
--- a/pcs/test/test_stonith.py
+++ b/pcs/test/test_stonith.py
@@ -30,9 +30,9 @@ class StonithTest(unittest.TestCase):
         assert returnVal == 0
         assert output == ""
 
-        output, returnVal = pcs(temp_cib, "stonith create test2 fence_ilo")
+        output, returnVal = pcs(temp_cib, "stonith create test2 fence_apc")
         assert returnVal == 1
-        ac(output,"Error: missing required option(s): 'ipaddr, login' for resource type: stonith:fence_ilo (use --force to override)\n")
+        ac(output,"Error: missing required option(s): 'ipaddr, login' for resource type: stonith:fence_apc (use --force to override)\n")
 
         output, returnVal = pcs(temp_cib, "stonith create test2 fence_ilo --force")
         assert returnVal == 0
@@ -42,9 +42,9 @@ class StonithTest(unittest.TestCase):
         assert returnVal == 1
         assert output == "Error: resource option(s): 'bad_argument', are not recognized for resource type: 'stonith:fence_ilo' (use --force to override)\n",[output]
 
-        output, returnVal = pcs(temp_cib, "stonith create test9 fence_ilo pcmk_status_action=xxx")
+        output, returnVal = pcs(temp_cib, "stonith create test9 fence_apc pcmk_status_action=xxx")
         assert returnVal == 1
-        ac(output,"Error: missing required option(s): 'ipaddr, login' for resource type: stonith:fence_ilo (use --force to override)\n")
+        ac(output,"Error: missing required option(s): 'ipaddr, login' for resource type: stonith:fence_apc (use --force to override)\n")
 
         output, returnVal = pcs(temp_cib, "stonith create test9 fence_ilo pcmk_status_action=xxx --force")
         assert returnVal == 0
diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py
index 86871ff..6a581d9 100644
--- a/pcs/test/test_utils.py
+++ b/pcs/test/test_utils.py
@@ -8,18 +8,21 @@ import sys
 import shutil
 import unittest
 import xml.dom.minidom
+import xml.etree.cElementTree as ET
 currentdir = os.path.dirname(os.path.abspath(__file__))
 parentdir = os.path.dirname(currentdir)
 sys.path.insert(0, parentdir)
 
 import utils
-from pcs_test_functions import pcs, ac
+from pcs_test_functions import pcs, ac, get_child_elements
 
 
 cib_with_nodes =  os.path.join(currentdir, "empty-withnodes.xml")
 empty_cib = os.path.join(currentdir, "empty.xml")
 temp_cib = os.path.join(currentdir, "temp.xml")
 
+unittest.TestCase.maxDiff = None
+
 class UtilsTest(unittest.TestCase):
 
     def get_cib_empty(self):
@@ -1779,30 +1782,34 @@ Membership information
     def test_dom_prepare_child_element(self):
         cib = self.get_cib_with_nodes_minidom()
         node = cib.getElementsByTagName("node")[0]
-        self.assertEqual(len(get_child_elemets(node)), 0)
-        child = utils.dom_prepare_child_element(node, "utilization", "rh7-1-")
-        self.assertEqual(len(get_child_elemets(node)), 1)
-        self.assertEqual(child, get_child_elemets(node)[0])
-        self.assertEqual(get_child_elemets(node)[0].tagName, "utilization")
+        self.assertEqual(len(get_child_elements(node)), 0)
+        child = utils.dom_prepare_child_element(
+            node, "utilization", "rh7-1-utilization"
+        )
+        self.assertEqual(len(get_child_elements(node)), 1)
+        self.assertEqual(child, get_child_elements(node)[0])
+        self.assertEqual(get_child_elements(node)[0].tagName, "utilization")
         self.assertEqual(
-            get_child_elemets(node)[0].getAttribute("id"), "rh7-1-utilization"
+            get_child_elements(node)[0].getAttribute("id"), "rh7-1-utilization"
         )
-        child2 = utils.dom_prepare_child_element(node, "utilization", "rh7-1-")
-        self.assertEqual(len(get_child_elemets(node)), 1)
+        child2 = utils.dom_prepare_child_element(
+            node, "utilization", "rh7-1-utilization"
+        )
+        self.assertEqual(len(get_child_elements(node)), 1)
         self.assertEqual(child, child2)
 
     def test_dom_update_nv_pair_add(self):
         nv_set = xml.dom.minidom.parseString("<nvset/>").documentElement
         utils.dom_update_nv_pair(nv_set, "test_name", "test_val", "prefix-")
-        self.assertEqual(len(get_child_elemets(nv_set)), 1)
-        pair = get_child_elemets(nv_set)[0]
+        self.assertEqual(len(get_child_elements(nv_set)), 1)
+        pair = get_child_elements(nv_set)[0]
         self.assertEqual(pair.getAttribute("name"), "test_name")
         self.assertEqual(pair.getAttribute("value"), "test_val")
         self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
         utils.dom_update_nv_pair(nv_set, "another_name", "value", "prefix2-")
-        self.assertEqual(len(get_child_elemets(nv_set)), 2)
-        self.assertEqual(pair, get_child_elemets(nv_set)[0])
-        pair = get_child_elemets(nv_set)[1]
+        self.assertEqual(len(get_child_elements(nv_set)), 2)
+        self.assertEqual(pair, get_child_elements(nv_set)[0])
+        pair = get_child_elements(nv_set)[1]
         self.assertEqual(pair.getAttribute("name"), "another_name")
         self.assertEqual(pair.getAttribute("value"), "value")
         self.assertEqual(pair.getAttribute("id"), "prefix2-another_name")
@@ -1815,9 +1822,9 @@ Membership information
         </nv_set>
         """).documentElement
         utils.dom_update_nv_pair(nv_set, "test_name", "new_value")
-        self.assertEqual(len(get_child_elemets(nv_set)), 2)
-        pair1 = get_child_elemets(nv_set)[0]
-        pair2 = get_child_elemets(nv_set)[1]
+        self.assertEqual(len(get_child_elements(nv_set)), 2)
+        pair1 = get_child_elements(nv_set)[0]
+        pair2 = get_child_elements(nv_set)[1]
         self.assertEqual(pair1.getAttribute("name"), "test_name")
         self.assertEqual(pair1.getAttribute("value"), "new_value")
         self.assertEqual(pair1.getAttribute("id"), "prefix-test_name")
@@ -1833,15 +1840,15 @@ Membership information
         </nv_set>
         """).documentElement
         utils.dom_update_nv_pair(nv_set, "non_existing_name", "")
-        self.assertEqual(len(get_child_elemets(nv_set)), 2)
+        self.assertEqual(len(get_child_elements(nv_set)), 2)
         utils.dom_update_nv_pair(nv_set, "another_name", "")
-        self.assertEqual(len(get_child_elemets(nv_set)), 1)
-        pair = get_child_elemets(nv_set)[0]
+        self.assertEqual(len(get_child_elements(nv_set)), 1)
+        pair = get_child_elements(nv_set)[0]
         self.assertEqual(pair.getAttribute("name"), "test_name")
         self.assertEqual(pair.getAttribute("value"), "test_val")
         self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
         utils.dom_update_nv_pair(nv_set, "test_name", "")
-        self.assertEqual(len(get_child_elemets(nv_set)), 0)
+        self.assertEqual(len(get_child_elements(nv_set)), 0)
 
     def test_convert_args_to_tuples(self):
         out = utils.convert_args_to_tuples(
@@ -1874,22 +1881,22 @@ Membership information
             el, [("name", ""), ("key", "-1"), ("keys", "90")]
         )
 
-        self.assertEqual(len(get_child_elemets(el)), 1)
-        u = get_child_elemets(el)[0]
+        self.assertEqual(len(get_child_elements(el)), 1)
+        u = get_child_elements(el)[0]
         self.assertEqual(u.tagName, "utilization")
         self.assertEqual(u.getAttribute("id"), "test_id-utilization")
-        self.assertEqual(len(get_child_elemets(u)), 2)
+        self.assertEqual(len(get_child_elements(u)), 2)
 
         self.assertEqual(
-            get_child_elemets(u)[0].getAttribute("id"), "test_id-utilization-key"
+            get_child_elements(u)[0].getAttribute("id"), "test_id-utilization-key"
         )
-        self.assertEqual(get_child_elemets(u)[0].getAttribute("name"), "key")
-        self.assertEqual(get_child_elemets(u)[0].getAttribute("value"), "-1")
+        self.assertEqual(get_child_elements(u)[0].getAttribute("name"), "key")
+        self.assertEqual(get_child_elements(u)[0].getAttribute("value"), "-1")
         self.assertEqual(
-            get_child_elemets(u)[1].getAttribute("id"), "test_id-utilization-keys"
+            get_child_elements(u)[1].getAttribute("id"), "test_id-utilization-keys"
         )
-        self.assertEqual(get_child_elemets(u)[1].getAttribute("name"), "keys")
-        self.assertEqual(get_child_elemets(u)[1].getAttribute("value"), "90")
+        self.assertEqual(get_child_elements(u)[1].getAttribute("name"), "keys")
+        self.assertEqual(get_child_elements(u)[1].getAttribute("value"), "90")
 
     def test_dom_update_utilization_update_remove(self):
         el = xml.dom.minidom.parseString("""
@@ -1904,13 +1911,13 @@ Membership information
             el, [("key", "100"), ("keys", "")]
         )
 
-        u = get_child_elemets(el)[0]
-        self.assertEqual(len(get_child_elemets(u)), 1)
+        u = get_child_elements(el)[0]
+        self.assertEqual(len(get_child_elements(u)), 1)
         self.assertEqual(
-            get_child_elemets(u)[0].getAttribute("id"), "test_id-utilization-key"
+            get_child_elements(u)[0].getAttribute("id"), "test_id-utilization-key"
         )
-        self.assertEqual(get_child_elemets(u)[0].getAttribute("name"), "key")
-        self.assertEqual(get_child_elemets(u)[0].getAttribute("value"), "100")
+        self.assertEqual(get_child_elements(u)[0].getAttribute("name"), "key")
+        self.assertEqual(get_child_elements(u)[0].getAttribute("value"), "100")
 
     def test_dom_update_meta_attr_add(self):
         el = xml.dom.minidom.parseString("""
@@ -1920,22 +1927,22 @@ Membership information
             el, [("name", ""), ("key", "test"), ("key2", "val")]
         )
 
-        self.assertEqual(len(get_child_elemets(el)), 1)
-        u = get_child_elemets(el)[0]
+        self.assertEqual(len(get_child_elements(el)), 1)
+        u = get_child_elements(el)[0]
         self.assertEqual(u.tagName, "meta_attributes")
         self.assertEqual(u.getAttribute("id"), "test_id-meta_attributes")
-        self.assertEqual(len(get_child_elemets(u)), 2)
+        self.assertEqual(len(get_child_elements(u)), 2)
 
         self.assertEqual(
-            get_child_elemets(u)[0].getAttribute("id"), "test_id-meta_attributes-key"
+            get_child_elements(u)[0].getAttribute("id"), "test_id-meta_attributes-key"
         )
-        self.assertEqual(get_child_elemets(u)[0].getAttribute("name"), "key")
-        self.assertEqual(get_child_elemets(u)[0].getAttribute("value"), "test")
+        self.assertEqual(get_child_elements(u)[0].getAttribute("name"), "key")
+        self.assertEqual(get_child_elements(u)[0].getAttribute("value"), "test")
         self.assertEqual(
-            get_child_elemets(u)[1].getAttribute("id"), "test_id-meta_attributes-key2"
+            get_child_elements(u)[1].getAttribute("id"), "test_id-meta_attributes-key2"
         )
-        self.assertEqual(get_child_elemets(u)[1].getAttribute("name"), "key2")
-        self.assertEqual(get_child_elemets(u)[1].getAttribute("value"), "val")
+        self.assertEqual(get_child_elements(u)[1].getAttribute("name"), "key2")
+        self.assertEqual(get_child_elements(u)[1].getAttribute("value"), "val")
 
     def test_dom_update_meta_attr_update_remove(self):
         el = xml.dom.minidom.parseString("""
@@ -1950,13 +1957,13 @@ Membership information
             el, [("key", "another_val"), ("key2", "")]
         )
 
-        u = get_child_elemets(el)[0]
-        self.assertEqual(len(get_child_elemets(u)), 1)
+        u = get_child_elements(el)[0]
+        self.assertEqual(len(get_child_elements(u)), 1)
         self.assertEqual(
-            get_child_elemets(u)[0].getAttribute("id"), "test_id-meta_attributes-key"
+            get_child_elements(u)[0].getAttribute("id"), "test_id-meta_attributes-key"
         )
-        self.assertEqual(get_child_elemets(u)[0].getAttribute("name"), "key")
-        self.assertEqual(get_child_elemets(u)[0].getAttribute("value"), "another_val")
+        self.assertEqual(get_child_elements(u)[0].getAttribute("name"), "key")
+        self.assertEqual(get_child_elements(u)[0].getAttribute("value"), "another_val")
 
     def test_get_utilization(self):
         el = xml.dom.minidom.parseString("""
@@ -1980,6 +1987,221 @@ Membership information
         """).documentElement
         self.assertEqual("key=-1 keys=90", utils.get_utilization_str(el))
 
+    def test_get_cluster_property_from_xml_enum(self):
+        el = ET.fromstring("""
+        <parameter name="no-quorum-policy" unique="0">
+            <shortdesc lang="en">What to do when the cluster does not have quorum</shortdesc>
+            <content type="enum" default="stop"/>
+            <longdesc lang="en">What to do when the cluster does not have quorum  Allowed values: stop, freeze, ignore, suicide</longdesc>
+        </parameter>
+        """)
+        expected = {
+            "name": "no-quorum-policy",
+            "shortdesc": "What to do when the cluster does not have quorum",
+            "longdesc": "",
+            "type": "enum",
+            "default": "stop",
+            "enum": ["stop", "freeze", "ignore", "suicide"]
+        }
+        self.assertEqual(expected, utils.get_cluster_property_from_xml(el))
+
+    def test_get_cluster_property_from_xml(self):
+        el = ET.fromstring("""
+        <parameter name="default-resource-stickiness" unique="0">
+            <shortdesc lang="en"></shortdesc>
+            <content type="integer" default="0"/>
+            <longdesc lang="en"></longdesc>
+        </parameter>
+        """)
+        expected = {
+            "name": "default-resource-stickiness",
+            "shortdesc": "",
+            "longdesc": "",
+            "type": "integer",
+            "default": "0"
+        }
+        self.assertEqual(expected, utils.get_cluster_property_from_xml(el))
+
+    def test_get_cluster_property_default(self):
+        definition = {
+            "default-resource-stickiness": {
+                "name": "default-resource-stickiness",
+                "shortdesc": "",
+                "longdesc": "",
+                "type": "integer",
+                "default": "0",
+                "source": "pengine"
+            },
+            "no-quorum-policy": {
+                "name": "no-quorum-policy",
+                "shortdesc": "What to do when the cluster does not have quorum",
+                "longdesc": "What to do when the cluster does not have quorum  Allowed values: stop, freeze, ignore, suicide",
+                "type": "enum",
+                "default": "stop",
+                "enum": ["stop", "freeze", "ignore", "suicide"],
+                "source": "pengine"
+            },
+            "enable-acl": {
+                "name": "enable-acl",
+                "shortdesc": "Enable CIB ACL",
+                "longdesc": "Enable CIB ACL",
+                "type": "boolean",
+                "default": "false",
+                "source": "cib"
+            }
+        }
+        self.assertEqual(
+            utils.get_cluster_property_default(
+                definition, "default-resource-stickiness"
+            ),
+            "0"
+        )
+        self.assertEqual(
+            utils.get_cluster_property_default(definition, "no-quorum-policy"),
+            "stop"
+        )
+        self.assertEqual(
+            utils.get_cluster_property_default(definition, "enable-acl"),
+            "false"
+        )
+        self.assertRaises(
+            utils.UnknownPropertyException,
+            utils.get_cluster_property_default, definition, "non-existing"
+        )
+
+    def test_is_valid_cib_value_unknown_type(self):
+        # should be always true
+        self.assertTrue(utils.is_valid_cib_value("unknown", "test"))
+        self.assertTrue(utils.is_valid_cib_value("string", "string value"))
+
+    def test_is_valid_cib_value_integer(self):
+        self.assertTrue(utils.is_valid_cib_value("integer", "0"))
+        self.assertTrue(utils.is_valid_cib_value("integer", "42"))
+        self.assertTrue(utils.is_valid_cib_value("integer", "-90"))
+        self.assertTrue(utils.is_valid_cib_value("integer", "+90"))
+        self.assertTrue(utils.is_valid_cib_value("integer", "INFINITY"))
+        self.assertTrue(utils.is_valid_cib_value("integer", "-INFINITY"))
+        self.assertTrue(utils.is_valid_cib_value("integer", "+INFINITY"))
+        self.assertFalse(utils.is_valid_cib_value("integer", "0.0"))
+        self.assertFalse(utils.is_valid_cib_value("integer", "-10.9"))
+        self.assertFalse(utils.is_valid_cib_value("integer", "string"))
+
+    def test_is_valid_cib_value_enum(self):
+        self.assertTrue(
+            utils.is_valid_cib_value("enum", "this", ["another", "this", "1"])
+        )
+        self.assertFalse(
+            utils.is_valid_cib_value("enum", "this", ["another", "this_not"])
+        )
+        self.assertFalse(utils.is_valid_cib_value("enum", "this", []))
+        self.assertFalse(utils.is_valid_cib_value("enum", "this"))
+
+    def test_is_valid_cib_value_boolean(self):
+        self.assertTrue(utils.is_valid_cib_value("boolean", "true"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "TrUe"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "TRUE"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "yes"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "on"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "y"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "Y"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "1"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "false"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "FaLse"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "FALSE"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "off"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "no"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "N"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "n"))
+        self.assertTrue(utils.is_valid_cib_value("boolean", "0"))
+        self.assertFalse(utils.is_valid_cib_value("boolean", "-1"))
+        self.assertFalse(utils.is_valid_cib_value("boolean", "not"))
+        self.assertFalse(utils.is_valid_cib_value("boolean", "random_string"))
+        self.assertFalse(utils.is_valid_cib_value("boolean", "truth"))
+
+    def test_is_valid_cib_value_time(self):
+        self.assertTrue(utils.is_valid_cib_value("time", "10"))
+        self.assertTrue(utils.is_valid_cib_value("time", "0"))
+        self.assertTrue(utils.is_valid_cib_value("time", "9s"))
+        self.assertTrue(utils.is_valid_cib_value("time", "10sec"))
+        self.assertTrue(utils.is_valid_cib_value("time", "10min"))
+        self.assertTrue(utils.is_valid_cib_value("time", "10m"))
+        self.assertTrue(utils.is_valid_cib_value("time", "10h"))
+        self.assertTrue(utils.is_valid_cib_value("time", "10hr"))
+        self.assertFalse(utils.is_valid_cib_value("time", "5.2"))
+        self.assertFalse(utils.is_valid_cib_value("time", "-10"))
+        self.assertFalse(utils.is_valid_cib_value("time", "10m 2s"))
+        self.assertFalse(utils.is_valid_cib_value("time", "hour"))
+        self.assertFalse(utils.is_valid_cib_value("time", "day"))
+
+    def test_validate_cluster_property(self):
+        definition = {
+            "default-resource-stickiness": {
+                "name": "default-resource-stickiness",
+                "shortdesc": "",
+                "longdesc": "",
+                "type": "integer",
+                "default": "0",
+                "source": "pengine"
+            },
+            "no-quorum-policy": {
+                "name": "no-quorum-policy",
+                "shortdesc": "What to do when the cluster does not have quorum",
+                "longdesc": "What to do when the cluster does not have quorum  Allowed values: stop, freeze, ignore, suicide",
+                "type": "enum",
+                "default": "stop",
+                "enum": ["stop", "freeze", "ignore", "suicide"],
+                "source": "pengine"
+            },
+            "enable-acl": {
+                "name": "enable-acl",
+                "shortdesc": "Enable CIB ACL",
+                "longdesc": "Enable CIB ACL",
+                "type": "boolean",
+                "default": "false",
+                "source": "cib"
+            }
+        }
+        self.assertTrue(utils.is_valid_cluster_property(
+            definition, "default-resource-stickiness", "10"
+        ))
+        self.assertTrue(utils.is_valid_cluster_property(
+            definition, "default-resource-stickiness", "-1"
+        ))
+        self.assertTrue(utils.is_valid_cluster_property(
+            definition, "no-quorum-policy", "freeze"
+        ))
+        self.assertTrue(utils.is_valid_cluster_property(
+            definition, "no-quorum-policy", "suicide"
+        ))
+        self.assertTrue(utils.is_valid_cluster_property(
+            definition, "enable-acl", "true"
+        ))
+        self.assertTrue(utils.is_valid_cluster_property(
+            definition, "enable-acl", "false"
+        ))
+        self.assertTrue(utils.is_valid_cluster_property(
+            definition, "enable-acl", "on"
+        ))
+        self.assertTrue(utils.is_valid_cluster_property(
+            definition, "enable-acl", "OFF"
+        ))
+        self.assertFalse(utils.is_valid_cluster_property(
+            definition, "default-resource-stickiness", "test"
+        ))
+        self.assertFalse(utils.is_valid_cluster_property(
+            definition, "default-resource-stickiness", "1.2"
+        ))
+        self.assertFalse(utils.is_valid_cluster_property(
+            definition, "no-quorum-policy", "invalid"
+        ))
+        self.assertFalse(utils.is_valid_cluster_property(
+            definition, "enable-acl", "not"
+        ))
+        self.assertRaises(
+            utils.UnknownPropertyException,
+            utils.is_valid_cluster_property, definition, "unknown", "value"
+        )
+
     def assert_element_id(self, node, node_id):
         self.assertTrue(
             isinstance(node, xml.dom.minidom.Element),
@@ -1987,8 +2209,5 @@ Membership information
         )
         self.assertEqual(node.getAttribute("id"), node_id)
 
-def get_child_elemets(el):
-    return [e for e in el.childNodes if e.nodeType == xml.dom.minidom.Node.ELEMENT_NODE]
-
 if __name__ == "__main__":
     unittest.main()
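
The enum handling exercised by test_get_cluster_property_from_xml above can
be shown in isolation.  A short sketch, assuming only the "  Allowed
values: " separator visible in the quoted pengine metadata:

    import xml.etree.cElementTree as ET

    metadata = """
    <parameter name="no-quorum-policy" unique="0">
        <shortdesc lang="en">What to do when the cluster does not have quorum</shortdesc>
        <content type="enum" default="stop"/>
        <longdesc lang="en">What to do when the cluster does not have quorum  Allowed values: stop, freeze, ignore, suicide</longdesc>
    </parameter>
    """

    element = ET.fromstring(metadata)
    longdesc = element.find("longdesc").text
    # pengine appends the allowed values to the long description; the two
    # parts are separated by two spaces and a fixed label
    desc, sep, values = longdesc.partition("  Allowed values: ")
    print(desc)                # What to do when the cluster does not have quorum
    print(values.split(", "))  # ['stop', 'freeze', 'ignore', 'suicide']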
diff --git a/pcs/usage.py b/pcs/usage.py
index a7bfd84..f412ce9 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -91,7 +91,7 @@ def sub_usage(args, output):
         return "\n" + usage + "\n" + ret.rstrip() + "\n"
     else:
         return output
-    
+
 def dict_depth(d, depth=0):
     if not isinstance(d, dict) or not d:
         return depth
@@ -222,7 +222,7 @@ Commands:
         print(output)
     else:
         return output
-                                                    
+
 
 def resource(args = [], pout = True):
     output = """
@@ -249,10 +249,10 @@ Commands:
            [op <operation action> <operation options> [<operation action>
            <operation options>]...] [meta <meta options>...]
            [--clone <clone options> | --master <master options> |
-           --group <group name> [--before <resource id> | --after <resource id>]
+           --group <group id> [--before <resource id> | --after <resource id>]
            ] [--disabled] [--wait[=n]]
         Create specified resource.  If --clone is used a clone resource is
-        created if --master is specified a master/slave resource is created.
+        created.  If --master is specified a master/slave resource is created.
         If --group is specified the resource is added to the group named.  You
         can use --before or --after to specify the position of the added
         resource relative to some resource already existing in the group.
@@ -415,7 +415,7 @@ Commands:
         otherwise.  If 'n' is not specified it defaults to 60 minutes.
         Example: pcs resource meta TestResource failure-timeout=50 stickiness=
 
-    group add <group name> <resource id> [resource id] ... [resource id]
+    group add <group id> <resource id> [resource id] ... [resource id]
               [--before <resource id> | --after <resource id>] [--wait[=n]]
         Add the specified resource to the group, creating the group if it does
         not exist.  If the resource is present in another group it is moved
@@ -426,7 +426,7 @@ Commands:
         appropriate) and then return 0 on success or 1 on error.  If 'n' is not
         specified it defaults to 60 minutes.
 
-    group remove <group name> <resource id> [resource id] ... [resource id]
+    group remove <group id> <resource id> [resource id] ... [resource id]
           [--wait[=n]]
         Remove the specified resource(s) from the group, removing the group if
         no resources remain in it.  If --wait is specified, pcs will wait up to 'n'
@@ -434,7 +434,7 @@ Commands:
         appropriate) and then return 0 on success or 1 on error.  If 'n' is not
         specified it defaults to 60 minutes.
 
-    ungroup <group name> [resource id] ... [resource id] [--wait[=n]]
+    ungroup <group id> [resource id] ... [resource id] [--wait[=n]]
         Remove the group (Note: this does not remove any resources from the
         cluster) or if resources are specified, remove the specified resources
         from the group.  If --wait is specified, pcs will wait up to 'n' seconds
@@ -449,15 +449,14 @@ Commands:
         on success or 1 on error.  If 'n' is not specified it defaults to 60
         minutes.
 
-    unclone <resource id | group name> [--wait[=n]]
+    unclone <resource id | group id> [--wait[=n]]
         Remove the clone which contains the specified group or resource (the
         resource or group will not be removed).  If --wait is specified, pcs
         will wait up to 'n' seconds for the operation to finish (including
         stopping clone instances if appropriate) and then return 0 on success
         or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 
-    master [<master/slave name>] <resource id | group name> [options]
-           [--wait[=n]]
+    master [<master/slave id>] <resource id | group id> [options] [--wait[=n]]
         Configure a resource or group as a multi-state (master/slave) resource.
         If --wait is specified, pcs will wait up to 'n' seconds for the operation
         to finish (including starting and promoting resource instances if
@@ -532,7 +531,7 @@ Examples:
 
 
     pcs resource create VirtualIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 \\
-               cidr_netmask=32 nic=eth2 op monitor interval=30s 
+               cidr_netmask=32 nic=eth2 op monitor interval=30s
       Create a new resource called 'VirtualIP' with options
 
     pcs resource create VirtualIP IPaddr2 ip=192.168.0.99 \\
@@ -821,7 +820,7 @@ Commands:
         Remove stonith id from configuration
 
     cleanup [<stonith id>]
-        Cleans up the stonith device in the lrmd (useful to reset the 
+        Cleans up the stonith device in the lrmd (useful to reset the
         status and failcount).  This tells the cluster to forget the
         operation history of a stonith device and re-detect its current state.
         This can be useful to purge knowledge of past failures that have
@@ -874,7 +873,7 @@ Examples:
 
 def property(args = [], pout = True):
     output = """
-Usage: pcs property <properties>...
+Usage: pcs property [commands]...
 Configure pacemaker properties
 
 Commands:
@@ -885,7 +884,8 @@ Commands:
         properties and their defaults.
         Run 'man pengine' and 'man crmd' to get a description of the properties.
 
-    set [--force] [--node <nodename>] <property>=[<value>]
+    set [--force | --node <nodename>] <property>=[<value>]
+            [<property>=[<value>] ...]
         Set specific pacemaker properties (if the value is blank then the
         property is removed from the configuration).  If a property is not
         recognized by pcs the property will not be created unless the
@@ -950,12 +950,12 @@ Commands:
         nodes or resources are specified then we only show information about
         them.  If --full is specified, show the internal constraint ids as well.
 
-    location add <id> <resource name> <node> <score> [resource-discovery=<option>]
-        Add a location constraint with the appropriate id, resource name,
+    location add <id> <resource id> <node> <score> [resource-discovery=<option>]
+        Add a location constraint with the appropriate id, resource id,
         node name and score. (For more advanced pacemaker usage)
 
-    location remove <id> [<resource name> <node> <score>]
-        Remove a location constraint with the appropriate id, resource name,
+    location remove <id> [<resource id> <node> <score>]
+        Remove a location constraint with the appropriate id, resource id,
         node name and score. (For more advanced pacemaker usage)
 
     order show [--full]
@@ -1055,39 +1055,39 @@ Commands:
     disable
         Disable access control lists
 
-    role create <role name> [description=<description>] [((read | write | deny)
+    role create <role id> [description=<description>] [((read | write | deny)
                                                 (xpath <query> | id <id>))...]
-        Create a role with the name and (optional) description specified.
+        Create a role with the id and (optional) description specified.
         Each role can also have an unlimited number of permissions
         (read/write/deny) applied to either an xpath query or the id
         of a specific element in the cib
 
-    role delete <role name>
+    role delete <role id>
         Delete the role specified and remove it from any users/groups it was
         assigned to
 
-    role assign <role name> [to] <username/group>
+    role assign <role id> [to] <username/group>
         Assign a role to a user or group already created with 'pcs acl
         user/group create'
 
-    role unassign <role name> [from] <username/group>
+    role unassign <role id> [from] <username/group>
         Remove a role from the specified user
 
-    user create <username> <role name> [<role name>]...
+    user create <username> <role id> [<role id>]...
         Create an ACL for the user specified and assign roles to the user
 
     user delete <username>
         Remove the user specified (and roles assigned will be unassigned for
         the specified user)
 
-    group create <group> <role name> [<role name>]...
+    group create <group> <role id> [<role id>]...
         Create an ACL for the group specified and assign roles to the group
 
     group delete <group>
         Remove the group specified (and roles assigned will be unassigned for
         the specified group)
 
-    permission add <role name> ((read | write | deny) (xpath <query> |
+    permission add <role id> ((read | write | deny) (xpath <query> |
                                                                 id <id>))...
         Add the listed permissions to the role specified
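
As a concrete illustration of the role syntax above (the user name below is
purely illustrative; the role definition matches the new ACL tests earlier in
this patch):

    pcs acl role create read-dummy read id dummy
    pcs acl role assign read-dummy to rouser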
 
@@ -1105,9 +1105,9 @@ def status(args = [], pout = True):
 Usage: pcs status [commands]...
 View current cluster and resource status
 Commands:
-    [status] [--full]
+    [status] [--full | --hide-inactive]
         View all information about the cluster and resources (--full provides
-        more details)
+        more details, --hide-inactive hides inactive resources)
 
     resources
         View current status of cluster resources
@@ -1127,8 +1127,9 @@ Commands:
         is specified, print nodes from both corosync & pacemaker.  If 'config'
         is specified, print nodes from corosync & pacemaker configuration.
 
-    pcsd <node> ...
-        Show the current status of pcsd on the specified nodes
+    pcsd [<node>] ...
+        Show the current status of pcsd on the specified nodes.
+        When no nodes are specified, the status of all nodes is displayed.
 
     xml
         View xml version of status (output from crm_mon -r -1 -X)
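
The property help above documents that a blank value removes a property.  For
example (a running cluster is assumed):

    pcs property set no-quorum-policy=freeze
    pcs property set no-quorum-policy=

The second command removes the property, so the cluster falls back to the
default, which is 'stop' according to the pengine metadata quoted earlier.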
diff --git a/pcs/utils.py b/pcs/utils.py
index 05afa76..18daa6e 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -21,6 +21,10 @@ import tarfile
 import fcntl
 import getpass
 import base64
+
+
+from errors import ReportItem
+from errors import ReportItemSeverity
 try:
     # python2
     from urllib import urlencode as urllib_urlencode
@@ -66,6 +70,12 @@ fence_bin = settings.fence_agent_binaries
 
 score_regexp = re.compile(r'^[+-]?((INFINITY)|(\d+))$')
 
+CIB_BOOLEAN_TRUE = ["true", "on", "yes", "y", "1"]
+CIB_BOOLEAN_FALSE = ["false", "off", "no", "n", "0"]
+
+class UnknownPropertyException(Exception):
+    pass
+
 def simple_cache(func):
     cache = {}
     def wrapper(*args):
@@ -478,7 +488,7 @@ def getCorosyncActiveNodes():
     nodename_re = re.compile(r"^nodelist\.node\.(\d+)\.ring0_addr.*= (.*)", re.M)
     nodestatus_re = re.compile(r"^runtime\.totem\.pg\.mrp\.srp\.members\.(\d+).status.*= (.*)", re.M)
     nodenameid_mapping_re = re.compile(r"nodelist\.node\.(\d+)\.nodeid.*= (\d+)", re.M)
-    
+
     nodes = nodename_re.findall(output)
     nodes_status = nodestatus_re.findall(output)
     nodes_mapping = nodenameid_mapping_re.findall(output)
@@ -783,6 +793,7 @@ def run(
             stdout=subprocess.PIPE,
             stderr=(subprocess.PIPE if ignore_stderr else subprocess.STDOUT),
             preexec_fn=subprocess_setup,
+            close_fds=True,
             env=env_var,
             # decodes newlines and in python3 also converts bytes to str
             universal_newlines=(not PYTHON2 and not binary_output)
@@ -1352,7 +1363,7 @@ def is_valid_resource(resource, caseInsensitiveCheck=False):
     elif resource.startswith("systemd:"):
         resource_split = resource.split(":",2)
         systemd_ra = resource_split[1]
-        if os.path.isfile("/usr/lib/systemd/system/" + systemd_ra + ".service"):
+        if os.path.isfile("/etc/systemd/system/" + systemd_ra + ".service") or os.path.isfile("/usr/lib/systemd/system/" + systemd_ra + ".service"):
             return True
         else:
             return False
@@ -1550,43 +1561,6 @@ def set_unmanaged(resource):
             "is-managed", "--meta", "--parameter-value", "false"]
     return run(args)
 
-def is_valid_property(prop):
-    output, retval = run([settings.pengine_binary, "metadata"])
-    if retval != 0:
-        err("unable to run pengine\n" + output)
-
-# whitelisted properties
-    if prop in ["enable-acl"]:
-        return True
-
-    dom = parseString(output)
-    properties = dom.getElementsByTagName("parameter");
-    for p in properties:
-        if p.getAttribute("name") == prop:
-            return True
-
-    output, retval = run([settings.crmd_binary, "metadata"])
-    if retval != 0:
-        err("unable to run crmd\n" + output)
-
-    dom = parseString(output)
-    properties = dom.getElementsByTagName("parameter");
-    for p in properties:
-        if p.getAttribute("name") == prop:
-            return True
-
-    output, retval = run([settings.cib_binary, "metadata"])
-    if retval != 0:
-        err("unable to run cib\n" + output)
-
-    dom = parseString(output)
-    properties = dom.getElementsByTagName("parameter");
-    for p in properties:
-        if p.getAttribute("name") == prop:
-            return True
-
-    return False
-
 def get_node_attributes():
     node_config = get_cib_xpath("//nodes")
     nas = {}
@@ -1622,39 +1596,36 @@ def set_node_attribute(prop, value, node):
 
 # If the property exists, remove it and replace it with the new property
 # If the value is blank, then we just remove it
-def set_cib_property(prop, value):
-    crm_config = get_cib_xpath("//crm_config")
-    if (crm_config == ""):
-        err("unable to get crm_config, is pacemaker running?")
-    property_found = False
-    document = parseString(crm_config)
-    crm_config = document.documentElement
-    cluster_property_set = crm_config.getElementsByTagName("cluster_property_set")
-    if len(cluster_property_set) == 0:
-        cluster_property_set = document.createElement("cluster_property_set")
-        cluster_property_set.setAttribute("id", "cib-bootstrap-options")
-        crm_config.appendChild(cluster_property_set) 
+def set_cib_property(prop, value, cib_dom=None):
+    update_cib = cib_dom is None
+    if update_cib:
+        crm_config = get_cib_xpath("//crm_config")
+        if crm_config == "":
+            err("unable to get crm_config, is pacemaker running?")
+        crm_config = parseString(crm_config).documentElement
     else:
-        cluster_property_set = cluster_property_set[0]
+        document = cib_dom.getElementsByTagName("crm_config")
+        if len(document) == 0:
+            err("unable to get crm_config, is pacemaker running?")
+        crm_config = document[0]
+
+    property_found = False
+    cluster_property_set = dom_prepare_child_element(
+        crm_config, "cluster_property_set", "cib-bootstrap-options"
+    )
+
     for child in cluster_property_set.getElementsByTagName("nvpair"):
-        if (child.nodeType != xml.dom.minidom.Node.ELEMENT_NODE):
-            break
-        if (child.getAttribute("name") == prop):
-            child.parentNode.removeChild(child)
+        if child.getAttribute("name") == prop:
             property_found = True
             break
+    if not property_found and value == "" and "--force" not in pcs_options:
+        err("can't remove property: '{0}' that doesn't exist".format(prop))
+    dom_update_nv_pair(
+        cluster_property_set, prop, value, "cib-bootstrap-options-"
+    )
 
-# If the value is empty we don't add it to the cluster
-    if value != "":
-        new_property = document.createElement("nvpair")
-        new_property.setAttribute("id","cib-bootstrap-options-"+prop)
-        new_property.setAttribute("name",prop)
-        new_property.setAttribute("value",value)
-        cluster_property_set.appendChild(new_property)
-    elif not property_found and "--force" not in pcs_options:
-        err("can't remove property: '%s' that doesn't exist" % (prop))
-
-    replace_cib_configuration(crm_config)
+    if update_cib:
+        replace_cib_configuration(crm_config)
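
With the optional cib_dom parameter a caller can now batch several property
changes into a single CIB update instead of pushing each change immediately.
A hypothetical caller, assuming the get_cib_dom helper available elsewhere in
utils.py:

    dom = get_cib_dom()
    set_cib_property("no-quorum-policy", "freeze", cib_dom=dom)
    set_cib_property("enable-acl", "true", cib_dom=dom)
    replace_cib_configuration(dom)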
 
 def setAttribute(a_type, a_name, a_value):
     args = ["crm_attribute", "--type", a_type, "--attr-name", a_name,
@@ -1727,7 +1698,7 @@ def stonithCheck():
                 if prop.attrib["value"] == "off" or \
                         prop.attrib["value"] == "false":
                     return False
-        
+
     primitives = et.findall(str("configuration/resources/primitive"))
     for p in primitives:
         if p.attrib["class"] == "stonith":
@@ -1855,7 +1826,7 @@ def validInstanceAttributes(res_id, ra_values, resource_type):
         metadata = get_metadata("/usr/lib/ocf/resource.d/" + resProvider + "/" + resType)
 
     if metadata == False:
-        err("Unable to find resource: ocf:%s:%s" % (resProvider, resType))
+        err("Unable to get metadata for resource: %s" % resource_type)
 
     missing_required_parameters = []
     valid_parameters = ["pcmk_host_list", "pcmk_host_map", "pcmk_host_check", "pcmk_host_argument", "pcmk_arg_map", "pcmk_list_cmd", "pcmk_status_cmd", "pcmk_monitor_cmd"]
@@ -1904,7 +1875,7 @@ def validInstanceAttributes(res_id, ra_values, resource_type):
             #):
             missing_required_parameters.remove("port")
 
-    return bad_parameters, missing_required_parameters 
+    return bad_parameters, missing_required_parameters
 
 def getClusterName():
     if is_rhel6():
@@ -2018,7 +1989,10 @@ def verify_cert_key_pair(cert, key):
 # Does pacemaker consider a variable as true in cib?
 # See crm_is_true in pacemaker/lib/common/utils.c
 def is_cib_true(var):
-    return var.lower() in ("true", "on", "yes", "y", "1")
+    return var.lower() in CIB_BOOLEAN_TRUE
+
+def is_cib_boolean(val):
+    return val.lower() in CIB_BOOLEAN_TRUE + CIB_BOOLEAN_FALSE
 
 def is_systemctl():
     systemctl_paths = [
@@ -2050,16 +2024,47 @@ def err(errorText, exit_after_error=True):
     if exit_after_error:
         sys.exit(1)
 
+
+def process_library_reports(report_item_list):
+    """
+    report_item_list list of ReportItem
+    """
+    critical_error = False
+    for report_item in report_item_list:
+        if report_item.severity == ReportItemSeverity.WARNING:
+            print("Warning: " + report_item.message)
+            continue
+
+        if report_item.severity != ReportItemSeverity.ERROR:
+            print(report_item.message)
+            continue
+
+        if report_item.forceable and "--force" in pcs_options:
+            # Let the user know what may be wrong even when --force is used,
+            # as --force may be used to override early errors that would
+            # otherwise hide later ones.
+            print("Warning: " + report_item.message)
+            continue
+
+        sys.stderr.write('Error: {0}{1}\n'.format(
+            report_item.message,
+            ", use --force to override" if report_item.forceable else ''
+        ))
+        critical_error = True
+
+    if critical_error:
+        sys.exit(1)
+
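
process_library_reports consumes only three attributes of each report item.
A minimal sketch of the shape it expects (the real classes are defined in the
new pcs/errors.py, so the definitions below are only illustrative):

    class ReportItemSeverity(object):
        # severity levels assumed here; see pcs/errors.py for the real ones
        ERROR = "error"
        WARNING = "warning"
        INFO = "info"

    class ReportItem(object):
        """One message passed from the library layer to the CLI (sketch)."""
        def __init__(self, severity, message, forceable=False):
            self.severity = severity
            self.message = message
            self.forceable = forceable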
 def serviceStatus(prefix):
-    if is_systemctl():
-        print("Daemon Status:")
-        daemons = ["corosync", "pacemaker", "pcsd"]
-        out, ret = run(["systemctl", "is-active"] + daemons)
-        status = out.split("\n")
-        out, ret = run(["systemctl", "is-enabled"]+ daemons)
-        enabled = out.split("\n")
-        for i in range(len(daemons)):
-            print(prefix + daemons[i] + ": " + status[i] + "/" + enabled[i])
+    if not is_systemctl():
+        return
+    print("Daemon Status:")
+    for service in ["corosync", "pacemaker", "pcsd"]:
+        print('{0}{1}: {2}/{3}'.format(
+            prefix, service,
+            run(["systemctl", 'is-active', service])[0].strip(),
+            run(["systemctl", 'is-enabled', service])[0].strip()
+        ))
 
 def enableServices():
     if is_rhel6():
@@ -2334,7 +2339,7 @@ def is_node_stop_cause_quorum_loss(quorum_info, local=True, node_list=None):
         votes_after_stop += node_info["votes"]
     return votes_after_stop < quorum_info["quorum"]
 
-def dom_prepare_child_element(dom_element, tag_name, id_prefix=""):
+def dom_prepare_child_element(dom_element, tag_name, id):
     dom = dom_element.ownerDocument
     child_elements = []
     for child in dom_element.childNodes:
@@ -2343,9 +2348,7 @@ def dom_prepare_child_element(dom_element, tag_name, id_prefix=""):
 
     if len(child_elements) == 0:
         child_element = dom.createElement(tag_name)
-        child_element.setAttribute(
-            "id", id_prefix + tag_name
-        )
+        child_element.setAttribute("id", find_unique_id(dom, id))
         dom_element.appendChild(child_element)
     else:
         child_element = child_elements[0]
@@ -2391,7 +2394,7 @@ def dom_update_utilization(dom_element, attributes, id_prefix=""):
     utilization = dom_prepare_child_element(
         dom_element,
         "utilization",
-        id_prefix + dom_element.getAttribute("id") + "-"
+        id_prefix + dom_element.getAttribute("id") + "-utilization"
     )
 
     for name, value in attributes:
@@ -2409,7 +2412,9 @@ def dom_update_utilization(dom_element, attributes, id_prefix=""):
 
 def dom_update_meta_attr(dom_element, attributes):
     meta_attributes = dom_prepare_child_element(
-        dom_element, "meta_attributes", dom_element.getAttribute("id") + "-"
+        dom_element,
+        "meta_attributes",
+        dom_element.getAttribute("id") + "-meta_attributes"
     )
 
     for name, value in attributes:
@@ -2437,3 +2442,139 @@ def get_utilization_str(element):
     for name, value in sorted(get_utilization(element).items()):
         output.append(name + "=" + value)
     return " ".join(output)
+
+def is_valid_cluster_property(prop_def_dict, property, value):
+    if property not in prop_def_dict:
+        raise UnknownPropertyException(
+            "unknown cluster property: '{0}'".format(property)
+        )
+    return is_valid_cib_value(
+        prop_def_dict[property]["type"],
+        value,
+        prop_def_dict[property].get("enum", [])
+    )
+
+
+def is_valid_cib_value(type, value, enum_options=[]):
+    type = type.lower()
+    if type == "enum":
+        return value in enum_options
+    elif type == "boolean":
+        return is_cib_boolean(value)
+    elif type == "integer":
+        return is_score(value)
+    elif type == "time":
+        return get_timeout_seconds(value) is not None
+    else:
+        return True
+
+
+def get_cluster_property_default(prop_def_dict, prop):
+    if prop not in prop_def_dict:
+        raise UnknownPropertyException(
+            "unknown cluster property: '{0}'".format(prop)
+        )
+    return prop_def_dict[prop]["default"]
+
+
+def get_cluster_properties_definition():
+    # we don't want to change these properties
+    banned_props = ["dc-version", "cluster-infrastructure"]
+    basic_props = [
+        "batch-limit", "no-quorum-policy", "symmetric-cluster", "enable-acl",
+        "stonith-enabled", "stonith-action", "pe-input-series-max",
+        "stop-orphan-resources", "stop-orphan-actions", "cluster-delay",
+        "start-failure-is-fatal", "pe-error-series-max", "pe-warn-series-max"
+    ]
+    readable_names = {
+        "batch-limit": "Batch Limit",
+        "no-quorum-policy": "No Quorum Policy",
+        "symmetric-cluster": "Symmetric",
+        "stonith-enabled": "Stonith Enabled",
+        "stonith-action": "Stonith Action",
+        "cluster-delay": "Cluster Delay",
+        "stop-orphan-resources": "Stop Orphan Resources",
+        "stop-orphan-actions": "Stop Orphan Actions",
+        "start-failure-is-fatal": "Start Failure is Fatal",
+        "pe-error-series-max": "PE Error Storage",
+        "pe-warn-series-max": "PE Warning Storage",
+        "pe-input-series-max": "PE Input Storage",
+        "enable-acl": "Enable ACLs"
+    }
+    sources = [
+        {
+            "name": "pengine",
+            "path": settings.pengine_binary
+        },
+        {
+            "name": "crmd",
+            "path": settings.crmd_binary
+        },
+        {
+            "name": "cib",
+            "path": settings.cib_binary
+        }
+    ]
+    definition = {}
+    for source in sources:
+        output, retval = run([source["path"], "metadata"])
+        if retval != 0:
+            err("unable to run {0}\n".format(source["name"]) + output)
+        etree = ET.fromstring(output)
+        for e in etree.findall("./parameters/parameter"):
+            prop = get_cluster_property_from_xml(e)
+            if prop["name"] not in banned_props:
+                prop["source"] = source["name"]
+                prop["advanced"] = prop["name"] not in basic_props
+                if prop["name"] in readable_names:
+                    prop["readable_name"] = readable_names[prop["name"]]
+                else:
+                    prop["readable_name"] = prop["name"]
+                definition[prop["name"]] = prop
+    return definition
+
+
+def get_cluster_property_from_xml(etree_el):
+    property = {
+        "name": etree_el.get("name"),
+        "shortdesc": etree_el.find("shortdesc").text,
+        "longdesc": etree_el.find("longdesc").text
+    }
+    if property["shortdesc"] is None:
+        property["shortdesc"] = ""
+    if property["longdesc"] is None:
+        property["longdesc"] = ""
+
+    content = etree_el.find("content")
+    if content is None:
+        property["type"] = ""
+        property["default"] = ""
+    else:
+        property["type"] = content.get("type", "")
+        property["default"] = content.get("default", "")
+
+    if property["type"] == "enum":
+        property["enum"] = []
+        if property["longdesc"]:
+            values = property["longdesc"].split("  Allowed values: ")
+            if len(values) == 2:
+                property["enum"] = values[1].split(", ")
+                property["longdesc"] = values[0]
+        if property["default"] not in property["enum"]:
+            property["enum"].append(property["default"])
+
+    if property["longdesc"] == property["shortdesc"]:
+        property["longdesc"] = ""
+    return property
+
+def get_acls(dom):
+    acls = dom.getElementsByTagName("acls")
+    if len(acls) == 0:
+        acls = dom.createElement("acls")
+        conf = dom.getElementsByTagName("configuration")
+        if len(conf) == 0:
+            utils.err("Unable to get configuration section of cib")
+        conf[0].appendChild(acls)
+    else:
+        acls = acls[0]
+    return acls
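
Taken together, the validators behave as the new tests in
pcs/test/test_utils.py expect.  An illustrative interactive session (assuming
utils is importable, as it is from the test directory):

    >>> import utils
    >>> utils.is_valid_cib_value("integer", "-INFINITY")
    True
    >>> utils.is_valid_cib_value("boolean", "truth")
    False
    >>> utils.is_valid_cib_value("enum", "freeze", ["stop", "freeze", "ignore"])
    True
    >>> utils.is_valid_cib_value("time", "10min")
    True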
diff --git a/pcsd/auth.rb b/pcsd/auth.rb
index f5e7d38..18f934b 100644
--- a/pcsd/auth.rb
+++ b/pcsd/auth.rb
@@ -47,7 +47,7 @@ class PCSAuth
 
   def self.getUsersGroups(username)
     stdout, stderr, retval = run_cmd(
-      getSuperuserSession, "id", "-Gn", username
+      getSuperuserAuth(), "id", "-Gn", username
     )
     if retval != 0
       $logger.info(
@@ -94,41 +94,43 @@ class PCSAuth
     return false
   end
 
-  def self.loginByToken(session, cookies)
+  def self.loginByToken(cookies)
+    auth_user = {}
     if username = validToken(cookies["token"])
       if SUPERUSER == username
         if cookies['CIB_user'] and cookies['CIB_user'].strip != ''
-          session[:username] = cookies['CIB_user']
+          auth_user[:username] = cookies['CIB_user']
           if cookies['CIB_user_groups'] and cookies['CIB_user_groups'].strip != ''
-            session[:usergroups] = cookieUserDecode(
+            auth_user[:usergroups] = cookieUserDecode(
               cookies['CIB_user_groups']
             ).split(nil)
           else
-            session[:usergroups] = []
+            auth_user[:usergroups] = []
           end
         else
-          session[:username] = SUPERUSER
-          session[:usergroups] = []
+          auth_user[:username] = SUPERUSER
+          auth_user[:usergroups] = []
         end
-        return true
+        return auth_user
       else
-        session[:username] = username
+        auth_user[:username] = username
         success, groups = getUsersGroups(username)
-        session[:usergroups] = success ? groups : []
-        return true
+        auth_user[:usergroups] = success ? groups : []
+        return auth_user
       end
     end
-    return false
+    return nil
   end
 
-  def self.loginByPassword(session, username, password)
+  def self.loginByPassword(username, password)
     if validUser(username, password)
-      session[:username] = username
+      auth_user = {}
+      auth_user[:username] = username
       success, groups = getUsersGroups(username)
-      session[:usergroups] = success ? groups : []
-      return true
+      auth_user[:usergroups] = success ? groups : []
+      return auth_user
     end
-    return false
+    return nil
   end
 
   def self.isLoggedIn(session)
@@ -141,7 +143,7 @@ class PCSAuth
     return false
   end
 
-  def self.getSuperuserSession()
+  def self.getSuperuserAuth()
     return {
       :username => SUPERUSER,
       :usergroups => [],
@@ -162,5 +164,17 @@ class PCSAuth
   def self.cookieUserDecode(text)
     return Base64.decode64(text)
   end
+
+  def self.sessionToAuthUser(session)
+    return {
+      :username => session[:username],
+      :usergroups => session[:usergroups],
+    }
+  end
+
+  def self.authUserToSession(auth_user, session)
+    session[:username] = auth_user[:username]
+    session[:usergroups] = auth_user[:usergroups]
+  end
 end
 
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
index 027e654..d2c6c25 100644
--- a/pcsd/bootstrap.rb
+++ b/pcsd/bootstrap.rb
@@ -43,7 +43,7 @@ def get_pcs_path(pcsd_path)
   end
 end
 
-PCS_VERSION = '0.9.148'
+PCS_VERSION = '0.9.149'
 COROSYNC = COROSYNC_BINARIES + "corosync"
 ISRHEL6 = is_rhel6
 ISSYSTEMCTL = is_systemctl
diff --git a/pcsd/cfgsync.rb b/pcsd/cfgsync.rb
index 0cfb587..ca5a21a 100644
--- a/pcsd/cfgsync.rb
+++ b/pcsd/cfgsync.rb
@@ -425,7 +425,7 @@ module Cfgsync
 
 
   class ConfigPublisher
-    def initialize(session, configs, nodes, cluster_name, tokens={})
+    def initialize(auth_user, configs, nodes, cluster_name, tokens={})
       @configs = configs
       @nodes = nodes
       @cluster_name = cluster_name
@@ -433,7 +433,7 @@ module Cfgsync
         cfg.class.name
       }
       @additional_tokens = tokens
-      @session = session
+      @auth_user = auth_user
     end
 
     def send(force=false)
@@ -451,7 +451,7 @@ module Cfgsync
       @nodes.each { |node|
         threads << Thread.new {
           code, out = send_request_with_token(
-            @session, node, 'set_configs', true, data, true, nil, 30,
+            @auth_user, node, 'set_configs', true, data, true, nil, 30,
             @additional_tokens
           )
           if 200 == code
@@ -535,11 +535,11 @@ module Cfgsync
 
 
   class ConfigFetcher
-    def initialize(session, config_classes, nodes, cluster_name)
+    def initialize(auth_user, config_classes, nodes, cluster_name)
       @config_classes = config_classes
       @nodes = nodes
       @cluster_name = cluster_name
-      @session = session
+      @auth_user = auth_user
     end
 
     def fetch_all()
@@ -591,7 +591,7 @@ module Cfgsync
       nodes.each { |node|
         threads << Thread.new {
           code, out = send_request_with_token(
-            @session, node, 'get_configs', false, data
+            @auth_user, node, 'get_configs', false, data
           )
           if 200 == code
             begin
@@ -700,13 +700,13 @@ module Cfgsync
     else
       # we run in a cluster so we need to sync the config
       publisher = ConfigPublisher.new(
-        PCSAuth.getSuperuserSession(), [config], nodes, cluster_name, tokens
+        PCSAuth.getSuperuserAuth(), [config], nodes, cluster_name, tokens
       )
       old_configs, node_responses = publisher.publish()
       if old_configs.include?(config.class.name)
         if fetch_on_conflict
           fetcher = ConfigFetcher.new(
-            PCSAuth.getSuperuserSession(), [config.class], nodes, cluster_name
+            PCSAuth.getSuperuserAuth(), [config.class], nodes, cluster_name
           )
           cfgs_to_save, _ = fetcher.fetch()
           cfgs_to_save.each { |cfg_to_save|
@@ -751,7 +751,7 @@ module Cfgsync
     end
     # we run in a cluster so we need to sync the config
     publisher = ConfigPublisher.new(
-      PCSAuth.getSuperuserSession(), [config_new], nodes, cluster_name,
+      PCSAuth.getSuperuserAuth(), [config_new], nodes, cluster_name,
       new_tokens
     )
     old_configs, node_responses = publisher.publish()
@@ -761,7 +761,7 @@ module Cfgsync
     end
     # get tokens from all nodes and merge them
     fetcher = ConfigFetcher.new(
-      PCSAuth.getSuperuserSession(), [config_new.class], nodes, cluster_name
+      PCSAuth.getSuperuserAuth(), [config_new.class], nodes, cluster_name
     )
     fetched_tokens = fetcher.fetch_all()[config_new.class.name]
     config_new = Cfgsync::merge_tokens_files(config, fetched_tokens, new_tokens)
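    With ConfigPublisher and ConfigFetcher now taking an auth_user hash
    instead of a session, internal config sync runs under an explicit
    superuser identity from PCSAuth.getSuperuserAuth(). A minimal sketch,
    with placeholder node and cluster names:

        # sketch: config sync under the built-in superuser identity
        publisher = Cfgsync::ConfigPublisher.new(
          PCSAuth.getSuperuserAuth(), [config], ['node1', 'node2'], 'mycluster'
        )
        old_configs, node_responses = publisher.publish()

        fetcher = Cfgsync::ConfigFetcher.new(
          PCSAuth.getSuperuserAuth(), [config.class], ['node1', 'node2'], 'mycluster'
        )
        cfgs_to_save, _ = fetcher.fetch()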
diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb
index 41dfff7..1da29e2 100644
--- a/pcsd/cluster_entity.rb
+++ b/pcsd/cluster_entity.rb
@@ -1018,7 +1018,7 @@ module ClusterEntity
       @pcsd_enabled = false
     end
 
-    def self.load_current_node(session, crm_dom=nil)
+    def self.load_current_node(crm_dom=nil)
       node = ClusterEntity::Node.new
       node.corosync = corosync_running?
       node.corosync_enabled = corosync_enabled?
diff --git a/pcsd/fenceagent.rb b/pcsd/fenceagent.rb
index b52ad6f..8b0bc12 100644
--- a/pcsd/fenceagent.rb
+++ b/pcsd/fenceagent.rb
@@ -1,4 +1,4 @@
-def getFenceAgents(session, fence_agent = nil)
+def getFenceAgents(auth_user, fence_agent = nil)
   fence_agent_list = {}
   agents = Dir.glob('/usr/sbin/fence_' + '*')
   agents.each { |a|
@@ -7,7 +7,7 @@ def getFenceAgents(session, fence_agent = nil)
     next if fa.name == "fence_ack_manual"
 
     if fence_agent and a.sub(/.*\//,"") == fence_agent.sub(/.*:/,"")
-      required_options, optional_options, advanced_options, info = getFenceAgentMetadata(session, fa.name)
+      required_options, optional_options, advanced_options, info = getFenceAgentMetadata(auth_user, fa.name)
       fa.required_options = required_options
       fa.optional_options = optional_options
       fa.advanced_options = advanced_options
@@ -18,7 +18,7 @@ def getFenceAgents(session, fence_agent = nil)
   fence_agent_list
 end
 
-def getFenceAgentMetadata(session, fenceagentname)
+def getFenceAgentMetadata(auth_user, fenceagentname)
   options_required = {}
   options_optional = {}
   options_advanced = {
@@ -43,7 +43,7 @@ def getFenceAgentMetadata(session, fenceagentname)
     return [options_required, options_optional, options_advanced]
   end
   stdout, stderr, retval = run_cmd(
-    session, "/usr/sbin/#{fenceagentname}", '-o', 'metadata'
+    auth_user, "/usr/sbin/#{fenceagentname}", '-o', 'metadata'
   )
   metadata = stdout.join
   begin
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 2f58502..e441817 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -16,78 +16,49 @@ require 'resource.rb'
 require 'cluster_entity.rb'
 require 'auth.rb'
 
-def getAllSettings(session, cib_dom=nil)
+def getAllSettings(auth_user, cib_dom=nil)
   unless cib_dom
-    cib_dom = get_cib_dom(session)
+    cib_dom = get_cib_dom(auth_user)
   end
-  stdout2, stderr2, retval2 = run_cmd(session, PENGINE, "metadata")
-  metadata = stdout2.join
   ret = {}
-  if cib_dom and retval2 == 0
-    doc = REXML::Document.new(metadata)
-
-    default = ""
-    el_type = ""
-    doc.elements.each("resource-agent/parameters/parameter") { |e|
-      name = e.attributes["name"]
-      name.gsub!(/-/,"_")
-      e.elements.each("content") { |c|
-        default = c.attributes["default"]
-        el_type = c.attributes["type"]
-      }
-      ret[name] = {"value" => default, "type" => el_type}
-    }
-
+  if cib_dom
     cib_dom.elements.each('/cib/configuration/crm_config//nvpair') { |e|
-      key = e.attributes['name']
-      val = e.attributes['value']
-      key.gsub!(/-/,"_")
-      if ret.has_key?(key)
-        if ret[key]["type"] == "boolean"
-          val == "true" ?  ret[key]["value"] = true : ret[key]["value"] = false
-        else
-          ret[key]["value"] = val
-        end
-
-      else
-        ret[key] = {"value" => val, "type" => "unknown"}
-      end
+      ret[e.attributes['name']] = e.attributes['value']
     }
-    return ret
   end
-  return {"error" => "Unable to get configuration settings"}
+  return ret
 end
 
-def add_fence_level(session, level, devices, node, remove = false)
+def add_fence_level(auth_user, level, devices, node, remove = false)
   if not remove
     stdout, stderr, retval = run_cmd(
-      session, PCS, "stonith", "level", "add", level, node, devices
+      auth_user, PCS, "stonith", "level", "add", level, node, devices
     )
     return retval,stdout, stderr
   else
     stdout, stderr, retval = run_cmd(
-      session, PCS, "stonith", "level", "remove", level, node, devices
+      auth_user, PCS, "stonith", "level", "remove", level, node, devices
     )
     return retval,stdout, stderr
   end
 end
 
-def add_node_attr(session, node, key, value)
+def add_node_attr(auth_user, node, key, value)
   stdout, stderr, retval = run_cmd(
-    session, PCS, "property", "set", "--node", node, key.to_s + '=' + value.to_s
+    auth_user, PCS, "property", "set", "--node", node, key.to_s + '=' + value.to_s
   )
   return retval
 end
 
-def add_meta_attr(session, resource, key, value)
+def add_meta_attr(auth_user, resource, key, value)
   stdout, stderr, retval = run_cmd(
-    session, PCS, "resource", "meta", resource, key.to_s + "=" + value.to_s
+    auth_user, PCS, "resource", "meta", resource, key.to_s + "=" + value.to_s
   )
   return retval
 end
 
 def add_location_constraint(
-  session, resource, node, score, force=false, autocorrect=true
+  auth_user, resource, node, score, force=false, autocorrect=true
 )
   if node == ""
     return "Bad node"
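    Note the first hunk above: getAllSettings no longer merges pengine
    metadata (defaults and types) and no longer returns an "error" key; it
    yields a flat name => value hash read straight from the crm_config
    nvpairs, which the get_node_status hunk further below consumes directly.
    A minimal sketch of the new return value:

        # sketch: the simplified return value is a flat name => value hash
        settings = getAllSettings(auth_user)  # e.g. {"stonith-enabled" => "false"}
        settings.each { |name, value|
          puts "#{name} = #{value}"
        }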
@@ -103,12 +74,12 @@ def add_location_constraint(
   cmd << '--force' if force
   cmd << '--autocorrect' if autocorrect
 
-  stdout, stderr, retval = run_cmd(session, *cmd)
+  stdout, stderr, retval = run_cmd(auth_user, *cmd)
   return retval, stderr.join(' ')
 end
 
 def add_location_constraint_rule(
-  session, resource, rule, score, force=false, autocorrect=true
+  auth_user, resource, rule, score, force=false, autocorrect=true
 )
   cmd = [PCS, "constraint", "location", resource, "rule"]
   if score != ''
@@ -121,12 +92,12 @@ def add_location_constraint_rule(
   cmd.concat(rule.shellsplit())
   cmd << '--force' if force
   cmd << '--autocorrect' if autocorrect
-  stdout, stderr, retval = run_cmd(session, *cmd)
+  stdout, stderr, retval = run_cmd(auth_user, *cmd)
   return retval, stderr.join(' ')
 end
 
 def add_order_constraint(
-    session, resourceA, resourceB, actionA, actionB, score, symmetrical=true,
+    auth_user, resourceA, resourceB, actionA, actionB, score, symmetrical=true,
     force=false, autocorrect=true
 )
   sym = symmetrical ? "symmetrical" : "nonsymmetrical"
@@ -139,12 +110,12 @@ def add_order_constraint(
   ]
   command << '--force' if force
   command << '--autocorrect' if autocorrect
-  stdout, stderr, retval = run_cmd(session, *command)
+  stdout, stderr, retval = run_cmd(auth_user, *command)
   return retval, stderr.join(' ')
 end
 
 def add_order_set_constraint(
-  session, resource_set_list, force=false, autocorrect=true
+  auth_user, resource_set_list, force=false, autocorrect=true
 )
   command = [PCS, "constraint", "order"]
   resource_set_list.each { |resource_set|
@@ -153,12 +124,12 @@ def add_order_set_constraint(
   }
   command << '--force' if force
   command << '--autocorrect' if autocorrect
-  stdout, stderr, retval = run_cmd(session, *command)
+  stdout, stderr, retval = run_cmd(auth_user, *command)
   return retval, stderr.join(' ')
 end
 
 def add_colocation_constraint(
-  session, resourceA, resourceB, score, force=false, autocorrect=true
+  auth_user, resourceA, resourceB, score, force=false, autocorrect=true
 )
   if score == "" or score == nil
     score = "INFINITY"
@@ -168,41 +139,41 @@ def add_colocation_constraint(
   ]
   command << '--force' if force
   command << '--autocorrect' if autocorrect
-  stdout, stderr, retval = run_cmd(session, *command)
+  stdout, stderr, retval = run_cmd(auth_user, *command)
   return retval, stderr.join(' ')
 end
 
-def remove_constraint(session, constraint_id)
+def remove_constraint(auth_user, constraint_id)
   stdout, stderror, retval = run_cmd(
-    session, PCS, "constraint", "remove", constraint_id
+    auth_user, PCS, "constraint", "remove", constraint_id
   )
   $logger.info stdout
   return retval
 end
 
-def remove_constraint_rule(session, rule_id)
+def remove_constraint_rule(auth_user, rule_id)
   stdout, stderror, retval = run_cmd(
-    session, PCS, "constraint", "rule", "remove", rule_id
+    auth_user, PCS, "constraint", "rule", "remove", rule_id
   )
   $logger.info stdout
   return retval
 end
 
-def add_acl_role(session, name, description)
+def add_acl_role(auth_user, name, description)
   cmd = [PCS, "acl", "role", "create", name.to_s]
   if description.to_s != ""
     cmd << "description=#{description.to_s}"
   end
-  stdout, stderror, retval = run_cmd(session, *cmd)
+  stdout, stderror, retval = run_cmd(auth_user, *cmd)
   if retval != 0
     return stderror.join("\n").strip
   end
   return ""
 end
 
-def add_acl_permission(session, acl_role_id, perm_type, xpath_id, query_id)
+def add_acl_permission(auth_user, acl_role_id, perm_type, xpath_id, query_id)
   stdout, stderror, retval = run_cmd(
-    session, PCS, "acl", "permission", "add", acl_role_id.to_s, perm_type.to_s,
+    auth_user, PCS, "acl", "permission", "add", acl_role_id.to_s, perm_type.to_s,
     xpath_id.to_s, query_id.to_s
   )
   if retval != 0
@@ -215,10 +186,10 @@ def add_acl_permission(session, acl_role_id, perm_type, xpath_id, query_id)
   return ""
 end
 
-def add_acl_usergroup(session, acl_role_id, user_group, name)
+def add_acl_usergroup(auth_user, acl_role_id, user_group, name)
   if (user_group == "user") or (user_group == "group")
     stdout, stderr, retval = run_cmd(
-      session, PCS, "acl", user_group, "create", name.to_s, acl_role_id.to_s
+      auth_user, PCS, "acl", user_group, "create", name.to_s, acl_role_id.to_s
     )
     if retval == 0
       return ""
@@ -228,7 +199,7 @@ def add_acl_usergroup(session, acl_role_id, user_group, name)
     end
   end
   stdout, stderror, retval = run_cmd(
-    session, PCS, "acl", "role", "assign", acl_role_id.to_s, name.to_s
+    auth_user, PCS, "acl", "role", "assign", acl_role_id.to_s, name.to_s
   )
   if retval != 0
     if stderror.empty?
@@ -240,9 +211,9 @@ def add_acl_usergroup(session, acl_role_id, user_group, name)
   return ""
 end
 
-def remove_acl_permission(session, acl_perm_id)
+def remove_acl_permission(auth_user, acl_perm_id)
   stdout, stderror, retval = run_cmd(
-    session, PCS, "acl", "permission", "delete", acl_perm_id.to_s
+    auth_user, PCS, "acl", "permission", "delete", acl_perm_id.to_s
   )
   if retval != 0
     if stderror.empty?
@@ -254,9 +225,9 @@ def remove_acl_permission(session, acl_perm_id)
   return ""
 end
 
-def remove_acl_usergroup(session, role_id, usergroup_id)
+def remove_acl_usergroup(auth_user, role_id, usergroup_id)
   stdout, stderror, retval = run_cmd(
-    session, PCS, "acl", "role", "unassign", role_id.to_s, usergroup_id.to_s,
+    auth_user, PCS, "acl", "role", "unassign", role_id.to_s, usergroup_id.to_s,
     "--autodelete"
   )
   if retval != 0
@@ -290,15 +261,15 @@ def get_cluster_nodes(cluster_name)
   return nodes
 end
 
-def send_cluster_request_with_token(session, cluster_name, request, post=false, data={}, remote=true, raw_data=nil)
+def send_cluster_request_with_token(auth_user, cluster_name, request, post=false, data={}, remote=true, raw_data=nil)
   $logger.info("SCRWT: " + request)
   nodes = get_cluster_nodes(cluster_name)
   return send_nodes_request_with_token(
-    session, nodes, request, post, data, remote, raw_data
+    auth_user, nodes, request, post, data, remote, raw_data
   )
 end
 
-def send_nodes_request_with_token(session, nodes, request, post=false, data={}, remote=true, raw_data=nil)
+def send_nodes_request_with_token(auth_user, nodes, request, post=false, data={}, remote=true, raw_data=nil)
   out = ""
   code = 0
   $logger.info("SNRWT: " + request)
@@ -320,7 +291,7 @@ def send_nodes_request_with_token(session, nodes, request, post=false, data={},
   for node in nodes
     $logger.info "SNRWT Node: #{node} Request: #{request}"
     code, out = send_request_with_token(
-      session, node, request, post, data, remote, raw_data
+      auth_user, node, request, post, data, remote, raw_data
     )
     # try next node if:
     # - current node does not support the request (old version of pcsd?) (404)
@@ -357,7 +328,7 @@ def send_nodes_request_with_token(session, nodes, request, post=false, data={},
   return code, out
 end
 
-def send_request_with_token(session, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30, additional_tokens={})
+def send_request_with_token(auth_user, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30, additional_tokens={})
   token = additional_tokens[node] || get_node_token(node)
   $logger.info "SRWT Node: #{node} Request: #{request}"
   if not token
@@ -368,11 +339,11 @@ def send_request_with_token(session, node, request, post=false, data={}, remote=
     'token' => token,
   }
   return send_request(
-    session, node, request, post, data, remote, raw_data, timeout, cookies_data
+    auth_user, node, request, post, data, remote, raw_data, timeout, cookies_data
   )
 end
 
-def send_request(session, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30, cookies_data=nil)
+def send_request(auth_user, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30, cookies_data=nil)
   cookies_data = {} if not cookies_data
   begin
     request = "/#{request}" if not request.start_with?("/")
@@ -403,10 +374,10 @@ def send_request(session, node, request, post=false, data={}, remote=true, raw_d
     # We cannot do it for CIB_user however to be backward compatible
     # so we at least remove disallowed characters.
     cookies_data_default['CIB_user'] = PCSAuth.cookieUserSafe(
-      session[:username].to_s
+      auth_user[:username].to_s
     )
     cookies_data_default['CIB_user_groups'] = PCSAuth.cookieUserEncode(
-      (session[:usergroups] || []).join(' ')
+      (auth_user[:usergroups] || []).join(' ')
     )
 
     cookies_data_default.update(cookies_data)
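    As the hunk above shows, the caller's identity travels between nodes as
    CIB_user / CIB_user_groups cookies built from auth_user; the group list
    is Base64-encoded for cookie transport by cookieUserEncode and decoded
    again by cookieUserDecode (see the auth.rb hunk above). A round-trip
    sketch, with placeholder group names:

        # round-trip sketch of the cookie encoding used above
        groups = ['haclient', 'admins']
        encoded = PCSAuth.cookieUserEncode(groups.join(' '))
        PCSAuth.cookieUserDecode(encoded).split(nil)  # => ["haclient", "admins"]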
@@ -434,17 +405,17 @@ def send_request(session, node, request, post=false, data={}, remote=true, raw_d
   end
 end
 
-def add_node(session, new_nodename, all=false, auto_start=true)
+def add_node(auth_user, new_nodename, all=false, auto_start=true)
   if all
     command = [PCS, "cluster", "node", "add", new_nodename]
     if auto_start
       command << '--start'
       command << '--enable'
     end
-    out, stderror, retval = run_cmd(session, *command)
+    out, stderror, retval = run_cmd(auth_user, *command)
   else
     out, stderror, retval = run_cmd(
-      session, PCS, "cluster", "localnode", "add", new_nodename
+      auth_user, PCS, "cluster", "localnode", "add", new_nodename
     )
   end
   $logger.info("Adding #{new_nodename} to pcs_settings.conf")
@@ -460,15 +431,15 @@ def add_node(session, new_nodename, all=false, auto_start=true)
   return retval, out.join("\n") + stderror.join("\n")
 end
 
-def remove_node(session, new_nodename, all=false)
+def remove_node(auth_user, new_nodename, all=false)
   if all
     # we check for a quorum loss warning in remote_remove_nodes
     out, stderror, retval = run_cmd(
-      session, PCS, "cluster", "node", "remove", new_nodename, "--force"
+      auth_user, PCS, "cluster", "node", "remove", new_nodename, "--force"
     )
   else
     out, stderror, retval = run_cmd(
-      session, PCS, "cluster", "localnode", "remove", new_nodename
+      auth_user, PCS, "cluster", "localnode", "remove", new_nodename
     )
   end
   $logger.info("Removing #{new_nodename} from pcs_settings.conf")
@@ -486,7 +457,7 @@ end
 
 def get_current_node_name()
   stdout, stderror, retval = run_cmd(
-    PCSAuth.getSuperuserSession, CRM_NODE, "-n"
+    PCSAuth.getSuperuserAuth(), CRM_NODE, "-n"
   )
   if retval == 0 and stdout.length > 0
     return stdout[0].chomp()
@@ -497,7 +468,7 @@ end
 def get_local_node_id()
   if ISRHEL6
     out, errout, retval = run_cmd(
-      PCSAuth.getSuperuserSession, COROSYNC_CMAPCTL, "cluster.cman"
+      PCSAuth.getSuperuserAuth(), COROSYNC_CMAPCTL, "cluster.cman"
     )
     if retval != 0
       return ""
@@ -508,7 +479,7 @@ def get_local_node_id()
     end
     local_node_name = match[1]
     out, errout, retval = run_cmd(
-      PCSAuth.getSuperuserSession,
+      PCSAuth.getSuperuserAuth(),
       CMAN_TOOL, "nodes", "-F", "id", "-n", local_node_name
     )
     if retval != 0
@@ -517,7 +488,7 @@ def get_local_node_id()
     return out[0].strip()
   end
   out, errout, retval = run_cmd(
-    PCSAuth.getSuperuserSession,
+    PCSAuth.getSuperuserAuth(),
     COROSYNC_CMAPCTL, "-g", "runtime.votequorum.this_node_id"
   )
   if retval != 0
@@ -533,7 +504,7 @@ end
 
 def get_corosync_nodes()
   stdout, stderror, retval = run_cmd(
-    PCSAuth.getSuperuserSession, PCS, "status", "nodes", "corosync"
+    PCSAuth.getSuperuserAuth(), PCS, "status", "nodes", "corosync"
   )
   if retval != 0
     return []
@@ -563,7 +534,7 @@ def get_nodes_status()
   pacemaker_standby = []
   in_pacemaker = false
   stdout, stderr, retval = run_cmd(
-    PCSAuth.getSuperuserSession, PCS, "status", "nodes", "both"
+    PCSAuth.getSuperuserAuth(), PCS, "status", "nodes", "both"
   )
   stdout.each {|l|
     l = l.chomp
@@ -606,7 +577,7 @@ def get_nodes_status()
 end
 
 def need_ring1_address?()
-  out, errout, retval = run_cmd(PCSAuth.getSuperuserSession, COROSYNC_CMAPCTL)
+  out, errout, retval = run_cmd(PCSAuth.getSuperuserAuth(), COROSYNC_CMAPCTL)
   if retval != 0
     return false
   else
@@ -645,9 +616,9 @@ def is_cman_with_udpu_transport?
   return false
 end
 
-def get_resource_agents_avail(session)
+def get_resource_agents_avail(auth_user, params)
   code, result = send_cluster_request_with_token(
-    session, params[:cluster], 'get_avail_resource_agents'
+    auth_user, params[:cluster], 'get_avail_resource_agents'
   )
   return {} if 200 != code
   begin
@@ -662,9 +633,9 @@ def get_resource_agents_avail(session)
   end
 end
 
-def get_stonith_agents_avail(session)
+def get_stonith_agents_avail(auth_user, params)
   code, result = send_cluster_request_with_token(
-    session, params[:cluster], 'get_avail_fence_agents'
+    auth_user, params[:cluster], 'get_avail_fence_agents'
   )
   return {} if 200 != code
   begin
@@ -682,7 +653,7 @@ end
 def get_cluster_name()
   if ISRHEL6
     stdout, stderror, retval = run_cmd(
-      PCSAuth.getSuperuserSession, COROSYNC_CMAPCTL, "cluster"
+      PCSAuth.getSuperuserAuth(), COROSYNC_CMAPCTL, "cluster"
     )
     if retval == 0
       stdout.each { |line|
@@ -703,7 +674,7 @@ def get_cluster_name()
   end
 
   stdout, stderror, retval = run_cmd(
-    PCSAuth.getSuperuserSession, COROSYNC_CMAPCTL, "totem.cluster_name"
+    PCSAuth.getSuperuserAuth(), COROSYNC_CMAPCTL, "totem.cluster_name"
   )
   if retval != 0 and not ISRHEL6
     # Cluster probably isn't running, try to get cluster name from
@@ -729,9 +700,9 @@ def get_cluster_name()
   end
 end
 
-def get_node_attributes(session, cib_dom=nil)
+def get_node_attributes(auth_user, cib_dom=nil)
   unless cib_dom
-    cib_dom = get_cib_dom(session)
+    cib_dom = get_cib_dom(auth_user)
     return {} unless cib_dom
   end
   node_attrs = {}
@@ -767,9 +738,9 @@ def get_nodes_utilization(cib_dom)
   return utilization
 end
 
-def get_fence_levels(session, cib_dom=nil)
+def get_fence_levels(auth_user, cib_dom=nil)
   unless cib_dom
-    cib_dom = get_cib_dom(session)
+    cib_dom = get_cib_dom(auth_user)
     return {} unless cib_dom
   end
 
@@ -789,9 +760,9 @@ def get_fence_levels(session, cib_dom=nil)
   return fence_levels
 end
 
-def get_acls(session, cib_dom=nil)
+def get_acls(auth_user, cib_dom=nil)
   unless cib_dom
-    cib_dom = get_cib_dom(session)
+    cib_dom = get_cib_dom(auth_user)
     return {} unless cib_dom
   end
 
@@ -835,14 +806,14 @@ def get_acls(session, cib_dom=nil)
   return acls
 end
 
-def enable_cluster(session)
-  stdout, stderror, retval = run_cmd(session, PCS, "cluster", "enable")
+def enable_cluster(auth_user)
+  stdout, stderror, retval = run_cmd(auth_user, PCS, "cluster", "enable")
   return false if retval != 0
   return true
 end
 
-def disable_cluster(session)
-  stdout, stderror, retval = run_cmd(session, PCS, "cluster", "disable")
+def disable_cluster(auth_user)
+  stdout, stderror, retval = run_cmd(auth_user, PCS, "cluster", "disable")
   return false if retval != 0
   return true
 end
@@ -868,7 +839,7 @@ end
 def get_corosync_version()
   begin
     stdout, stderror, retval = run_cmd(
-      PCSAuth.getSuperuserSession, COROSYNC, "-v"
+      PCSAuth.getSuperuserAuth(), COROSYNC, "-v"
     )
   rescue
     stdout = []
@@ -903,7 +874,7 @@ end
 def get_pacemaker_version()
   begin
     stdout, stderror, retval = run_cmd(
-      PCSAuth.getSuperuserSession, PACEMAKERD, "-$"
+      PCSAuth.getSuperuserAuth(), PACEMAKERD, "-$"
     )
   rescue
     stdout = []
@@ -929,7 +900,7 @@ end
 def get_cman_version()
   begin
     stdout, stderror, retval = run_cmd(
-      PCSAuth.getSuperuserSession, CMAN_TOOL, "-V"
+      PCSAuth.getSuperuserAuth(), CMAN_TOOL, "-V"
     )
   rescue
     stdout = []
@@ -978,12 +949,12 @@ def get_pcsd_version()
   return PCS_VERSION.split(".").collect { | x | x.to_i }
 end
 
-def run_cmd(session, *args)
+def run_cmd(auth_user, *args)
   options = {}
-  return run_cmd_options(session, options, *args)
+  return run_cmd_options(auth_user, options, *args)
 end
 
-def run_cmd_options(session, options, *args)
+def run_cmd_options(auth_user, options, *args)
   $logger.info("Running: " + args.join(" "))
   start = Time.now
   out = ""
@@ -1001,9 +972,9 @@ def run_cmd_options(session, options, *args)
     $logger.debug(errout)
     $logger.debug("Duration: " + duration.to_s + "s")
   }
-  cib_user = session[:username]
+  cib_user = auth_user[:username]
   # when running 'id -Gn' to get the groups they are not defined yet
-  cib_groups = (session[:usergroups] || []).join(' ')
+  cib_groups = (auth_user[:usergroups] || []).join(' ')
   $logger.info("CIB USER: #{cib_user}, groups: #{cib_groups}")
   # Open4.popen4 reimplementation which sets ENV in a child process prior
   # to running an external process by exec
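    run_cmd_options exports the same identity to locally spawned commands:
    per the comment above, a popen4 reimplementation sets ENV in the child
    before exec. A hedged sketch of the idea only; the exact variable names
    are assumed to follow the CIB_user / CIB_user_groups convention used for
    the cookies above:

        # hedged sketch: export the effective identity into the child's
        # environment before exec (variable names assumed)
        pid = fork {
          ENV['CIB_user'] = auth_user[:username]
          ENV['CIB_user_groups'] = (auth_user[:usergroups] || []).join(' ')
          exec('crm_mon', '--one-shot')
        }
        Process.wait(pid)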
@@ -1077,7 +1048,7 @@ def add_prefix_to_keys(hash, prefix)
   return new_hash
 end
 
-def check_gui_status_of_nodes(session, nodes, check_mutuality=false, timeout=10)
+def check_gui_status_of_nodes(auth_user, nodes, check_mutuality=false, timeout=10)
   options = {}
   options[:check_auth_only] = '' if not check_mutuality
   threads = []
@@ -1089,7 +1060,7 @@ def check_gui_status_of_nodes(session, nodes, check_mutuality=false, timeout=10)
   nodes.each { |node|
     threads << Thread.new {
       code, response = send_request_with_token(
-        session, node, 'check_auth', false, options, true, nil, timeout
+        auth_user, node, 'check_auth', false, options, true, nil, timeout
       )
       if code == 200
         if check_mutuality
@@ -1123,7 +1094,7 @@ def check_gui_status_of_nodes(session, nodes, check_mutuality=false, timeout=10)
   return online_nodes, offline_nodes, not_authorized_nodes
 end
 
-def pcs_auth(session, nodes, username, password, force=false, local=true)
+def pcs_auth(auth_user, nodes, username, password, force=false, local=true)
   # if no sync is needed, do not report a sync error
   sync_successful = true
   sync_failed_nodes = []
@@ -1131,7 +1102,7 @@ def pcs_auth(session, nodes, username, password, force=false, local=true)
   # check for already authorized nodes
   if not force
     online, offline, not_authenticated = check_gui_status_of_nodes(
-      session, nodes, true
+      auth_user, nodes, true
     )
     if not_authenticated.length < 1
       result = {}
@@ -1143,7 +1114,7 @@ def pcs_auth(session, nodes, username, password, force=false, local=true)
 
   # authorize the nodes locally (i.e. not bidirectionally)
   auth_responses = run_auth_requests(
-    session, nodes, nodes, username, password, force, true
+    auth_user, nodes, nodes, username, password, force, true
   )
 
   # get the tokens and sync them within the local cluster
@@ -1191,7 +1162,7 @@ def pcs_auth(session, nodes, username, password, force=false, local=true)
         nodes_to_auth << node if not cluster_nodes.include?(node)
       }
       auth_responses2 = run_auth_requests(
-        session, nodes_to_auth, nodes, username, password, force, false
+        auth_user, nodes_to_auth, nodes, username, password, force, false
       )
       auth_responses.update(auth_responses2)
     end
@@ -1200,7 +1171,7 @@ def pcs_auth(session, nodes, username, password, force=false, local=true)
   return auth_responses, sync_successful, sync_failed_nodes, sync_responses
 end
 
-def run_auth_requests(session, nodes_to_send, nodes_to_auth, username, password, force=false, local=true)
+def run_auth_requests(auth_user, nodes_to_send, nodes_to_auth, username, password, force=false, local=true)
   data = {}
   nodes_to_auth.each_with_index { |node, index|
     data["node-#{index}"] = node
@@ -1214,7 +1185,7 @@ def run_auth_requests(session, nodes_to_send, nodes_to_auth, username, password,
   threads = []
   nodes_to_send.each { |node|
     threads << Thread.new {
-      code, response = send_request(session, node, 'auth', true, data)
+      code, response = send_request(auth_user, node, 'auth', true, data)
       if 200 == code
         token = response.strip
         if '' == token
@@ -1232,7 +1203,7 @@ def run_auth_requests(session, nodes_to_send, nodes_to_auth, username, password,
 end
 
 def send_local_configs_to_nodes(
-  session, nodes, force=false, clear_local_permissions=false
+  auth_user, nodes, force=false, clear_local_permissions=false
 )
   configs = Cfgsync::get_configs_local(true)
   if clear_local_permissions
@@ -1241,12 +1212,12 @@ def send_local_configs_to_nodes(
     configs[Cfgsync::PcsdSettings.name].text = pcs_config.text()
   end
   publisher = Cfgsync::ConfigPublisher.new(
-    session, configs.values(), nodes, $cluster_name
+    auth_user, configs.values(), nodes, $cluster_name
   )
   return publisher.send(force)
 end
 
-def send_local_certs_to_nodes(session, nodes)
+def send_local_certs_to_nodes(auth_user, nodes)
   begin
     data = {
       'ssl_cert' => File.read(CRT_FILE),
@@ -1283,7 +1254,7 @@ def send_local_certs_to_nodes(session, nodes)
   nodes.each { |node|
     threads << Thread.new {
       code, response = send_request_with_token(
-        session, node, '/set_certs', true, data
+        auth_user, node, '/set_certs', true, data
       )
       node_response[node] = [code, response]
     }
@@ -1328,13 +1299,13 @@ def send_local_certs_to_nodes(session, nodes)
   }
 end
 
-def pcsd_restart_nodes(session, nodes)
+def pcsd_restart_nodes(auth_user, nodes)
   node_response = {}
   threads = []
   nodes.each { |node|
     threads << Thread.new {
       code, response = send_request_with_token(
-        session, node, '/pcsd_restart', true
+        auth_user, node, '/pcsd_restart', true
       )
       node_response[node] = [code, response]
     }
@@ -1402,7 +1373,7 @@ def verify_cert_key_pair(cert, key)
   key_modulus = nil
 
   stdout, stderr, retval = run_cmd_options(
-    PCSAuth.getSuperuserSession(),
+    PCSAuth.getSuperuserAuth(),
     {
       'stdin' => cert,
     },
@@ -1415,7 +1386,7 @@ def verify_cert_key_pair(cert, key)
   end
 
   stdout, stderr, retval = run_cmd_options(
-    PCSAuth.getSuperuserSession(),
+    PCSAuth.getSuperuserAuth(),
     {
       'stdin' => key,
     },
@@ -1443,7 +1414,7 @@ def verify_cookie_secret(secret)
   return []
 end
 
-def cluster_status_from_nodes(session, cluster_nodes, cluster_name)
+def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
   node_map = {}
   forbidden_nodes = {}
   overview = {
@@ -1460,7 +1431,7 @@ def cluster_status_from_nodes(session, cluster_nodes, cluster_name)
   cluster_nodes.uniq.each { |node|
     threads << Thread.new {
       code, response = send_request_with_token(
-        session,
+        auth_user,
         node,
         'status',
         false,
@@ -1651,7 +1622,7 @@ def get_node_uptime()
   return '%d day%s, %02d:%02d:%02d' % [dd, dd != 1?'s':'', hh, mm, ss]
 end
 
-def get_node_status(session, cib_dom)
+def get_node_status(auth_user, cib_dom)
   node_status = {
       :cluster_name => $cluster_name,
       :groups => [],
@@ -1663,10 +1634,10 @@ def get_node_status(session, cib_dom)
       :cluster_settings => {},
       :need_ring1_address => need_ring1_address?,
       :is_cman_with_udpu_transport => is_cman_with_udpu_transport?,
-      :acls => get_acls(session, cib_dom),
-      :username => session[:username],
-      :fence_levels => get_fence_levels(session, cib_dom),
-      :node_attr => node_attrs_to_v2(get_node_attributes(session, cib_dom)),
+      :acls => get_acls(auth_user, cib_dom),
+      :username => auth_user[:username],
+      :fence_levels => get_fence_levels(auth_user, cib_dom),
+      :node_attr => node_attrs_to_v2(get_node_attributes(auth_user, cib_dom)),
       :nodes_utilization => get_nodes_utilization(cib_dom),
       :known_nodes => []
   }
@@ -1688,10 +1659,7 @@ def get_node_status(session, cib_dom)
     node_status[:constraints] = getAllConstraints(cib_dom.elements['/cib/configuration/constraints'])
   end
 
-  cluster_settings = getAllSettings(session, cib_dom)
-  if not cluster_settings.has_key?('error')
-    node_status[:cluster_settings] = cluster_settings
-  end
+  node_status[:cluster_settings] = getAllSettings(auth_user, cib_dom)
 
   return node_status
 end
@@ -1766,10 +1734,10 @@ def get_resource_by_id(id, cib_dom, crm_dom=nil, rsc_status=nil, operations=fals
   end
 end
 
-def get_crm_mon_dom(session)
+def get_crm_mon_dom(auth_user)
   begin
     stdout, _, retval = run_cmd(
-      session, CRM_MON, '--one-shot', '-r', '--as-xml'
+      auth_user, CRM_MON, '--one-shot', '-r', '--as-xml'
     )
     if retval == 0
       return REXML::Document.new(stdout.join("\n"))
@@ -1780,9 +1748,9 @@ def get_crm_mon_dom(session)
   return nil
 end
 
-def get_cib_dom(session)
+def get_cib_dom(auth_user)
   begin
-    stdout, _, retval = run_cmd(session, 'cibadmin', '-Q', '-l')
+    stdout, _, retval = run_cmd(auth_user, 'cibadmin', '-Q', '-l')
     if retval == 0
       return REXML::Document.new(stdout.join("\n"))
     end
@@ -1847,18 +1815,18 @@ def status_v1_to_v2(status)
   return new_status
 end
 
-def allowed_for_local_cluster(session, action)
+def allowed_for_local_cluster(auth_user, action)
   pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
   return pcs_config.permissions_local.allows?(
-    session[:username], session[:usergroups], action
+    auth_user[:username], auth_user[:usergroups], action
   )
 end
 
-def allowed_for_superuser(session)
+def allowed_for_superuser(auth_user)
   $logger.debug(
-    "permission check superuser username=#{session[:username]} groups=#{session[:groups]}"
+    "permission check superuser username=#{auth_user[:username]} groups=#{auth_user[:usergroups]}"
   )
-  if SUPERUSER != session[:username]
+  if SUPERUSER != auth_user[:username]
     $logger.debug('permission denied')
     return false
   end
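    Permission checks read the same hash: allowed_for_local_cluster matches
    the username and groups against the local pcs_settings permissions, while
    allowed_for_superuser compares the username to SUPERUSER. A minimal usage
    sketch:

        # sketch: guarding an action with the hash-based checks
        auth_user = PCSAuth.sessionToAuthUser(session)
        unless allowed_for_local_cluster(auth_user, Permissions::FULL)
          halt [403, 'Permission denied']
        end
        allowed_for_superuser(auth_user)  # true only for the built-in superuser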
diff --git a/pcsd/pcsd b/pcsd/pcsd
index 1257f9c..6b3b04f 100755
--- a/pcsd/pcsd
+++ b/pcsd/pcsd
@@ -26,7 +26,9 @@ prog="pcsd"
 config="/var/lib/pcsd"
 pidfile="/var/run/pcsd.pid"
 
+set -a
 [ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
+set +a
 
 lockfile=/var/lock/subsys/$prog
 
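    The init-script change wraps the sourcing of /etc/sysconfig/pcsd in
    set -a / set +a, so every variable assigned there is auto-exported into
    the daemon's environment rather than staying shell-local. pcsd.rb can
    then read the values from ENV, as the SESSION_LIFETIME and DISABLE_GUI
    hunks below do; a minimal Ruby-side sketch:

        # sketch: sysconfig values arrive as environment variables via `set -a`
        session_lifetime = ENV['SESSION_LIFETIME'].to_i
        session_lifetime = 60 * 60 unless session_lifetime > 0
        disable_gui = (ENV['DISABLE_GUI'].to_s.downcase == 'true')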
diff --git a/pcsd/pcsd-cli.rb b/pcsd/pcsd-cli.rb
index 630ff87..06578e5 100755
--- a/pcsd/pcsd-cli.rb
+++ b/pcsd/pcsd-cli.rb
@@ -26,7 +26,7 @@ end
 
 
 # bootstrap, emulate environment created by pcsd http server
-session = {}
+auth_user = {}
 PCS = get_pcs_path(File.expand_path(File.dirname(__FILE__)))
 $logger_device = StringIO.new
 $logger = configure_logger($logger_device)
@@ -35,28 +35,28 @@ $logger = configure_logger($logger_device)
 uid = Process.uid
 if 0 == uid
   if ENV['CIB_user'] and ENV['CIB_user'].strip != ''
-    session[:username] = ENV['CIB_user']
+    auth_user[:username] = ENV['CIB_user']
     if ENV['CIB_user_groups'] and ENV['CIB_user_groups'].strip != ''
-      session[:usergroups] = ENV['CIB_user_groups'].split(nil)
+      auth_user[:usergroups] = ENV['CIB_user_groups'].split(nil)
     else
-      session[:usergroups] = []
+      auth_user[:usergroups] = []
     end
   else
-    session[:username] = SUPERUSER
-    session[:usergroups] = []
+    auth_user[:username] = SUPERUSER
+    auth_user[:usergroups] = []
   end
 else
   username = Etc.getpwuid(uid).name
   if not PCSAuth.isUserAllowedToLogin(username)
     cli_exit('access_denied')
   else
-    session[:username] = username
+    auth_user[:username] = username
     success, groups = PCSAuth.getUsersGroups(username)
-    session[:usergroups] = success ? groups : []
+    auth_user[:usergroups] = success ? groups : []
   end
 end
 
-# continue environment setup with user set in session
+# continue environment setup with user set in auth_user
 $cluster_name = get_cluster_name()
 
 # get params and run a command
@@ -66,14 +66,14 @@ allowed_commands = {
     # returns tokens of the user who runs pcsd-cli, thus no permission check
     'only_superuser' => false,
     'permissions' => nil,
-    'call' => lambda { |params, session| read_tokens() },
+    'call' => lambda { |params, auth_user_| read_tokens() },
   },
   'auth' => {
     'only_superuser' => false,
     'permissions' => nil,
-    'call' => lambda { |params, session|
+    'call' => lambda { |params, auth_user_|
       auth_responses, sync_successful, sync_nodes_err, sync_responses = pcs_auth(
-        session, params['nodes'] || [], params['username'] || '',
+        auth_user_, params['nodes'] || [], params['username'] || '',
         params['password'] || '', params['force'], params['local']
       )
       return {
@@ -87,11 +87,11 @@ allowed_commands = {
   'send_local_configs' => {
     'only_superuser' => false,
     'permissions' => Permissions::FULL,
-    'call' => lambda { |params, session|
+    'call' => lambda { |params, auth_user_|
       send_local_configs_to_nodes(
         # for a case when sending to a node which is being added to a cluster
         # - the node doesn't have the config so it cannot check permissions
-        PCSAuth.getSuperuserSession(),
+        PCSAuth.getSuperuserAuth(),
         params['nodes'] || [],
         params['force'] || false,
         params['clear_local_cluster_permissions'] || false
@@ -101,15 +101,15 @@ allowed_commands = {
   'send_local_certs' => {
     'only_superuser' => false,
     'permissions' => Permissions::FULL,
-    'call' => lambda { |params, session|
-      send_local_certs_to_nodes(session, params['nodes'] || [])
+    'call' => lambda { |params, auth_user_|
+      send_local_certs_to_nodes(auth_user_, params['nodes'] || [])
     }
   },
   'pcsd_restart_nodes' => {
     'only_superuser' => false,
     'permissions' => nil,
-    'call' => lambda { |params, session|
-      pcsd_restart_nodes(session, params['nodes'] || [])
+    'call' => lambda { |params, auth_user_|
+      pcsd_restart_nodes(auth_user_, params['nodes'] || [])
     }
   },
 }
@@ -121,16 +121,16 @@ if allowed_commands.key?(command)
     cli_exit('bad_json_input', e.to_s)
   end
   if allowed_commands['only_superuser']
-    if not allowed_for_superuser(session)
+    if not allowed_for_superuser(auth_user)
       cli_exit('permission_denied')
     end
   end
   if allowed_commands['permissions']
-    if not allowed_for_local_cluster(session, command_settings['permissions'])
+    if not allowed_for_local_cluster(auth_user, command_settings['permissions'])
       cli_exit('permission_denied')
     end
   end
-  result = allowed_commands[command]['call'].call(params, session)
+  result = allowed_commands[command]['call'].call(params, auth_user)
   cli_exit('ok', nil, result)
 else
   cli_exit('bad_command')
diff --git a/pcsd/pcsd.conf b/pcsd/pcsd.conf
index f596d59..5ac8bc7 100644
--- a/pcsd/pcsd.conf
+++ b/pcsd/pcsd.conf
@@ -1,4 +1,9 @@
 # pcsd configuration file
 # Set PCSD_DEBUG to true for advanced pcsd debugging information
 PCSD_DEBUG=false
+# Set DISABLE_GUI to true to disable the GUI frontend in pcsd
+DISABLE_GUI=false
+# Set web UI session lifetime in seconds
+SESSION_LIFETIME=3600
+# Do not change
 RACK_ENV=production
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index 059d3a2..ea22775 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -19,6 +19,7 @@ require 'auth.rb'
 require 'wizard.rb'
 require 'cfgsync.rb'
 require 'permissions.rb'
+require 'session.rb'
 
 Dir["wizards/*.rb"].each {|file| require file}
 
@@ -43,12 +44,19 @@ rescue Errno::ENOENT
   File.open(COOKIE_FILE, 'w', 0700) {|f| f.write(secret)}
 end
 
-use Rack::Session::Cookie,
-  :expire_after => 60 * 60,
+session_lifetime = ENV['SESSION_LIFETIME'].to_i()
+session_lifetime = 60 * 60 unless session_lifetime > 0
+use SessionPoolLifetime,
+  :expire_after => session_lifetime,
   :secret => secret,
   :secure => true, # only send over HTTPS
   :httponly => true # don't provide to javascript
 
+# session storage instance
+# will be created by Rack later and fetched in "before" filter
+$session_storage = nil
+$session_storage_env = {}
+
 #use Rack::SSL
 
 if development?
@@ -65,18 +73,22 @@ if development?
 end
 
 before do
+  @auth_user = nil
+
+  # get session storage instance from env
+  if not $session_storage and env[:__session_storage]
+    $session_storage = env[:__session_storage]
+    $session_storage_env = env
+  end
+
   if request.path != '/login' and not request.path == "/logout" and not request.path == '/remote/auth'
     protected! 
   end
   $cluster_name = get_cluster_name()
-  @errorval = session[:errorval]
-  @error = session[:error]
-  session[:errorval] = nil
-  session[:error] = nil
 end
 
 configure do
-  DISABLE_GUI = false
+  DISABLE_GUI = (ENV['DISABLE_GUI'] and ENV['DISABLE_GUI'].downcase == 'true')
   PCS = get_pcs_path(File.expand_path(File.dirname(__FILE__)))
   logger = File.open("/var/log/pcsd/pcsd.log", "a+", 0600)
   STDOUT.reopen(logger)
@@ -101,7 +113,7 @@ $thread_cfgsync = Thread.new {
           if cluster_name and !cluster_name.empty?()
             $logger.debug('Config files sync thread fetching')
             fetcher = Cfgsync::ConfigFetcher.new(
-              PCSAuth.getSuperuserSession(), Cfgsync::get_cfg_classes(),
+              PCSAuth.getSuperuserAuth(), Cfgsync::get_cfg_classes(),
               get_corosync_nodes(), cluster_name
             )
             cfgs_to_save, _ = fetcher.fetch()
@@ -119,28 +131,49 @@ $thread_cfgsync = Thread.new {
   end
 }
 
+$thread_session_expired = Thread.new {
+  while true
+    sleep(60 * 5)
+    begin
+      if $session_storage
+        $session_storage.drop_expired($session_storage_env)
+      end
+    rescue => e
+      $logger.warn("Exception while removing expired sessions: #{e}")
+    end
+  end
+}
+
 helpers do
+  def is_ajax?
+    return request.env['HTTP_X_REQUESTED_WITH'] == 'XMLHttpRequest'
+  end
+
   def protected!
-    if not PCSAuth.loginByToken(session, cookies) and not PCSAuth.isLoggedIn(session)
-      # If we're on /managec/<cluster_name>/main we redirect
-      match_expr = "/managec/(.*)/(.*)"
-      mymatch = request.path.match(match_expr)
-      on_managec_main = false
-      if mymatch and mymatch.length >= 3 and mymatch[2] == "main"
-        on_managec_main = true
+    gui_request = ( # these are URLs for web pages
+      request.path == '/' or
+      request.path == '/manage' or
+      request.path == '/permissions' or
+      request.path.match('/managec/.+/main')
+    )
+    if request.path.start_with?('/remote/') or request.path == '/run_pcs'
+      @auth_user = PCSAuth.loginByToken(cookies)
+      unless @auth_user
+        halt [401, '{"notauthorized":"true"}']
       end
-
-      if request.path.start_with?('/remote') or
-        (request.path.match(match_expr) and not on_managec_main) or
-        '/run_pcs' == request.path or
-        '/clusters_overview' == request.path or
-        request.path.start_with?('/permissions_')
-      then
-        $logger.info "ERROR: Request without authentication"
+    else #/managec/* /manage/* /permissions
+      if !gui_request and !is_ajax? then
+        # Accept non-GUI requests only with the header
+        # "X-Requested-With: XMLHttpRequest", i.e. check that they were
+        # sent via AJAX. This prevents CSRF attacks.
         halt [401, '{"notauthorized":"true"}']
-      else
-        session[:pre_login_path] = request.path
-        redirect '/login'
+      elsif not PCSAuth.isLoggedIn(session)
+        if gui_request
+          session[:pre_login_path] = request.path
+          redirect '/login'
+        else
+          halt [401, '{"notauthorized":"true"}']
+        end
       end
     end
   end
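    The reworked protected! splits requests into three classes: /remote/*
    and /run_pcs authenticate by token cookie, the named GUI pages fall back
    to the login redirect, and all remaining paths must carry the AJAX marker
    header or are rejected, which blocks cross-site form posts. A hedged
    client sketch; the port and the self-signed-certificate setting are
    assumptions:

        require 'net/http'
        require 'openssl'

        # hedged sketch: a non-GUI request must mark itself as AJAX,
        # otherwise protected! halts with 401 {"notauthorized":"true"}
        uri = URI('https://localhost:2224/clusters_overview')  # port assumed
        http = Net::HTTP.new(uri.host, uri.port)
        http.use_ssl = true
        http.verify_mode = OpenSSL::SSL::VERIFY_NONE  # self-signed cert assumed
        req = Net::HTTP::Get.new(uri)
        req['X-Requested-With'] = 'XMLHttpRequest'
        puts http.request(req).code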
@@ -162,11 +195,11 @@ helpers do
 end
 
 get '/remote/?:command?' do
-  return remote(params, request, session)
+  return remote(params, request, @auth_user)
 end
 
 post '/remote/?:command?' do
-  return remote(params, request, session)
+  return remote(params, request, @auth_user)
 end
 
 post '/run_pcs' do
@@ -301,12 +334,12 @@ post '/run_pcs' do
   end
 
   if command_settings['only_superuser']
-    if not allowed_for_superuser(session)
+    if not allowed_for_superuser(@auth_user)
       return 403, 'Permission denied'
     end
   end
   if command_settings['permissions']
-    if not allowed_for_local_cluster(session, command_settings['permissions'])
+    if not allowed_for_local_cluster(@auth_user, command_settings['permissions'])
       return 403, 'Permission denied'
     end
   end
@@ -314,7 +347,7 @@ post '/run_pcs' do
   options = {}
   options['stdin'] = std_in if std_in
   std_out, std_err, retval = run_cmd_options(
-    session, options, PCS, *command_decoded
+    @auth_user, options, PCS, *command_decoded
   )
   result = {
     'status' => 'ok',
@@ -330,13 +363,17 @@ end
 if not DISABLE_GUI
   get('/login'){ erb :login, :layout => :main }
 
-  get '/logout' do 
-    session.clear
-    erb :login, :layout => :main
+  get '/logout' do
+    session.destroy
+    redirect '/login'
   end
 
   post '/login' do
-    if PCSAuth.loginByPassword(session, params['username'], params['password'])
+    auth_user = PCSAuth.loginByPassword(
+      params['username'], params['password']
+    )
+    if auth_user
+      PCSAuth.authUserToSession(auth_user, session)
       # Temporarily ignore pre_login_path until we come up with a list of valid
       # paths to redirect to (to prevent status_all issues)
       #    if session["pre_login_path"]
@@ -348,149 +385,20 @@ if not DISABLE_GUI
       #      end
       #      redirect plp
       #    else
-      redirect '/manage'
+      session.delete(:bad_login_name)
+      if is_ajax?
+        halt [200, "OK"]
+      else
+        redirect '/manage'
+      end
       #    end
     else
-      session["bad_login_name"] = params['username']
-      redirect '/login?badlogin=1'
-    end
-  end
-
-  get '/manage/?' do
-    @manage = true
-    erb :manage, :layout => :main
-  end
-
-  get '/clusters_overview' do
-    clusters_overview(params, request, session)
-  end
-
-  get '/permissions/?' do
-    @manage = true
-    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
-    @clusters = pcs_config.clusters.sort { |a, b| a.name <=> b.name }
-    erb :permissions, :layout => :main
-  end
-
-  get '/permissions_cluster_form/:cluster/?' do
-    @cluster_name = params[:cluster]
-    @error = nil
-    @permission_types = []
-    @permissions_dependencies = {}
-    @user_types = []
-    @users_permissions = []
-
-    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
-
-    if not pcs_config.is_cluster_name_in_use(@cluster_name)
-      @error = 'Cluster not found'
-    else
-      code, data = send_cluster_request_with_token(
-        session, @cluster_name, 'get_permissions'
-      )
-      if 404 == code
-        @error = 'Cluster is running an old version of pcsd which does not support permissions'
-      elsif 403 == code
-        @error = 'Permission denied'
-      elsif 200 != code
-        @error = 'Unable to load permissions of the cluster'
+      if is_ajax?
+        halt [401, '{"notauthorized":"true"}']
       else
-        begin
-          permissions = JSON.parse(data)
-          if permissions['notoken'] or permissions['noresponse']
-            @error = 'Unable to load permissions of the cluster'
-          else
-            @permission_types = permissions['permission_types'] || []
-            @permissions_dependencies = permissions['permissions_dependencies'] || {}
-            @user_types = permissions['user_types'] || []
-            @users_permissions = permissions['users_permissions'] || []
-          end
-        rescue JSON::ParserError
-          @error = 'Unable to read permissions of the cluster'
-        end
-      end
-    end
-    erb :_permissions_cluster
-  end
-
-  post '/permissions_save/?' do
-    cluster_name = params['cluster_name']
-    params.delete('cluster_name')
-    new_params = {
-      'json_data' => JSON.generate(params)
-    }
-    return send_cluster_request_with_token(
-      session, cluster_name, "set_permissions", true, new_params
-    )
-  end
-
-  get '/managec/:cluster/main' do
-    @cluster_name = params[:cluster]
-    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
-    @clusters = pcs_config.clusters
-    @nodes = get_cluster_nodes(params[:cluster])
-    if @nodes == []
-      redirect '/manage/?error=badclustername&errorval=' + params[:cluster] + '#manage'
-    end
-    @resource_agents = get_resource_agents_avail(session)
-    @stonith_agents = get_stonith_agents_avail(session)
-    @config_options = getConfigOptions2(session, @nodes)
-
-    erb :nodes, :layout => :main
-  end
-
-  get '/managec/:cluster/status_all' do
-    status_all(params, request, session, get_cluster_nodes(params[:cluster]))
-  end
-
-  get '/managec/:cluster/cluster_status' do
-    cluster_status_gui(session, params[:cluster])
-  end
-
-  get '/managec/:cluster/overview_cluster' do
-    overview_cluster(params, request, session)
-  end
-
-  get '/managec/:cluster/?*' do
-    raw_data = request.env["rack.input"].read
-    if params[:cluster]
-      send_cluster_request_with_token(
-        session, params[:cluster], "/" + params[:splat].join("/"), false, params,
-        true, raw_data
-      )
-    end
-  end
-
-  post '/managec/:cluster/?*' do
-    raw_data = request.env["rack.input"].read
-    if params[:cluster]
-      request = "/" + params[:splat].join("/")
-      code, out = send_cluster_request_with_token(
-        session, params[:cluster], request, true, params, true, raw_data
-      )
-
-      # backward compatibility layer BEGIN
-      # This code correctly remove constraints on pcs/pcsd version 0.9.137 and older
-      redirection = {
-          "/remove_constraint_remote" => "/resource_cmd/rm_constraint",
-          "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule"
-      }
-      if code == 404 and redirection.key?(request)
-        code, out = send_cluster_request_with_token(
-          session, params[:cluster], redirection[request], true, params, false,
-          raw_data
-        )
+        session[:bad_login_name] = params['username']
+        redirect '/login'
       end
-      # bcl END
-      return code, out
-    end
-  end
-
-  get '/manage/:node/?*' do
-    if params[:node]
-      return send_request_with_token(
-        session, params[:node], params[:splat].join("/"), false, {}, false
-      )
     end
   end
 
@@ -498,50 +406,44 @@ if not DISABLE_GUI
     pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
     node = params['node-name']
     code, result = send_request_with_token(
-      PCSAuth.getSuperuserSession(), node, 'status'
+      PCSAuth.getSuperuserAuth(), node, 'status'
     )
     begin
       status = JSON.parse(result)
     rescue JSON::ParserError
-      session[:error] = "genericerror"
-      session[:errorval] = 'Unable to communicate with remote pcsd.'
-      redirect '/manage'
+      return 400, "Unable to communicate with remote pcsd on node '#{node}'."
     end
 
+    warning_messages = []
+
     if status.has_key?("corosync_offline") and
       status.has_key?("corosync_online") then
       nodes = status["corosync_offline"] + status["corosync_online"]
 
-      if status["cluster_name"] == ""
-        session[:error] = "noname"
-        session[:errorval] = node
-        redirect '/manage'
+      if status["cluster_name"] == ''
+        return 400, "The node, '#{node}', does not currently have a cluster
+ configured.  You must create a cluster using this node before adding it to pcsd."
       end
 
       if pcs_config.is_cluster_name_in_use(status["cluster_name"])
-        session[:error] = "duplicatename"
-        session[:errorval] = status["cluster_name"]
-        redirect '/manage'
+        return 400, "The cluster name, '#{status['cluster_name']}', has
+already been added to pcsd.  You may not add two clusters with the same name into pcsd."
       end
 
       # auth begin
       retval, out = send_request_with_token(
-        PCSAuth.getSuperuserSession(), node, '/get_cluster_tokens'
+        PCSAuth.getSuperuserAuth(), node, '/get_cluster_tokens'
       )
       if retval == 404 # backward compatibility layer
-        session[:error] = "authimposible"
+        warning_messages << "Unable to correctly authenticate the cluster because it is running an old version of pcs/pcsd."
       else
         if retval != 200
-          session[:error] = "cannotgettokens"
-          session[:errorval] = status["cluster_name"]
-          redirect '/manage'
+          return 400, "Unable to get authentication info from cluster '#{status['cluster_name']}'."
         end
         begin
           new_tokens = JSON.parse(out)
         rescue
-          session[:error] = "cannotgettokens"
-          session[:errorval] = status["cluster_name"]
-          redirect '/manage'
+          return 400, "Unable to get authentication info from cluster '#{status['cluster_name']}'."
         end
 
         sync_config = Cfgsync::PcsdTokens.from_file('')
@@ -549,9 +451,7 @@ if not DISABLE_GUI
           sync_config, new_tokens, get_corosync_nodes(), $cluster_name
         )
         if not pushed
-          session[:error] = "configversionsconflict"
-          session[:errorval] = sync_config.class.name
-          redirect '/manage'
+          return 400, "Configuration conflict detected.\n\nSome nodes had a newer configuration than the local node. Local node's configuration was updated.  Please repeat the last action if appropriate."
         end
       end
       #auth end
@@ -563,21 +463,22 @@ if not DISABLE_GUI
         sync_config, get_corosync_nodes(), $cluster_name, true
       )
       if not pushed
-        session[:error] = 'configversionsconflict'
-        session[:errorval] = sync_config.class.name
+        return 400, "Configuration conflict detected.\n\nSome nodes had a newer configuration than the local node. Local node's configuration was updated.  Please repeat the last action if appropriate."
       end
-      redirect '/manage'
+      return 200, warning_messages.join("\n\n")
     else
-      redirect '/manage/?error=notauthorized#manage'
+      return 400, "Unable to communicate with remote pcsd on node '#{node}'."
     end
   end
 
   post '/manage/newcluster' do
-    if not allowed_for_superuser(session)
-      session[:error] = "permissiondenied"
-      redirect '/manage'
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    if not allowed_for_superuser(auth_user)
+      return 400, 'Permission denied.'
     end
 
+    warning_messages = []
+
     pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
     @manage = true
     @cluster_name = params[:clustername]
@@ -600,16 +501,12 @@ if not DISABLE_GUI
       end
     }
     if pcs_config.is_cluster_name_in_use(@cluster_name)
-      session[:error] = "duplicatename"
-      session[:errorval] = @cluster_name
-      redirect '/manage'
+      return 400, "The cluster name, '#{@cluster_name}', has already been added to pcsd.  You may not add two clusters with the same name into pcsd."
     end
 
     @nodes.each {|n|
       if pcs_config.is_node_in_use(n)
-        session[:error] = "duplicatenodename"
-        session[:errorval] = n
-        redirect '/manage'
+        return 400, "The node, '#{n}', is already configured in pcsd.  You may not add a node to two different clusters in pcsd."
       end
     }
 
@@ -617,15 +514,13 @@ if not DISABLE_GUI
     tokens = add_prefix_to_keys(get_tokens_of_nodes(@nodes), "node:")
     @nodes.each {|n|
       retval, out = send_request_with_token(
-        session, n, "/save_tokens", true, tokens
+        auth_user, n, "/save_tokens", true, tokens
       )
       if retval == 404 # backward compatibility layer
-        session[:error] = "authimposible"
+        warning_messages << "Unable to correctly authenticate the cluster on node '#{n}' because it is running an old version of pcs/pcsd."
         break
       elsif retval != 200
-        session[:error] = "cannotsavetokens"
-        session[:errorval] = n
-        redirect '/manage'
+        return 400, "Unable to authenticate all nodes on node '#{n}'."
       end
     }
 
@@ -635,7 +530,7 @@ if not DISABLE_GUI
       "Sending setup cluster request for: #{@cluster_name} to: #{node_to_send_to}"
     )
     code,out = send_request_with_token(
-      session,
+      auth_user,
       node_to_send_to,
       'setup_cluster',
       true,
@@ -667,15 +562,13 @@ if not DISABLE_GUI
         break if pushed
       }
       if not pushed
-        session[:error] = 'configversionsconflict'
-        session[:errorval] = Cfgsync::PcsdSettings.name
+        return 400, "Configuration conflict detected.\n\nSome nodes had a newer configuration than the local node. Local node's configuration was updated.  Please repeat the last action if appropriate."
       end
     else
-      session[:error] = "unabletocreate"
-      session[:errorval] = out
+      return 400, "Unable to create new cluster. If cluster already exists on one or more of the nodes run 'pcs cluster destroy' on all nodes to remove current cluster configuration.\n\n#{node_to_send_to}: #{out}"
     end
 
-    redirect '/manage'
+    return warning_messages.join("\n\n")
   end
 
   post '/manage/removecluster' do
@@ -690,10 +583,479 @@ if not DISABLE_GUI
       sync_config, get_corosync_nodes(), $cluster_name, true
     )
     if not pushed
-      session[:error] = 'configversionsconflict'
-      session[:errorval] = sync_config.class.name
+      return 400, "Configuration conflict detected.\n\nSome nodes had a newer configuration than the local node.  Local node's configuration was updated.  Please repeat the last action if appropriate."
+    end
+  end
+
+  get '/manage/check_pcsd_status' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    node_results = {}
+    if params[:nodes] != nil and params[:nodes] != ''
+      node_array = params[:nodes].split(',')
+      online, offline, notauthorized = check_gui_status_of_nodes(
+        auth_user, node_array
+      )
+      online.each { |node|
+        node_results[node] = 'Online'
+      }
+      offline.each { |node|
+        node_results[node] = 'Offline'
+      }
+      notauthorized.each { |node|
+        node_results[node] = 'Unable to authenticate'
+      }
+    end
+    return JSON.generate(node_results)
+  end
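
The new endpoint returns a JSON object mapping each queried node name to one of three states: 'Online', 'Offline' or 'Unable to authenticate'. A minimal client-side sketch of calling it (the URL, parameter and status strings come from the route above; the callback body is illustrative only):

    // Hypothetical caller; ajax_wrapper and pcs_timeout are defined in
    // pcsd.js later in this commit.
    function check_nodes_status(nodes) {
      ajax_wrapper({
        type: 'GET',
        url: '/manage/check_pcsd_status',
        data: {'nodes': nodes.join(',')},
        timeout: pcs_timeout,
        success: function (data) {
          // e.g. {"node1": "Online", "node2": "Unable to authenticate"}
          var status = jQuery.parseJSON(data);
          $.each(status, function (node, state) {
            console.log(node + ': ' + state);
          });
        }
      });
    }
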
+
+  get '/manage/get_nodes_sw_versions' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    if params[:nodes] != nil and params[:nodes] != ''
+      nodes = params[:nodes].split(',')
+      final_response = {}
+      threads = []
+      nodes.each {|node|
+        threads << Thread.new {
+          code, response = send_request_with_token(
+            auth_user, node, 'get_sw_versions'
+          )
+          begin
+            node_response = JSON.parse(response)
+            if node_response and node_response['notoken'] == true
+              $logger.error("ERROR: bad token for #{node}")
+            end
+            final_response[node] = node_response
+          rescue JSON::ParserError
+          end
+        }
+      }
+      threads.each { |t| t.join }
+      return JSON.generate(final_response)
+    end
+    return '{}'
+  end
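
This endpoint fans out one request per node on parallel threads and returns whatever JSON each node's get_sw_versions handler produced, keyed by node name; nodes whose response fails to parse are silently omitted. A sketch of reading the result (the per-node fields depend on the remote pcsd and are an assumption here):

    // Illustrative; the node-keyed response shape is from the route above.
    ajax_wrapper({
      type: 'GET',
      url: '/manage/get_nodes_sw_versions',
      data: {'nodes': ['node1', 'node2'].join(',')},
      timeout: pcs_timeout,
      success: function (data) {
        var versions = jQuery.parseJSON(data);
        $.each(versions, function (node, info) {
          console.log(node, info);  // `info` is that node's parsed JSON
        });
      }
    });
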
+
+  post '/manage/auth_gui_against_nodes' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    node_auth_error = {}
+    new_tokens = {}
+    threads = []
+    params.each { |node|
+      threads << Thread.new {
+        if node[0].end_with?("-pass") and node[0].length > 5
+          nodename = node[0][0..-6]
+          if params.has_key?("all")
+            pass = params["pass-all"]
+          else
+            pass = node[1]
+          end
+          data = {
+            'node-0' => nodename,
+            'username' => SUPERUSER,
+            'password' => pass,
+            'force' => 1,
+          }
+          node_auth_error[nodename] = 1
+          code, response = send_request(auth_user, nodename, 'auth', true, data)
+          if 200 == code
+            token = response.strip
+            if not token.empty?
+              new_tokens[nodename] = token
+              node_auth_error[nodename] = 0
+            end
+          end
+        end
+      }
+    }
+    threads.each { |t| t.join }
+
+    if not new_tokens.empty?
+      cluster_nodes = get_corosync_nodes()
+      tokens_cfg = Cfgsync::PcsdTokens.from_file('')
+      sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
+        tokens_cfg, new_tokens, cluster_nodes, $cluster_name
+      )
+    end
+
+    return [200, JSON.generate({'node_auth_error' => node_auth_error})]
+  end
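
The handler derives the node list from form field names: every parameter ending in "-pass" names one node, and when an "all" field is present the shared "pass-all" value is used instead of the per-node password. A hedged sketch of the request it expects and the response it returns (0 = token obtained, 1 = authentication failed; node names and passwords are examples):

    ajax_wrapper({
      type: 'POST',
      url: '/manage/auth_gui_against_nodes',
      data: {
        'node1-pass': 'secret1',
        'node2-pass': 'secret2'
        // or: 'all': 1, 'pass-all': 'shared-secret'
      },
      timeout: pcs_timeout,
      success: function (data) {
        // {"node_auth_error": {"node1": 0, "node2": 1}}
        var errors = jQuery.parseJSON(data)['node_auth_error'];
        $.each(errors, function (node, failed) {
          if (failed) { console.log('authentication failed: ' + node); }
        });
      }
    });
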
+
+  get '/manage/?' do
+    @manage = true
+    erb :manage, :layout => :main
+  end
+
+  get '/clusters_overview' do
+    clusters_overview(params, request, PCSAuth.sessionToAuthUser(session))
+  end
+
+  get '/permissions/?' do
+    @manage = true
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    @clusters = pcs_config.clusters.sort { |a, b| a.name <=> b.name }
+    erb :permissions, :layout => :main
+  end
+
+  get '/permissions_cluster_form/:cluster/?' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    @cluster_name = params[:cluster]
+    @error = nil
+    @permission_types = []
+    @permissions_dependencies = {}
+    @user_types = []
+    @users_permissions = []
+
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+
+    if not pcs_config.is_cluster_name_in_use(@cluster_name)
+      @error = 'Cluster not found'
+    else
+      code, data = send_cluster_request_with_token(
+        auth_user, @cluster_name, 'get_permissions'
+      )
+      if 404 == code
+        @error = 'Cluster is running an old version of pcsd which does not support permissions'
+      elsif 403 == code
+        @error = 'Permission denied'
+      elsif 200 != code
+        @error = 'Unable to load permissions of the cluster'
+      else
+        begin
+          permissions = JSON.parse(data)
+          if permissions['notoken'] or permissions['noresponse']
+            @error = 'Unable to load permissions of the cluster'
+          else
+            @permission_types = permissions['permission_types'] || []
+            @permissions_dependencies = permissions['permissions_dependencies'] || {}
+            @user_types = permissions['user_types'] || []
+            @users_permissions = permissions['users_permissions'] || []
+          end
+        rescue JSON::ParserError
+          @error = 'Unable to read permissions of the cluster'
+        end
+      end
+    end
+    erb :_permissions_cluster
+  end
+
+  get '/managec/:cluster/main' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    @cluster_name = params[:cluster]
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    @clusters = pcs_config.clusters
+    @nodes = get_cluster_nodes(params[:cluster])
+    if @nodes == []
+      redirect '/manage/'
+    end
+    @resource_agents = get_resource_agents_avail(auth_user, params)
+    @stonith_agents = get_stonith_agents_avail(auth_user, params)
+    erb :nodes, :layout => :main
+  end
+
+  post '/managec/:cluster/permissions_save/?' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    new_params = {
+      'json_data' => JSON.generate(params)
+    }
+    return send_cluster_request_with_token(
+      auth_user, params[:cluster], "set_permissions", true, new_params
+    )
+  end
+
+  get '/managec/:cluster/status_all' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    status_all(params, request, auth_user, get_cluster_nodes(params[:cluster]))
+  end
+
+  get '/managec/:cluster/cluster_status' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    cluster_status_gui(auth_user, params[:cluster])
+  end
+
+  get '/managec/:cluster/cluster_properties' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    cluster = params[:cluster]
+    unless cluster
+      return [200, '{}']
+    end
+    code, out = send_cluster_request_with_token(auth_user, cluster, 'get_cib')
+    if code == 403
+      return [403, 'Permission denied']
+    elsif code != 200
+      return [400, 'getting CIB failed']
+    end
+    begin
+      properties = getAllSettings(nil, REXML::Document.new(out))
+      code, out = send_cluster_request_with_token(
+        auth_user, cluster, 'get_cluster_properties_definition'
+      )
+
+      if code == 403
+        return [403, 'Permission denied']
+      elsif code == 404
+        definition = {
+          'batch-limit' => {
+            'name' => 'batch-limit',
+            'source' => 'pengine',
+            'default' => '0',
+            'type' => 'integer',
+            'shortdesc' => 'The number of jobs that pacemaker is allowed to execute in parallel.',
+            'longdesc' => 'The "correct" value will depend on the speed and load of your network and cluster nodes.',
+            'readable_name' => 'Batch Limit',
+            'advanced' => false
+          },
+          'no-quorum-policy' => {
+            'name' => 'no-quorum-policy',
+            'source' => 'pengine',
+            'default' => 'stop',
+            'type' => 'enum',
+            'enum' => ['stop', 'freeze', 'ignore', 'suicide'],
+            'shortdesc' => 'What to do when the cluster does not have quorum.',
+            'longdesc' => 'Allowed values:
+    * ignore - continue all resource management
+    * freeze - continue resource management, but don\'t recover resources from nodes not in the affected partition
+    * stop - stop all resources in the affected cluster partition
+    * suicide - fence all nodes in the affected cluster partition',
+            'readable_name' => 'No Quorum Policy',
+            'advanced' => false
+          },
+          'symmetric-cluster' => {
+            'name' => 'symmetric-cluster',
+            'source' => 'pengine',
+            'default' => 'true',
+            'type' => 'boolean',
+            'shortdesc' => 'All resources can run anywhere by default.',
+            'longdesc' => 'All resources can run anywhere by default.',
+            'readable_name' => 'Symmetric',
+            'advanced' => false
+          },
+          'stonith-enabled' => {
+            'name' => 'stonith-enabled',
+            'source' => 'pengine',
+            'default' => 'true',
+            'type' => 'boolean',
+            'shortdesc' => 'Failed nodes are STONITH\'d',
+            'longdesc' => 'Failed nodes are STONITH\'d',
+            'readable_name' => 'Stonith Enabled',
+            'advanced' => false
+          },
+          'stonith-action' => {
+            'name' => 'stonith-action',
+            'source' => 'pengine',
+            'default' => 'reboot',
+            'type' => 'enum',
+            'enum' => ['reboot', 'poweroff', 'off'],
+            'shortdesc' => 'Action to send to STONITH device',
+            'longdesc' => 'Action to send to STONITH device Allowed values: reboot, poweroff, off',
+            'readable_name' => 'Stonith Action',
+            'advanced' => false
+          },
+          'cluster-delay' => {
+            'name' => 'cluster-delay',
+            'source' => 'pengine',
+            'default' => '60s',
+            'type' => 'time',
+            'shortdesc' => 'Round trip delay over the network (excluding action execution)',
+            'longdesc' => 'The "correct" value will depend on the speed and load of your network and cluster nodes.',
+            'readable_name' => 'Cluster Delay',
+            'advanced' => false
+          },
+          'stop-orphan-resources' => {
+            'name' => 'stop-orphan-resources',
+            'source' => 'pengine',
+            'default' => 'true',
+            'type' => 'boolean',
+            'shortdesc' => 'Should deleted resources be stopped',
+            'longdesc' => 'Should deleted resources be stopped',
+            'readable_name' => 'Stop Orphan Resources',
+            'advanced' => false
+          },
+          'stop-orphan-actions' => {
+            'name' => 'stop-orphan-actions',
+            'source' => 'pengine',
+            'default' => 'true',
+            'type' => 'boolean',
+            'shortdesc' => 'Should deleted actions be cancelled',
+            'longdesc' => 'Should deleted actions be cancelled',
+            'readable_name' => 'Stop Orphan Actions',
+            'advanced' => false
+          },
+          'start-failure-is-fatal' => {
+            'name' => 'start-failure-is-fatal',
+            'source' => 'pengine',
+            'default' => 'true',
+            'type' => 'boolean',
+            'shortdesc' => 'Always treat start failures as fatal',
+            'longdesc' => 'This was the old default. However when set to FALSE, the cluster will instead use the resource\'s failcount and value for resource-failure-stickiness',
+            'readable_name' => 'Start Failure is Fatal',
+            'advanced' => false
+          },
+          'pe-error-series-max' => {
+            'name' => 'pe-error-series-max',
+            'source' => 'pengine',
+            'default' => '-1',
+            'type' => 'integer',
+            'shortdesc' => 'The number of PE inputs resulting in ERRORs to save',
+            'longdesc' => 'Zero to disable, -1 to store unlimited.',
+            'readable_name' => 'PE Error Storage',
+            'advanced' => false
+          },
+          'pe-warn-series-max' => {
+            'name' => 'pe-warn-series-max',
+            'source' => 'pengine',
+            'default' => '5000',
+            'type' => 'integer',
+            'shortdesc' => 'The number of PE inputs resulting in WARNINGs to save',
+            'longdesc' => 'Zero to disable, -1 to store unlimited.',
+            'readable_name' => 'PE Warning Storage',
+            'advanced' => false
+          },
+          'pe-input-series-max' => {
+            'name' => 'pe-input-series-max',
+            'source' => 'pengine',
+            'default' => '4000',
+            'type' => 'integer',
+            'shortdesc' => 'The number of other PE inputs to save',
+            'longdesc' => 'Zero to disable, -1 to store unlimited.',
+            'readable_name' => 'PE Input Storage',
+            'advanced' => false
+          },
+          'enable-acl' => {
+            'name' => 'enable-acl',
+            'source' => 'cib',
+            'default' => 'false',
+            'type' => 'boolean',
+            'shortdesc' => 'Enable CIB ACL',
+            'longdesc' => 'Should pacemaker use ACLs to determine access to cluster',
+            'readable_name' => 'Enable ACLs',
+            'advanced' => false
+          },
+        }
+      elsif code != 200
+        return [400, 'getting properties definition failed']
+      else
+        definition = JSON.parse(out)
+      end
+  
+      definition.each { |name, prop|
+        prop['value'] = properties[name]
+      }
+      return [200, JSON.generate(definition)]
+    rescue
+      return [400, 'unable to get cluster properties']
+    end
+  end
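
Each entry of the returned definition carries the metadata the GUI needs (type, default, enum values, descriptions) plus the live 'value' merged in from the CIB; the inline hash above is only a fallback for clusters whose pcsd predates get_cluster_properties_definition. A sketch of consuming the endpoint, mirroring what refresh_cluster_properties() does later in this commit:

    // Sketch only; the definition shape is the one built above.
    ajax_wrapper({
      url: get_cluster_remote_url() + 'cluster_properties',
      dataType: 'json',
      timeout: pcs_timeout,
      success: function (definition) {
        $.each(definition, function (name, prop) {
          // e.g. "no-quorum-policy (enum): stop"
          console.log(name + ' (' + prop['type'] + '): ' + prop['value']);
        });
      }
    });
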
+
+  post '/managec/:cluster/fix_auth_of_cluster' do
+    clustername = params[:cluster]
+    unless clustername
+      return [400, "cluster name not defined"]
+    end
+
+    nodes = get_cluster_nodes(clustername)
+    tokens_data = add_prefix_to_keys(get_tokens_of_nodes(nodes), "node:")
+
+    retval, out = send_cluster_request_with_token(
+      PCSAuth.getSuperuserAuth(), clustername, "/save_tokens", true,
+      tokens_data, true
+    )
+    if retval == 404
+      return [400, "Old version of PCS/PCSD is running on cluster nodes. Fixing authentication is not supported. Use 'pcs cluster auth' command to authenticate the nodes."]
+    elsif retval != 200
+      return [400, "Authentication failed."]
+    end
+    return [200, "Auhentication of nodes in cluster should be fixed."]
+  end
+
+  post '/managec/:cluster/add_node_to_cluster' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    clustername = params[:cluster]
+    new_node = params["new_nodename"]
+
+    if clustername == $cluster_name
+      if not allowed_for_local_cluster(auth_user, Permissions::FULL)
+        return 403, 'Permission denied'
+      end
+    end
+
+    tokens = read_tokens
+
+    if not tokens.include? new_node
+      return [400, "New node is not authenticated."]
+    end
+
+    # Save the new node's token on all nodes in the cluster the new node is being
+    # added to. Send the token to one node and let the cluster nodes synchronize
+    # it by themselves.
+    token_data = {"node:#{new_node}" => tokens[new_node]}
+    retval, out = send_cluster_request_with_token(
+      # new node doesn't have config with permissions yet
+      PCSAuth.getSuperuserAuth(), clustername, '/save_tokens', true, token_data
+    )
+    # If the cluster runs an old pcsd which doesn't support /save_tokens,
+    # ignore a 404 so as not to prevent the node from being added.
+    if retval != 404 and retval != 200
+      return [400, 'Failed to save the token of the new node in target cluster.']
+    end
+
+    retval, out = send_cluster_request_with_token(
+      auth_user, clustername, "/add_node_all", true, params
+    )
+    if 403 == retval
+      return [retval, out]
+    end
+    if retval != 200
+      return [400, "Failed to add new node '#{new_node}' into cluster '#{clustername}': #{out}"]
+    end
+
+    return [200, "Node added successfully."]
+  end
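
The client side of this route is the reworked create_node() further down in this commit; it boils down to POSTing the new node name to the cluster-scoped URL. A minimal sketch, assuming the node has already been authenticated so its token is known to pcsd:

    // 'new_nodename' is the parameter the route reads; the node name is
    // an example.
    ajax_wrapper({
      type: 'POST',
      url: get_cluster_remote_url() + 'add_node_to_cluster',
      data: {'new_nodename': 'node3'},
      success: function (msg) {
        console.log(msg);  // "Node added successfully."
      },
      error: function (xhr, status, error) {
        alert(xhr.responseText);
      }
    });
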
+
+  post '/managec/:cluster/?*' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    raw_data = request.env["rack.input"].read
+    if params[:cluster]
+      request = "/" + params[:splat].join("/")
+      code, out = send_cluster_request_with_token(
+        auth_user, params[:cluster], request, true, params, true, raw_data
+      )
+
+      # backward compatibility layer BEGIN
+      # This code correctly removes constraints on pcs/pcsd versions 0.9.137 and older
+      redirection = {
+          "/remove_constraint_remote" => "/resource_cmd/rm_constraint",
+          "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule"
+      }
+      if code == 404 and redirection.key?(request)
+        code, out = send_cluster_request_with_token(
+          auth_user,
+          params[:cluster],
+          redirection[request],
+          true,
+          params,
+          false,
+          raw_data
+        )
+      end
+      # backward compatibility layer END
+      return code, out
+    end
+  end
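
This catch-all proxy forwards any POST under /managec/<cluster>/ to the matching handler on a cluster node; the redirection table only papers over two constraint endpoints that were renamed after pcs/pcsd 0.9.137, so callers always use the current names and the retry stays invisible, e.g.:

    // The old-cluster retry happens inside the route above; the
    // constraint id is an example.
    ajax_wrapper({
      type: 'POST',
      url: get_cluster_remote_url() + 'remove_constraint_remote',
      data: {'constraint_id': 'loc_constraint_1'},
      success: function () { Pcs.update(); }
    });
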
+
+  get '/managec/:cluster/?*' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    raw_data = request.env["rack.input"].read
+    if params[:cluster]
+      send_cluster_request_with_token(
+        auth_user,
+        params[:cluster],
+        "/" + params[:splat].join("/"),
+        false,
+        params,
+        true,
+        raw_data
+      )
     end
-    # do not reload nor redirect as that's done in js which called this
   end
 
   get '/' do
@@ -727,48 +1089,6 @@ else
 
 end
 
-def getConfigOptions2(session, cluster_nodes)
-  config_options = {}
-  general_page = []
-#  general_page << ConfigOption.new("Cluster Delay Time", "cluster-delay",  "int", 4, "Seconds") 
-#  general_page << ConfigOption.new("Batch Limit", "cdt",  "int", 4) 
-#  general_page << ConfigOption.new("Default Action Timeout", "cdt",  "int", 4, "Seconds") 
-#  general_page << ConfigOption.new("During timeout should cluster stop all active resources", "res_stop", "radio", "4", "", ["Yes","No"])
-
-#  general_page << ConfigOption.new("PE Error Storage", "res_stop", "radio", "4", "", ["Yes","No"])
-#  general_page << ConfigOption.new("PE Warning Storage", "res_stop", "radio", "4", "", ["Yes","No"])
-#  general_page << ConfigOption.new("PE Input Storage", "res_stop", "radio", "4", "", ["Yes","No"])
-  config_options["general"] = general_page
-
-  pacemaker_page = []
-  pacemaker_page << ConfigOption.new("Batch Limit", "batch-limit",  "int", 4, "jobs", {},  'The number of jobs that pacemaker is allowed to execute in parallel. The "correct" value will depend on the speed and load of your network and cluster nodes.')
-  pacemaker_page << ConfigOption.new("No Quorum Policy", "no-quorum-policy",  "dropdown","" ,"", {"ignore" => "Ignore","freeze" => "Freeze", "stop" => "Stop", "suicide" => "Suicide"}, 'What to do when the cluster does not have quorum. Allowed values:
-  * ignore - continue all resource management
-  * freeze - continue resource management, but don\'t recover resources from nodes not in the affected partition
-  * stop - stop all resources in the affected cluster partition
-  * suicide - fence all nodes in the affected cluster partition')
-  pacemaker_page << ConfigOption.new("Symmetric", "symmetric-cluster", "check",nil ,nil,nil,'Can all resources run on any node by default?')
-  pacemaker_page << ConfigOption.new("Stonith Enabled", "stonith-enabled", "check",nil,nil,nil,'Should failed nodes and nodes with resources that can\'t be stopped be shot? If you value your data, set up a STONITH device and enable this.
-If checked, the cluster will refuse to start resources unless one or more STONITH resources have been configured also.')
-  pacemaker_page << ConfigOption.new("Stonith Action", "stonith-action",  "dropdown","" ,"", {"reboot" => "Reboot","off" => "Off", "poweroff" => "Poweroff"},'Action to send to STONITH device. Allowed values: reboot, off. The value poweroff is also allowed, but is only used for legacy devices.') 
-  pacemaker_page << ConfigOption.new("Cluster Delay", "cluster-delay",  "int", 4,nil,nil,'Round trip delay over the network (excluding action execution). The "correct" value will depend on the speed and load of your network and cluster nodes.') 
-  pacemaker_page << ConfigOption.new("Stop Orphan Resources", "stop-orphan-resources", "check",nil,nil,nil,'Should deleted resources be stopped?')
-  pacemaker_page << ConfigOption.new("Stop Orphan Actions", "stop-orphan-actions", "check",nil,nil,nil,'Should deleted actions be cancelled?'
-                                    )
-  pacemaker_page << ConfigOption.new("Start Failure is Fatal", "start-failure-is-fatal", "check",nil,nil,nil,'When unchecked, the cluster will instead use the resource\'s failcount and value for resource-failure-stickiness.')
-  pacemaker_page << ConfigOption.new("PE Error Storage", "pe-error-series-max", "int", "4",nil,nil,'The number of policy engine (PE) inputs resulting in ERRORs to save. Used when reporting problems.')
-  pacemaker_page << ConfigOption.new("PE Warning Storage", "pe-warn-series-max", "int", "4",nil,nil,'The number of PE inputs resulting in WARNINGs to save. Used when reporting problems.')
-  pacemaker_page << ConfigOption.new("PE Input Storage", "pe-input-series-max", "int", "4",nil,nil,'The number of "normal" PE inputs to save. Used when reporting problems.')
-  pacemaker_page << ConfigOption.new("Enable ACLs", "enable-acl", "check", nil,nil,nil,'Should pacemaker use ACLs to determine access to cluster')
-  config_options["pacemaker"] = pacemaker_page
-
-  allconfigoptions = []
-  config_options.each { |i,k| k.each { |j| allconfigoptions << j } }
-  ConfigOption.getDefaultValues(allconfigoptions)
-  ConfigOption.loadValues(session, allconfigoptions, cluster_nodes)
-  return config_options
-end
-
 class Node
   attr_accessor :active, :id, :name, :hostname
 
@@ -777,114 +1097,6 @@ class Node
   end
 end
 
-
-class ConfigOption
-  attr_accessor :name, :configname, :type, :size, :units, :options, :default, :value, :desc
-  def initialize(name, configname, type="str", size = 10, units = "", options = [], desc = "")
-    @name = name
-    @configname = configname
-    @type = type
-    @size = size
-    @units = units
-    @options = options
-    @desc = desc
-  end
-
-  def self.loadValues(session, cos, node_list)
-    code, output = send_nodes_request_with_token(session, node_list, "get_cib")
-    $logger.info(code)
-    if code != 200
-      $logger.info "Error: unable to load cib"
-      $logger.info output
-      return
-    end
-
-    doc = REXML::Document.new(output)
-
-    cos.each {|co|
-      prop_found = false
-      doc.elements.each("cib/configuration/crm_config/cluster_property_set/nvpair[@name='#{co.configname}']") { |e|
-        co.value = e.attributes["value"]
-        prop_found = true
-      }
-      if prop_found == false
-        co.value = co.default
-      end
-    }
-  end
-
-  def self.getDefaultValues(cos)
-    [PENGINE, CIB_BINARY].each { |command|
-      metadata = `#{command} metadata`
-      begin
-        doc = REXML::Document.new(metadata)
-        cos.each { |co|
-          doc.elements.each("resource-agent/parameters/parameter[@name='#{co.configname}']/content") { |e|
-            co.default = e.attributes["default"]
-            break
-          }
-        }
-      rescue
-        $logger.error("Failed to parse #{command} metadata")
-      end
-    }
-  end
-
-  def checked(option)
-    case type
-    when "radio"
-      val = value
-      if option == "Yes"
-        if val == "true"
-          return "checked"
-        end
-      else
-        if val == "false"
-          return "checked"
-        end
-      end
-    when "check"
-      if value == "true" || value == "on"
-        return "checked"
-      else
-        return ""
-      end
-    when "dropdown"
-      if value == option
-        return "selected"
-      end
-    end
-  end
-
-  def html
-    paramname = "config[#{configname}]"
-    hidden_paramname = "hidden[#{configname}]"
-    case type
-    when "int"
-      return "<input name=\"#{paramname}\" value=\"#{value}\" type=text size=#{size}>"
-    when "str"
-      return "<input name=\"#{paramname}\" value=\"#{value}\" type=text size=#{size}>"
-    when "radio"
-      ret = ""
-      options.each {|option|
-        ret += "<input type=radio #{checked(option)} name=\"#{paramname}\" value=\"#{option}\">#{option}"
-      }
-      return ret
-    when "check"
-      ret = "<input type=checkbox name=\"#{paramname}\" " + self.checked(nil) + ">"
-      ret += "<input type=hidden name=\"#{hidden_paramname}\" value=\"off\">"
-      return ret
-    when "dropdown"
-      ret = "<select name=\"#{paramname}\">"
-      options.each {|key, option|
-        ret += "<option #{checked(key)} value=\"#{key}\">#{option}</option>"
-      }
-      ret += "</select>"
-      return ret
-    end
-  end
-end
-
 helpers do
   def h(text)
     Rack::Utils.escape_html(text)
diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
index c3e8e80..237d7ce 100644
--- a/pcsd/public/js/nodes-ember.js
+++ b/pcsd/public/js/nodes-ember.js
@@ -69,7 +69,7 @@ Pcs = Ember.Application.createWithMixins({
         return;
       }
       Ember.debug("Empty Cluster Name");
-      $.ajax({
+      ajax_wrapper({
         url: "/clusters_overview",
         dataType: "json",
         timeout: 20000,
@@ -102,13 +102,12 @@ Pcs = Ember.Application.createWithMixins({
       });
       return;
     }
-    $.ajax({
+    ajax_wrapper({
       url: "cluster_status",
       dataType: "json",
       success: function(data) {
         Pcs.resourcesContainer.update(data);
         Pcs.nodesController.update(data);
-        Pcs.settingsController.update(data);
         Pcs.aclsController.update(data);
         Pcs.set("cluster_settings",data.cluster_settings);
         Pcs.set('need_ring1_address', false);
@@ -127,6 +126,7 @@ Pcs = Ember.Application.createWithMixins({
           var cur_resource = self.get('cur_resource');
           var resource_map = self.get('resource_map');
           if (first_run) {
+            refresh_cluster_properties();
             setup_node_links();
             Pcs.nodesController.load_node($('#node_list_row').find('.node_selected').first(),true);
             Pcs.aclsController.load_role($('#acls_list_row').find('.node_selected').first(), true);
@@ -205,6 +205,47 @@ Pcs = Ember.Application.createWithMixins({
   }
 });
 
+Pcs.ValueSelectorComponent = Ember.Component.extend({
+  tagName: 'select',
+  attributeBindings: ['name'],
+  name: null,
+  prompt: "Select one value",
+  show_prompt: true,
+  content: [],
+  value: null,
+  _change: function() {
+    var selectedIndex = this.$()[0].selectedIndex,
+      content = this.get('content'),
+      prompt = this.get('show_prompt');
+
+    if (!content || !content.get('length')) { return; }
+    if (prompt && selectedIndex === 0) { this.set('value', ""); return; }
+
+    if (prompt) { selectedIndex -= 1; }
+    this.set('value', content.objectAt(selectedIndex)['value']);
+  },
+  init: function() {
+    this._super();
+    this.on("change", this, this._change);
+  }
+});
+
+Pcs.ClusterPropertyComponent = Ember.Component.extend({
+  tagName: 'tr',
+  prop: null,
+  attributeBindings: ['name'],
+  boolean_options: [
+    {
+      name: "true",
+      value: "true"
+    },
+    {
+      name: "false",
+      value: "false"
+    }
+  ]
+});
+
 Pcs.UtilizationTableComponent = Ember.Component.extend({
   entity: null,
   type: "node", // node or resource
@@ -461,7 +502,7 @@ Pcs.resourcesContainer = Ember.Object.create({
       value: value
     };
 
-    $.ajax({
+    ajax_wrapper({
       type: 'POST',
       url: get_cluster_remote_url() + 'add_meta_attr_remote',
       data: data,
@@ -479,7 +520,10 @@ Pcs.resourcesContainer = Ember.Object.create({
   },
 
   enable_resource: function(resource_id) {
-    $.ajax({
+    if (resource_id == null) {
+      return;
+    }
+    ajax_wrapper({
       type: 'POST',
       url: get_cluster_remote_url() + 'resource_start',
       data: {resource: resource_id},
@@ -502,7 +546,10 @@ Pcs.resourcesContainer = Ember.Object.create({
   },
 
   disable_resource: function(resource_id) {
-    $.ajax({
+    if (resource_id == null) {
+      return;
+    }
+    ajax_wrapper({
       type: 'POST',
       url: get_cluster_remote_url() + 'resource_stop',
       data: {resource: resource_id},
@@ -1057,8 +1104,47 @@ Pcs.ResourcesRoute = Ember.Route.extend({
 
 Pcs.Setting = Ember.Object.extend({
   name: null,
+  readable_name: null,
+  form_name: function() {
+    return "config[" + this.get("name") + "]";
+  }.property("name"),
   value: null,
-  type: null
+  cur_val: Ember.computed.oneWay('value'),
+  type: null,
+  source: "",
+  default: null,
+  advanced: false,
+  longdesc: "",
+  shortdesc: "",
+  description: function() {
+    var self = this;
+    var desc = $("<div>").text(self.get("shortdesc")).html();
+    if (self.get("longdesc")) {
+      desc += "<br><br>";
+      desc += $("<div>").text(self.get("longdesc")).html();
+    }
+    desc += "<br><br>";
+    desc += $("<div>").text("Default value: " + self.get("default")).html();
+    return desc;
+  }.property("longdesc", "shortdesc"),
+  is_boolean: function() {
+    return (this.get("type") == "boolean");
+  }.property("type"),
+  is_enum: function() {
+    return (this.get("type") == "enum");
+  }.property("type"),
+  enum: [],
+  enum_show: function() {
+    var self = this;
+    var out = [];
+    $.each(self.get("enum"), function(_, val) {
+      out.push({
+        name: val,
+        value: val
+      });
+    });
+    return out;
+  }.property("enum. at each")
 });
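
Pcs.Setting now mirrors one entry of the properties definition served by pcsd, deriving form names, HTML-escaped descriptions and selector content from it. For instance, using the 'no-quorum-policy' entry from the fallback definition shown earlier (a sketch; values abridged):

    var prop = Pcs.Setting.create({
      name: 'no-quorum-policy',
      readable_name: 'No Quorum Policy',
      type: 'enum',
      'enum': ['stop', 'freeze', 'ignore', 'suicide'],
      'default': 'stop',
      value: 'stop'
    });
    prop.get('form_name');  // "config[no-quorum-policy]"
    prop.get('is_enum');    // true
    prop.get('enum_show');  // [{name: "stop", value: "stop"}, ...]
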
 
 Pcs.Clusternode = Ember.Object.extend({
@@ -1657,24 +1743,74 @@ Pcs.aclsController = Ember.ArrayController.createWithMixins({
   }
 });
 
-Pcs.settingsController = Ember.ArrayController.create({
-  content: [],
-  update: function(data) {
+Pcs.settingsController = Ember.Controller.create({
+  properties: [],
+  filtered: [],
+  show_advanced: false,
+  filter: "",
+  update: function(properties_definition) {
     var self = this;
-    var settings = {};
-    self.set('content',[]);
-    if (data["cluster_settings"]) {
-      $.each(data["cluster_settings"], function(k2, v2) {
-        var setting = Pcs.Setting.create({
-          name: k2,
-          value: v2
-        });
-        self.pushObject(setting);
-      });
-    }
+    var new_properties = [];
+    var property;
+    var value;
+    $.each(properties_definition, function(_, prop_def) {
+      property = Pcs.Setting.create(prop_def);
+      value = property.get("value");
+      if (value) {
+        switch (property.get("type")) {
+          case "boolean":
+            value = (is_cib_true(value)) ? "true" : "false";
+            break;
+          case "enum":
+            if (property.get("enum").indexOf(value) == -1) {
+              property.get("enum").push(value);
+            }
+        }
+        property.set("value", value);
+      }
+      new_properties.pushObject(property);
+    });
+    // first basic and then advanced
+    self.set("properties", new_properties.sort(function(a,b) {
+      if (!a.get("advanced") && b.get("advanced")) {
+        return -1;
+      } else if (a.get("advanced") && !b.get("advanced")) {
+        return 1;
+      } else {
+        return a.get('name').localeCompare(b.get('name'));
+      }
+    }));
   }
 });
 
+Pcs.settingsController.reopen({
+  filtered: function() {
+    var self = this;
+    var substr = self.get("filter").toLowerCase();
+    
+    var to_show = [];
+    $.each(self.get("properties"), function(_, e) {
+      if (self.get("show_advanced")) {
+        to_show.pushObject(e);
+      } else if (!e.get("advanced")) {
+        to_show.pushObject(e);
+      }
+    });
+
+    if (!substr) {
+      return to_show;
+    }
+    
+    var filtered = [];
+    $.each(to_show, function(_, e) {
+      if (e.get("name").toLowerCase().includes(substr) || e.get("readable_name").toLowerCase().includes(substr)) {
+        filtered.pushObject(e);
+      }
+    });
+    return filtered;
+  }.property("properties", "filter", "show_advanced")
+});
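
The 'filtered' computed property recomputes whenever the property list, the search string or the advanced toggle changes, so a template bound to it updates live. A sketch of driving it from code (in the GUI these values are set through bound form controls):

    Pcs.settingsController.set('show_advanced', false);
    Pcs.settingsController.set('filter', 'quorum');
    // Recomputed automatically: non-advanced properties whose name or
    // readable_name contains "quorum".
    Pcs.settingsController.get('filtered').forEach(function (prop) {
      console.log(prop.get('name'));
    });
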
+
 Pcs.selectedNodeController = Ember.Object.createWithMixins({
   node: null,
   reset: function() {
diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
index 4412e49..25cc1ac 100644
--- a/pcsd/public/js/pcsd.js
+++ b/pcsd/public/js/pcsd.js
@@ -1,11 +1,21 @@
 var pcs_timeout = 30000;
+var login_dialog_opened = false;
+var ajax_queue = Array();
 
 function curResource() {
-  return Pcs.resourcesContainer.get('cur_resource').get('id')
+  var obj = Pcs.resourcesContainer.get('cur_resource');
+  if (obj == null) {
+    return null;
+  }
+  return obj.get('id');
 }
 
 function curStonith() {
-  return Pcs.resourcesContainer.get('cur_fence').get('id')
+  var obj = Pcs.resourcesContainer.get('cur_fence');
+  if (obj == null) {
+    return null;
+  }
+  return obj.get('id');
 }
 
 function configure_menu_show(item) {
@@ -112,7 +122,7 @@ function create_group() {
       "Create Group": function() {
         var data = $('#add_group > form').serialize();
         var url = get_cluster_remote_url() + "add_group";
-        $.ajax({
+        ajax_wrapper({
           type: "POST",
           url: url,
           data: data,
@@ -179,9 +189,9 @@ function checkAddingNode(){
     return false;
   }
 
-  $.ajax({
-    type: 'POST',
-    url: '/remote/check_gui_status',
+  ajax_wrapper({
+    type: 'GET',
+    url: '/manage/check_pcsd_status',
     data: {"nodes": nodeName},
     timeout: pcs_timeout,
     success: function (data) {
@@ -205,10 +215,9 @@ function checkAddingNode(){
 
 function create_node(form) {
   var dataString = $(form).serialize();
-  dataString += "&clustername=" + get_cluster_name();
-  $.ajax({
+  ajax_wrapper({
     type: "POST",
-    url: "/remote/add_node_to_cluster",
+    url: get_cluster_remote_url() + "add_node_to_cluster",
     data: dataString,
     success: function(returnValue) {
       $("#add_node_submit_btn").button("option", "disabled", false);
@@ -235,7 +244,7 @@ function create_resource(form, update, stonith) {
   else
     name = "resource"
 
-  $.ajax({
+  ajax_wrapper({
     type: "POST",
     url: url,
     data: dataString,
@@ -417,26 +426,6 @@ function get_checked_ids_from_nodelist(nodelist_id) {
   return ids;
 }
 
-function remote_node_update() {
-  node = $('#node_info_header_title_name').first().text();
-  $.ajax({
-    type: 'GET',
-    url: '/remote/status_all',
-    timeout: pcs_timeout,
-    success: function (data) {
-
-      data = jQuery.parseJSON(data);
-      node_data = data[node];
-
-      local_node_update(node, data);
-//      window.setTimeout(remote_node_update,pcs_timeout);
-    },
-    error: function (XMLHttpRequest, textStatus, errorThrown) {
-//      window.setTimeout(remote_node_update, 60000);
-    }
-  });
-}
-
 function local_node_update(node, data) {
   node_data = data[node];
 
@@ -495,7 +484,7 @@ function fade_in_out(id) {
 function node_link_action(link_selector, url, label) {
   var node = $.trim($("#node_info_header_title_name").text());
   fade_in_out(link_selector);
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: url,
     data: {"name": node},
@@ -513,7 +502,9 @@ function node_link_action(link_selector, url, label) {
 function setup_node_links() {
   Ember.debug("Setup node links");
   $("#node_start").click(function() {
-    node_link_action("#node_start", "/remote/cluster_start", "start");
+    node_link_action(
+      "#node_start", get_cluster_remote_url() + "cluster_start", "start"
+    );
   });
   $("#node_stop").click(function() {
     var node = $.trim($("#node_info_header_title_name").text());
@@ -521,13 +512,21 @@ function setup_node_links() {
     node_stop(node, false);
   });
   $("#node_restart").click(function() {
-    node_link_action("#node_restart", "/remote/node_restart", "restart");
+    node_link_action(
+      "#node_restart", get_cluster_remote_url() + "node_restart", "restart"
+    );
   });
   $("#node_standby").click(function() {
-    node_link_action("#node_standby", "/remote/node_standby", "standby");
+    node_link_action(
+      "#node_standby", get_cluster_remote_url() + "node_standby", "standby"
+    );
   });
   $("#node_unstandby").click(function() {
-    node_link_action("#node_unstandby", "/remote/node_unstandby", "unstandby");
+    node_link_action(
+      "#node_unstandby",
+      get_cluster_remote_url() + "node_unstandby",
+      "unstandby"
+    );
   });
 }
 
@@ -537,9 +536,9 @@ function node_stop(node, force) {
   if (force) {
     data["force"] = force;
   }
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
-    url: '/remote/cluster_stop',
+    url: get_cluster_remote_url() + 'cluster_stop',
     data: data,
     timeout: pcs_timeout,
     success: function() {
@@ -555,7 +554,7 @@ function node_stop(node, force) {
         */
         return;
       }
-      var message = "Unable to stop node '" + node + " " + ajax_simple_error(
+      var message = "Unable to stop node '" + node + "' " + ajax_simple_error(
         xhr, status, error
       );
       if (message.indexOf('--force') == -1) {
@@ -583,8 +582,11 @@ function disable_resource() {
 
 function cleanup_resource() {
   var resource = curResource();
+  if (resource == null) {
+    return;
+  }
   fade_in_out("#resource_cleanup_link");
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'resource_cleanup',
     data: {"resource": resource},
@@ -601,8 +603,11 @@ function cleanup_resource() {
 
 function cleanup_stonith() {
   var resource = curStonith();
+  if (resource == null) {
+    return;
+  }
   fade_in_out("#stonith_cleanup_link");
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'resource_cleanup',
     data: {"resource": resource},
@@ -623,9 +628,9 @@ function checkExistingNode() {
     node = e.value;
   });
 
-  $.ajax({
-    type: 'POST',
-    url: '/remote/check_gui_status',
+  ajax_wrapper({
+    type: 'GET',
+    url: '/manage/check_pcsd_status',
     data: {"nodes": node},
     timeout: pcs_timeout,
     success: function (data) {
@@ -647,16 +652,16 @@ function checkClusterNodes() {
     }
   });
 
-  $.ajax({
-    type: 'POST',
-    url: '/remote/check_gui_status',
+  ajax_wrapper({
+    type: 'GET',
+    url: '/manage/check_pcsd_status',
     data: {"nodes": nodes.join(",")},
     timeout: pcs_timeout,
     success: function (data) {
       mydata = jQuery.parseJSON(data);
-      $.ajax({
-        type: 'POST',
-        url: '/remote/get_sw_versions',
+      ajax_wrapper({
+        type: 'GET',
+        url: '/manage/get_nodes_sw_versions',
         data: {"nodes": nodes.join(",")},
         timeout: pcs_timeout,
         success: function(data) {
@@ -676,9 +681,9 @@ function checkClusterNodes() {
 
 function auth_nodes(dialog) {
   $("#auth_failed_error_msg").hide();
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
-    url: '/remote/auth_gui_against_nodes',
+    url: '/manage/auth_gui_against_nodes',
     data: dialog.find("#auth_nodes_form").serialize(),
     timeout: pcs_timeout,
     success: function (data) {
@@ -849,7 +854,23 @@ function add_existing_dialog() {
 function update_existing_cluster_dialog(data) {
   for (var i in data) {
     if (data[i] == "Online") {
-      $('#add_existing_cluster_form').submit();
+      ajax_wrapper({
+        type: "POST",
+        url: "/manage/existingcluster",
+        timeout: pcs_timeout,
+        data: $('#add_existing_cluster_form').serialize(),
+        success: function(data) {
+          if (data) {
+            alert("Operation Successful!\n\nWarnings:\n" + data);
+          }
+          $("#add_existing_cluster.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")});
+          Pcs.update();
+        },
+        error: function (xhr, status, error) {
+          alert(xhr.responseText);
+          $("#add_existing_submit_btn").button("option", "disabled", false);
+        }
+      });
       return;
     } else if (data[i] == "Unable to authenticate") {
       auth_nodes_dialog([i], function() {$("#add_existing_submit_btn").trigger("click");});
@@ -985,7 +1006,6 @@ function update_create_cluster_dialog(nodes, version_info) {
   else {
     $("#addr0_addr1_mismatch_error_msg").hide();
   }
-
   if(versions) {
     if(cman_nodes.length > 0 && transport == "udpu") {
       if(noncman_nodes.length < 1 && ring1_nodes.length < 1) {
@@ -1025,7 +1045,23 @@ function update_create_cluster_dialog(nodes, version_info) {
   }
 
   if (good_nodes != 0 && cant_connect_nodes == 0 && cant_auth_nodes.length == 0 && cluster_name != "" && addr1_match == 1 && versions_check_ok == 1) {
-    $('#create_new_cluster_form').submit();
+    ajax_wrapper({
+      type: "POST",
+      url: "/manage/newcluster",
+      timeout: pcs_timeout,
+      data: $('#create_new_cluster_form').serialize(),
+      success: function(data) {
+        if (data) {
+          alert("Operation Successful!\n\nWarnings:\n" + data);
+        }
+        $("#create_new_cluster.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")});
+        Pcs.update();
+      },
+      error: function (xhr, status, error) {
+        alert(xhr.responseText);
+        $("#create_cluster_submit_btn").button("option", "disabled", false);
+      }
+    });
   } else {
     $("#create_cluster_submit_btn").button("option", "disabled", false);
   }
@@ -1167,7 +1203,7 @@ function load_agent_form(resource_id, stonith) {
 
   var data = {resource: resource_id};
 
-  $.ajax({
+  ajax_wrapper({
     type: 'GET',
     url: url,
     data: data,
@@ -1207,14 +1243,14 @@ function remove_cluster(ids) {
   $.each(ids, function(_, cluster) {
     data[ "clusterid-" + cluster] = true;
   });
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: '/manage/removecluster',
     data: data,
     timeout: pcs_timeout,
     success: function () {
       $("#dialog_verify_remove_clusters.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")});
-      location.reload();
+      Pcs.update();
     },
     error: function (xhr, status, error) {
       alert("Unable to remove cluster: " + res + " ("+error+")");
@@ -1232,7 +1268,7 @@ function remove_nodes(ids, force) {
     data["force"] = force;
   }
 
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'remove_nodes',
     data: data,
@@ -1273,21 +1309,21 @@ function remove_nodes(ids, force) {
 }
 
 function remove_resource(ids, force) {
-  var data = {
-    no_error_if_not_exists: true
-  };
+  var data = {};
   if (force) {
     data["force"] = force;
   }
-  var res = "";
-  for (var i=0; i<ids.length; i++) {
-    res += ids[i] + ", ";
-    var resid_name = "resid-" + ids[i];
-    data[resid_name] = true;
-  }
-  res = res.slice(0,-2);
+  var res_obj;
+  $.each(ids, function(_, id) {
+    res_obj = Pcs.resourcesContainer.get_resource_by_id(id);
+    if (!res_obj) {
+      return true; // continue
+    } else if ($.inArray(res_obj.get("parent_id"), ids) == -1) {
+      data["resid-" + id] = true;
+    }
+  });
 
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'remove_resource',
     data: data,
@@ -1328,7 +1364,7 @@ function add_remove_fence_level(parent_id,remove) {
     data["node"] = Pcs.nodesController.cur_node.name;
   }
   fade_in_out(parent_id.parent());
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'add_fence_level_remote',
     data: data,
@@ -1369,7 +1405,7 @@ function remove_node_attr(parent_id) {
   data["value"] = ""; // empty value will remove attribute
   fade_in_out(parent_id.parent());
 
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'add_node_attr_remote',
     data: data,
@@ -1394,7 +1430,7 @@ function add_node_attr(parent_id) {
   data["value"] = $(parent_id + " input[name='new_node_attr_value']").val();
   fade_in_out($(parent_id));
 
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'add_node_attr_remote',
     data: data,
@@ -1419,7 +1455,7 @@ function node_maintenance(node) {
     key: "maintenance",
     value: "on"
   };
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'add_node_attr_remote',
     data: data,
@@ -1442,7 +1478,7 @@ function node_unmaintenance(node) {
     key: "maintenance",
     value: ""
   };
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'add_node_attr_remote',
     data: data,
@@ -1461,6 +1497,9 @@ function node_unmaintenance(node) {
 
 function remove_meta_attr(parent_id) {
   var resource_id = curResource();
+  if (resource_id == null) {
+    return;
+  }
   var attr = parent_id.attr("meta_attr_key");
   fade_in_out(parent_id.parent());
   Pcs.resourcesContainer.update_meta_attr(resource_id, attr);
@@ -1468,6 +1507,9 @@ function remove_meta_attr(parent_id) {
 
 function add_meta_attr(parent_id) {
   var resource_id = curResource();
+  if (resource_id == null) {
+    return;
+  }
   var attr = $(parent_id + " input[name='new_meta_key']").val();
   var value = $(parent_id + " input[name='new_meta_value']").val();
   fade_in_out($(parent_id));
@@ -1493,7 +1535,7 @@ function add_constraint(parent_id, c_type, force) {
   }
   fade_in_out($(parent_id));
 
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + (
       data['node_id'] && (data['node_id'].trim().indexOf(' ') != -1)
@@ -1552,7 +1594,7 @@ function add_constraint_set(parent_id, c_type, force) {
   }
   fade_in_out($(parent_id))
 
-  $.ajax({
+  ajax_wrapper({
     type: "POST",
     url: get_cluster_remote_url() + "add_constraint_set_remote",
     data: data,
@@ -1601,7 +1643,7 @@ function reset_constraint_set_form(parent_id) {
 
 function remove_constraint(id) {
   fade_in_out($("[constraint_id='"+id+"']").parent());
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'remove_constraint_remote',
     data: {"constraint_id": id},
@@ -1620,7 +1662,7 @@ function remove_constraint(id) {
 
 function remove_constraint_rule(id) {
   fade_in_out($("[rule_id='"+id+"']").parent());
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'remove_constraint_rule_remote',
     data: {"rule_id": id},
@@ -1641,7 +1683,7 @@ function add_acl_role(form) {
   var data = {}
   data["name"] = $(form).find("input[name='name']").val().trim();
   data["description"] = $(form).find("input[name='description']").val().trim();
-  $.ajax({
+  ajax_wrapper({
     type: "POST",
     url: get_cluster_remote_url() + "add_acl_role",
     data: data,
@@ -1664,7 +1706,7 @@ function remove_acl_roles(ids) {
   for (var i = 0; i < ids.length; i++) {
     data["role-" + i] = ids[i];
   }
-  $.ajax({
+  ajax_wrapper({
     type: "POST",
     url: get_cluster_remote_url() + "remove_acl_roles",
     data: data,
@@ -1707,7 +1749,7 @@ function add_acl_item(parent_id, item_type) {
       break;
   }
   fade_in_out($(parent_id));
-  $.ajax({
+  ajax_wrapper({
     type: "POST",
     url: get_cluster_remote_url() + 'add_acl',
     data: data,
@@ -1743,7 +1785,7 @@ function remove_acl_item(id,item) {
       break;
   }
 
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'remove_acl',
     data: data,
@@ -1760,23 +1802,54 @@ function remove_acl_item(id,item) {
   });
 }
 
-function update_cluster_settings(form) {
-  var data = form.serialize();
-  $('html, body, form, :input, :submit').css("cursor","wait");
-  $.ajax({
+function update_cluster_settings() {
+  $("#cluster_properties button").prop("disabled", true);
+  var data = {
+    'hidden[hidden_input]': null // this is needed for backward compatibility 
+  };
+  $.each(Pcs.settingsController.get("properties"), function(_, prop) {
+    data[prop.get("form_name")] = prop.get("cur_val");
+  });
+  show_loading_screen();
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'update_cluster_settings',
     data: data,
     timeout: pcs_timeout,
     success: function() {
-      window.location.reload();
+      refresh_cluster_properties();
     },
     error: function (xhr, status, error) {
       alert(
         "Error updating configuration "
         + ajax_simple_error(xhr, status, error)
       );
-      $('html, body, form, :input, :submit').css("cursor","auto");
+      hide_loading_screen();
+      $("#cluster_properties button").prop("disabled", false);
+    }
+  });
+}
+
+function refresh_cluster_properties() {
+  Pcs.settingsController.set("filter", "");
+  $("#cluster_properties button").prop("disabled", true);
+  ajax_wrapper({
+    url: get_cluster_remote_url() + "cluster_properties",
+    timeout: pcs_timeout,
+    dataType: "json",
+    success: function(data) {
+      Pcs.settingsController.update(data);
+    },
+    error: function (xhr, status, error) {
+      alert(
+        "Unable to get cluster properties: "
+        + ajax_simple_error(xhr, status, error)
+      );
+      Pcs.settingsController.update({});
+    },
+    complete: function() {
+      hide_loading_screen();
+      $("#cluster_properties button").prop("disabled", false);
     }
   });
 }
@@ -1961,10 +2034,9 @@ function htmlEncode(s)
 function fix_auth_of_cluster() {
   show_loading_screen();
   var clustername = Pcs.clusterController.cur_cluster.name;
-  $.ajax({
-    url: "/remote/fix_auth_of_cluster",
+  ajax_wrapper({
+    url: get_cluster_remote_url(clustername) + "fix_auth_of_cluster",
     type: "POST",
-    data: "clustername=" + clustername,
     success: function(data) {
       hide_loading_screen();
       Pcs.update();
@@ -2055,8 +2127,11 @@ function tree_view_checkbox_onchange(element) {
 }
 
 function resource_master(resource_id) {
+  if (resource_id == null) {
+    return;
+  }
   show_loading_screen();
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'resource_master',
     data: {resource_id: resource_id},
@@ -2074,8 +2149,11 @@ function resource_master(resource_id) {
 }
 
 function resource_clone(resource_id) {
+  if (resource_id == null) {
+    return;
+  }
   show_loading_screen();
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'resource_clone',
     data: {resource_id: resource_id},
@@ -2093,12 +2171,15 @@ function resource_clone(resource_id) {
 }
 
 function resource_unclone(resource_id) {
+  if (resource_id == null) {
+    return;
+  }
   show_loading_screen();
   var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
   if (resource_obj.get('class_type') == 'clone') {
     resource_id = resource_obj.get('member').get('id');
   }
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'resource_unclone',
     data: {resource_id: resource_id},
@@ -2116,8 +2197,11 @@ function resource_unclone(resource_id) {
 }
 
 function resource_ungroup(group_id) {
+  if (group_id == null) {
+    return;
+  }
   show_loading_screen();
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'resource_ungroup',
     data: {group_id: group_id},
@@ -2135,6 +2219,9 @@ function resource_ungroup(group_id) {
 }
 
 function resource_change_group(resource_id, group_id) {
+  if (resource_id == null) {
+    return;
+  }
   show_loading_screen();
   var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
   var data = {
@@ -2151,7 +2238,7 @@ function resource_change_group(resource_id, group_id) {
     }
   }
 
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: get_cluster_remote_url() + 'resource_change_group',
     data: data,
@@ -2180,6 +2267,137 @@ function ajax_simple_error(xhr, status, error) {
   return message;
 }
 
+function ajax_wrapper(options) {
+  // get original callback functions
+  var error_original = function(xhr, status, error) {};
+  if (options.error) {
+    error_original = options.error;
+  }
+  var complete_original = function(xhr, status) {};
+  if (options.complete) {
+    complete_original = options.complete;
+  }
+
+  // prepare new callback functions
+  var options_new = $.extend(true, {}, options);
+  // display login dialog on error
+  options_new.error = function(xhr, status, error) {
+    if (xhr.status == 401) {
+      ajax_queue.push(options);
+      if (!login_dialog_opened) {
+        login_dialog(function() {
+          var item;
+          while (ajax_queue.length > 0) {
+            item = ajax_queue.shift();
+            ajax_wrapper(item);
+          }
+        });
+      }
+    }
+    else {
+      error_original(xhr, status, error);
+    }
+  }
+  // Do not run complete function if login dialog is open.
+  // Once user is logged in again, the original complete function will be run
+  // in repeated ajax call run by login dialog on success.
+  options_new.complete = function(xhr, status) {
+    if (xhr.status == 401) {
+      return;
+    }
+    else {
+      complete_original(xhr, status);
+    }
+  }
+
+  // run ajax request or put it into a queue
+  if (login_dialog_opened) {
+    ajax_queue.push(options);
+  }
+  else {
+    $.ajax(options_new);
+  }
+}
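
ajax_wrapper is a drop-in replacement for $.ajax used throughout this commit: a 401 response queues the original options and opens the login dialog once, the queue is replayed after a successful re-login, and while the dialog is open new requests are queued instead of sent. Callers therefore never handle session expiry themselves:

    // Same call shape as $.ajax; on a 401 this call is queued and
    // transparently retried after re-login.
    ajax_wrapper({
      type: 'GET',
      url: get_cluster_remote_url() + 'cluster_status',
      dataType: 'json',
      success: function (data) { Pcs.nodesController.update(data); },
      error: function (xhr, status, error) {
        // only reached for non-401 failures
        alert(ajax_simple_error(xhr, status, error));
      }
    });
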
+
+function login_dialog(on_success) {
+  var ok_button_id = "login_form_ok";
+  var ok_button_selector = "#" + ok_button_id;
+  var buttons = [
+    {
+      text: "Log In",
+      id: ok_button_id,
+      click: function() {
+        var me = $(this);
+        var my_dialog = $(this).dialog()
+        my_dialog.find("#login_form_denied").hide();
+        $(ok_button_selector).button("option", "disabled", true);
+        $.ajax({
+          type: "POST",
+          url: "/login",
+          data: my_dialog.find("#login_form").serialize(),
+          complete: function() {
+            $(ok_button_selector).button("option", "disabled", false);
+          },
+          success: function() {
+            my_dialog.find("#login_form_username").val("");
+            my_dialog.find("#login_form_password").val("");
+            me.dialog("destroy");
+            login_dialog_opened = false;
+            on_success();
+          },
+          error: function(xhr, status, error) {
+            if (xhr.status == 401) {
+              my_dialog.find("#login_form_denied").show();
+              my_dialog.find("#login_form_password").val("");
+            }
+            else {
+              alert("Login error " + ajax_simple_error(xhr, status, error));
+            }
+          },
+        });
+      },
+    },
+    {
+      text: "Cancel",
+      id: "login_form_cancel",
+      // cancel will close the dialog the same way as X button does
+      click: function() {
+        $(this).dialog("close");
+      },
+    },
+  ];
+  var dialog_obj = $("#dialog_login").dialog({
+    title: "Log In",
+    modal: true,
+    resizable: true,
+    width: 400,
+    buttons: buttons,
+    open: function(event, ui) {
+      login_dialog_opened = true;
+    },
+    create: function(event, ui) {
+      login_dialog_opened = true;
+    },
+    // make sure to logout the user on dialog close
+    close: function(event, ui) {
+      login_dialog_opened = false;
+      location = "/logout";
+    },
+  });
+  dialog_obj.find("#login_form_denied").hide();
+  // submit on enter
+  dialog_obj.keypress(function(e) {
+    if (
+      e.keyCode == $.ui.keyCode.ENTER
+      &&
+      !dialog_obj.parent().find(ok_button_selector).button("option", "disabled")
+    ) {
+      dialog_obj.parent().find(ok_button_selector).trigger("click");
+      return false;
+    }
+  });
+}
+
 var permissions_current_cluster;
 
 function permissions_load_all() {
@@ -2218,7 +2436,7 @@ function permissions_load_all() {
 
 function permissions_load_cluster(cluster_name, callback) {
   var element_id = "permissions_cluster_" + cluster_name;
-  $.ajax({
+  ajax_wrapper({
     type: "GET",
     url: "/permissions_cluster_form/" + cluster_name,
     timeout: pcs_timeout,
@@ -2264,9 +2482,9 @@ function permissions_show_cluster(cluster_name, list_row) {
 function permissions_save_cluster(form) {
   var dataString = $(form).serialize();
   var cluster_name = permissions_get_clustername(form);
-  $.ajax({
+  ajax_wrapper({
     type: "POST",
-    url: "/permissions_save/",
+    url: get_cluster_remote_url(cluster_name) + "permissions_save",
     timeout: pcs_timeout,
     data: dataString,
     success: function() {
@@ -2477,7 +2695,7 @@ function set_utilization(type, entity_id, name, value) {
   } else return false;
   var url = get_cluster_remote_url() + "set_" + type + "_utilization";
 
-  $.ajax({
+  ajax_wrapper({
     type: 'POST',
     url: url,
     data: data,
@@ -2500,3 +2718,20 @@ function is_integer(str) {
   var n = ~~Number(str);
   return String(n) === str;
 }
+
+Ember.Handlebars.helper('selector-helper', function (content, value, place_holder, options) {
+  var out = "";
+  var line;
+  if (place_holder) {
+    out += '<option value="">' + place_holder + '</option>'; 
+  }
+  $.each(content, function(_, opt){
+    line = '<option value="' + opt["value"] + '"';
+    if (value == opt["value"]) {
+      line += ' selected="selected"'
+    }
+    line += ">" + Handlebars.Utils.escapeExpression(opt["name"]) + "</option>";
+    out += line + "\n";
+  });
+  return new Handlebars.SafeString(out);
+});
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index 4b8505b..abf2683 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -3,6 +3,7 @@ require 'uri'
 require 'open4'
 require 'set'
 require 'timeout'
+require 'rexml/document'
 
 require 'pcs.rb'
 require 'resource.rb'
@@ -13,14 +14,13 @@ require 'permissions.rb'
 require 'auth.rb'
 
 # Commands for remote access
-def remote(params, request, session)
+def remote(params, request, auth_user)
   remote_cmd_without_pacemaker = {
       :status => method(:node_status),
       :status_all => method(:status_all),
       :cluster_status => method(:cluster_status_remote),
       :auth => method(:auth),
       :check_auth => method(:check_auth),
-      :fix_auth_of_cluster => method(:fix_auth_of_cluster),
       :setup_cluster => method(:setup_cluster),
       :create_cluster => method(:create_cluster),
       :get_quorum_info => method(:get_quorum_info),
@@ -46,25 +46,23 @@ def remote(params, request, session)
       :cluster_enable => method(:cluster_enable),
       :cluster_disable => method(:cluster_disable),
       :resource_status => method(:resource_status),
-      :check_gui_status => method(:check_gui_status),
       :get_sw_versions => method(:get_sw_versions),
       :node_available => method(:remote_node_available),
-      :add_node_all => lambda { |params_, request_, session_|
-        remote_add_node(params_, request_, session_, true)
+      :add_node_all => lambda { |params_, request_, auth_user_|
+        remote_add_node(params_, request_, auth_user_, true)
       },
-      :add_node => lambda { |params_, request_, session_|
-        remote_add_node(params_, request_, session_, false)
+      :add_node => lambda { |params_, request_, auth_user_|
+        remote_add_node(params_, request_, auth_user_, false)
       },
       :remove_nodes => method(:remote_remove_nodes),
       :remove_node => method(:remote_remove_node),
       :cluster_destroy => method(:cluster_destroy),
       :get_wizard => method(:get_wizard),
       :wizard_submit => method(:wizard_submit),
-      :auth_gui_against_nodes => method(:auth_gui_against_nodes),
       :get_tokens => method(:get_tokens),
       :get_cluster_tokens => method(:get_cluster_tokens),
       :save_tokens => method(:save_tokens),
-      :add_node_to_cluster => method(:add_node_to_cluster),
+      :get_cluster_properties_definition => method(:get_cluster_properties_definition)
   }
   remote_cmd_with_pacemaker = {
       :resource_start => method(:resource_start),
@@ -105,10 +103,12 @@ def remote(params, request, session)
   command = params[:command].to_sym
 
   if remote_cmd_without_pacemaker.include? command
-    return remote_cmd_without_pacemaker[command].call(params, request, session)
+    return remote_cmd_without_pacemaker[command].call(
+      params, request, auth_user
+    )
   elsif remote_cmd_with_pacemaker.include? command
     if pacemaker_running?
-      return remote_cmd_with_pacemaker[command].call(params, request, session)
+      return remote_cmd_with_pacemaker[command].call(params, request, auth_user)
     else
       return [200,'{"pacemaker_not_running":true}']
     end
@@ -118,9 +118,9 @@ def remote(params, request, session)
 end
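
For readers tracking this API change: every entry in the command tables above now receives the authenticated user in place of the Rack session. A minimal sketch of a handler written against the new signature (the :ping command and its body are invented for illustration and are not part of this patch):

    # Hypothetical handler, shown only to illustrate the new
    # (params, request, auth_user) calling convention and the usual
    # permission gate; it does not exist in pcsd.
    def remote_ping(params, request, auth_user)
      unless allowed_for_local_cluster(auth_user, Permissions::READ)
        return 403, 'Permission denied'
      end
      return [200, JSON.generate({'pong' => true})]
    end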
 
 # provides remote cluster status to a local gui
-def cluster_status_gui(session, cluster_name, dont_update_config=false)
+def cluster_status_gui(auth_user, cluster_name, dont_update_config=false)
   cluster_nodes = get_cluster_nodes(cluster_name)
-  status = cluster_status_from_nodes(session, cluster_nodes, cluster_name)
+  status = cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
   unless status
     return 403, 'Permission denied'
   end
@@ -144,15 +144,15 @@ def cluster_status_gui(session, cluster_name, dont_update_config=false)
       Cfgsync::save_sync_new_version(
           sync_config, get_corosync_nodes(), $cluster_name, true
       )
-      return cluster_status_gui(session, cluster_name, true)
+      return cluster_status_gui(auth_user, cluster_name, true)
     end
   end
   return JSON.generate(status)
 end
 
 # get cluster status and return it to a remote gui or other client
-def cluster_status_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::READ)
+def cluster_status_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
 
@@ -172,20 +172,20 @@ def cluster_status_remote(params, request, session)
   end
 
   cluster_nodes = get_nodes().flatten
-  status = cluster_status_from_nodes(session, cluster_nodes, cluster_name)
+  status = cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
   unless status
     return 403, 'Permission denied'
   end
   return JSON.generate(status)
 end
 
-def cluster_start(params, request, session)
+def cluster_start(params, request, auth_user)
   if params[:name]
     code, response = send_request_with_token(
-      session, params[:name], 'cluster_start', true
+      auth_user, params[:name], 'cluster_start', true
     )
   else
-    if not allowed_for_local_cluster(session, Permissions::WRITE)
+    if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
       return 403, 'Permission denied'
     end
     $logger.info "Starting Daemons"
@@ -195,16 +195,16 @@ def cluster_start(params, request, session)
   end
 end
 
-def cluster_stop(params, request, session)
+def cluster_stop(params, request, auth_user)
   if params[:name]
     params_without_name = params.reject {|key, value|
       key == "name" or key == :name
     }
     code, response = send_request_with_token(
-      session, params[:name], 'cluster_stop', true, params_without_name
+      auth_user, params[:name], 'cluster_stop', true, params_without_name
     )
   else
-    if not allowed_for_local_cluster(session, Permissions::WRITE)
+    if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
       return 403, 'Permission denied'
     end
     options = []
@@ -217,7 +217,9 @@ def cluster_stop(params, request, session)
     end
     options << "--force" if params["force"]
     $logger.info "Stopping Daemons"
-    stdout, stderr, retval = run_cmd(session, PCS, "cluster", "stop", *options)
+    stdout, stderr, retval = run_cmd(
+      auth_user, PCS, "cluster", "stop", *options
+    )
     if retval != 0
       return [400, stderr.join]
     else
@@ -226,17 +228,17 @@ def cluster_stop(params, request, session)
   end
 end
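
A small worked example of the reject-and-forward step above, with invented values: a request carrying {:name => 'node1', 'force' => '1'} is relayed to node1 with only the 'force' key, so the receiving node takes the local branch and appends --force to the stop options.

    # Invented values, illustrating the parameter forwarding only.
    params = { :name => 'node1', 'force' => '1' }
    params_without_name = params.reject { |key, _value|
      key == "name" or key == :name
    }
    # params_without_name == { 'force' => '1' }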
 
-def config_backup(params, request, session)
+def config_backup(params, request, auth_user)
   if params[:name]
     code, response = send_request_with_token(
-      session, params[:name], 'config_backup', true
+      auth_user, params[:name], 'config_backup', true
     )
   else
-    if not allowed_for_local_cluster(session, Permissions::FULL)
+    if not allowed_for_local_cluster(auth_user, Permissions::FULL)
       return 403, 'Permission denied'
     end
     $logger.info "Backup node configuration"
-    stdout, stderr, retval = run_cmd(session, PCS, "config", "backup")
+    stdout, stderr, retval = run_cmd(auth_user, PCS, "config", "backup")
     if retval == 0
         $logger.info "Backup successful"
         return [200, stdout]
@@ -246,14 +248,14 @@ def config_backup(params, request, session)
   end
 end
 
-def config_restore(params, request, session)
+def config_restore(params, request, auth_user)
   if params[:name]
     code, response = send_request_with_token(
-      session, params[:name], 'config_restore', true,
+      auth_user, params[:name], 'config_restore', true,
       {:tarball => params[:tarball]}
     )
   else
-    if not allowed_for_local_cluster(session, Permissions::FULL)
+    if not allowed_for_local_cluster(auth_user, Permissions::FULL)
       return 403, 'Permission denied'
     end
     $logger.info "Restore node configuration"
@@ -281,13 +283,13 @@ def config_restore(params, request, session)
   end
 end
 
-def node_restart(params, request, session)
+def node_restart(params, request, auth_user)
   if params[:name]
     code, response = send_request_with_token(
-      session, params[:name], 'node_restart', true
+      auth_user, params[:name], 'node_restart', true
     )
   else
-    if not allowed_for_local_cluster(session, Permissions::WRITE)
+    if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
       return 403, 'Permission denied'
     end
     $logger.info "Restarting Node"
@@ -297,48 +299,48 @@ def node_restart(params, request, session)
   end
 end
 
-def node_standby(params, request, session)
+def node_standby(params, request, auth_user)
   if params[:name]
     code, response = send_request_with_token(
-      session, params[:name], 'node_standby', true, {"node"=>params[:name]}
+      auth_user, params[:name], 'node_standby', true, {"node"=>params[:name]}
     )
     # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd
   else
-    if not allowed_for_local_cluster(session, Permissions::WRITE)
+    if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
       return 403, 'Permission denied'
     end
     $logger.info "Standby Node"
-    stdout, stderr, retval = run_cmd(session, PCS, "cluster", "standby")
+    stdout, stderr, retval = run_cmd(auth_user, PCS, "cluster", "standby")
     return stdout
   end
 end
 
-def node_unstandby(params, request, session)
+def node_unstandby(params, request, auth_user)
   if params[:name]
     code, response = send_request_with_token(
-      session, params[:name], 'node_unstandby', true, {"node"=>params[:name]}
+      auth_user, params[:name], 'node_unstandby', true, {"node"=>params[:name]}
     )
     # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd
   else
-    if not allowed_for_local_cluster(session, Permissions::WRITE)
+    if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
       return 403, 'Permission denied'
     end
     $logger.info "Unstandby Node"
-    stdout, stderr, retval = run_cmd(session, PCS, "cluster", "unstandby")
+    stdout, stderr, retval = run_cmd(auth_user, PCS, "cluster", "unstandby")
     return stdout
   end
 end
 
-def cluster_enable(params, request, session)
+def cluster_enable(params, request, auth_user)
   if params[:name]
     code, response = send_request_with_token(
-      session, params[:name], 'cluster_enable', true
+      auth_user, params[:name], 'cluster_enable', true
     )
   else
-    if not allowed_for_local_cluster(session, Permissions::WRITE)
+    if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
       return 403, 'Permission denied'
     end
-    success = enable_cluster(session)
+    success = enable_cluster(auth_user)
     if not success
       return JSON.generate({"error" => "true"})
     end
@@ -346,16 +348,16 @@ def cluster_enable(params, request, session)
   end
 end
 
-def cluster_disable(params, request, session)
+def cluster_disable(params, request, auth_user)
   if params[:name]
     code, response = send_request_with_token(
-      session, params[:name], 'cluster_disable', true
+      auth_user, params[:name], 'cluster_disable', true
     )
   else
-    if not allowed_for_local_cluster(session, Permissions::WRITE)
+    if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
       return 403, 'Permission denied'
     end
-    success = disable_cluster(session)
+    success = disable_cluster(auth_user)
     if not success
       return JSON.generate({"error" => "true"})
     end
@@ -363,16 +365,16 @@ def cluster_disable(params, request, session)
   end
 end
 
-def get_quorum_info(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::READ)
+def get_quorum_info(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
   if ISRHEL6
     stdout_status, stderr_status, retval = run_cmd(
-      PCSAuth.getSuperuserSession, CMAN_TOOL, "status"
+      PCSAuth.getSuperuserAuth(), CMAN_TOOL, "status"
     )
     stdout_nodes, stderr_nodes, retval = run_cmd(
-      PCSAuth.getSuperuserSession,
+      PCSAuth.getSuperuserAuth(),
       CMAN_TOOL, "nodes", "-F", "id,type,votes,name"
     )
     if stderr_status.length > 0
@@ -384,7 +386,7 @@ def get_quorum_info(params, request, session)
     end
   else
     stdout, stderr, retval = run_cmd(
-      PCSAuth.getSuperuserSession, COROSYNC_QUORUMTOOL, "-p", "-s"
+      PCSAuth.getSuperuserAuth(), COROSYNC_QUORUMTOOL, "-p", "-s"
     )
     # retval is 0 on success if node is not in partition with quorum
     # retval is 1 on error OR on success if node has quorum
@@ -396,11 +398,11 @@ def get_quorum_info(params, request, session)
   end
 end
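
The exit-code caveat in the comment above is worth spelling out: corosync-quorumtool may exit 1 both on error and on one of its success paths, so the exit code alone cannot classify the result. A sketch of the pattern this forces, under the assumption that a non-empty stderr is the actual failure signal:

    # Sketch only: retval 0 and retval 1 can both mean success here,
    # so the result is classified by stderr rather than by exit code.
    def quorum_info_sketch
      stdout, stderr, _retval = run_cmd(
        PCSAuth.getSuperuserAuth(), COROSYNC_QUORUMTOOL, "-p", "-s"
      )
      return [400, stderr.join] if stderr.length > 0
      stdout.join
    end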
 
-def get_cib(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::READ)
+def get_cib(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
-  cib, stderr, retval = run_cmd(session, CIBADMIN, "-Ql")
+  cib, stderr, retval = run_cmd(auth_user, CIBADMIN, "-Ql")
   if retval != 0
     if not pacemaker_running?
       return [400, '{"pacemaker_not_running":true}']
@@ -411,15 +413,15 @@ def get_cib(params, request, session)
   end
 end
 
-def get_corosync_conf_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::READ)
+def get_corosync_conf_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
   return get_corosync_conf()
 end
 
-def set_cluster_conf(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+def set_cluster_conf(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, 'Permission denied'
   end
   if params[:cluster_conf] != nil and params[:cluster_conf].strip != ""
@@ -432,8 +434,8 @@ def set_cluster_conf(params, request, session)
   end
 end
 
-def set_corosync_conf(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+def set_corosync_conf(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, 'Permission denied'
   end
   if params[:corosync_conf] != nil and params[:corosync_conf].strip != ""
@@ -446,14 +448,14 @@ def set_corosync_conf(params, request, session)
   end
 end
 
-def get_sync_capabilities(params, request, session)
+def get_sync_capabilities(params, request, auth_user)
   return JSON.generate({
     'syncable_configs' => Cfgsync::get_cfg_classes_by_name().keys,
   })
 end
 
-def set_sync_options(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+def set_sync_options(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, 'Permission denied'
   end
 
@@ -502,8 +504,8 @@ def set_sync_options(params, request, session)
   return [400, 'Exactly one option has to be specified']
 end
 
-def get_configs(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+def get_configs(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, 'Permission denied'
   end
   if not $cluster_name or $cluster_name.empty?
@@ -526,8 +528,8 @@ def get_configs(params, request, session)
   return JSON.generate(out)
 end
 
-def set_configs(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+def set_configs(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, 'Permission denied'
   end
   return JSON.generate({'status' => 'bad_json'}) if not params['configs']
@@ -573,8 +575,8 @@ def set_configs(params, request, session)
   }
 end
 
-def set_certs(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+def set_certs(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, 'Permission denied'
   end
 
@@ -617,8 +619,8 @@ def set_certs(params, request, session)
   return [200, 'success']
 end
 
-def get_permissions_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::GRANT)
+def get_permissions_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::GRANT)
     return 403, 'Permission denied'
   end
 
@@ -632,8 +634,8 @@ def get_permissions_remote(params, request, session)
   return [200, JSON.generate(data)]
 end
 
-def set_permissions_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::GRANT)
+def set_permissions_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::GRANT)
     return 403, 'Permission denied'
   end
 
@@ -703,7 +705,7 @@ def set_permissions_remote(params, request, session)
         break
       end
     }
-    if not allowed_for_local_cluster(session, Permissions::FULL)
+    if not allowed_for_local_cluster(auth_user, Permissions::FULL)
       return [
         403,
         "Permission denied\nOnly #{SUPERUSER} and users with #{label} "\
@@ -724,54 +726,12 @@ def set_permissions_remote(params, request, session)
   return 400, 'Unable to save permissions'
 end
 
-def remote_pcsd_restart(params, request, session)
+def remote_pcsd_restart(params, request, auth_user)
   pcsd_restart()
   return [200, 'success']
 end
 
-def check_gui_status(params, request, session)
-  node_results = {}
-  if params[:nodes] != nil and params[:nodes] != ""
-    node_array = params[:nodes].split(",")
-    online, offline, notauthorized = check_gui_status_of_nodes(
-      session, node_array
-    )
-    online.each { |node|
-      node_results[node] = "Online"
-    }
-    offline.each { |node|
-      node_results[node] = "Offline"
-    }
-    notauthorized.each { |node|
-      node_results[node] = "Unable to authenticate"
-    }
-  end
-  return JSON.generate(node_results)
-end
-
-def get_sw_versions(params, request, session)
-  if params[:nodes] != nil and params[:nodes] != ""
-    nodes = params[:nodes].split(",")
-    final_response = {}
-    threads = []
-    nodes.each {|node|
-      threads << Thread.new {
-        code, response = send_request_with_token(
-          session, node, 'get_sw_versions'
-        )
-        begin
-          node_response = JSON.parse(response)
-          if node_response and node_response["notoken"] == true
-            $logger.error("ERROR: bad token for #{node}")
-          end
-          final_response[node] = node_response
-        rescue JSON::ParserError => e
-        end
-      }
-    }
-    threads.each { |t| t.join }
-    return JSON.generate(final_response)
-  end
+def get_sw_versions(params, request, auth_user)
   versions = {
     "rhel" => get_rhel_version(),
     "pcs" => get_pcsd_version(),
@@ -782,15 +742,15 @@ def get_sw_versions(params, request, session)
   return JSON.generate(versions)
 end
 
-def remote_node_available(params, request, session)
+def remote_node_available(params, request, auth_user)
   if (not ISRHEL6 and File.exist?(Cfgsync::CorosyncConf.file_path)) or (ISRHEL6 and File.exist?(Cfgsync::ClusterConf.file_path)) or File.exist?("/var/lib/pacemaker/cib/cib.xml")
     return JSON.generate({:node_available => false})
   end
   return JSON.generate({:node_available => true})
 end
 
-def remote_add_node(params, request, session, all=false)
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+def remote_add_node(params, request, auth_user, all=false)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, 'Permission denied'
   end
   auto_start = false
@@ -803,7 +763,7 @@ def remote_add_node(params, request, session, all=false)
     if params[:new_ring1addr] != nil
       node += ',' + params[:new_ring1addr]
     end
-    retval, output = add_node(session, node, all, auto_start)
+    retval, output = add_node(auth_user, node, all, auto_start)
   end
 
   if retval == 0
@@ -813,8 +773,8 @@ def remote_add_node(params, request, session, all=false)
   return [400,output]
 end
 
-def remote_remove_nodes(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+def remote_remove_nodes(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, 'Permission denied'
   end
   count = 0
@@ -837,14 +797,14 @@ def remote_remove_nodes(params, request, session)
   # - get possible quorum loss warning
   stop_params = node_list + options
   stdout, stderr, retval = run_cmd(
-    session, PCS, "cluster", "stop", *stop_params
+    auth_user, PCS, "cluster", "stop", *stop_params
   )
   if retval != 0
     return [400, stderr.join]
   end
 
   node_list.each {|node|
-    retval, output = remove_node(session, node, true)
+    retval, output = remove_node(auth_user, node, true)
     out = out + output.join("\n")
   }
   config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
@@ -854,12 +814,12 @@ def remote_remove_nodes(params, request, session)
   return out
 end
 
-def remote_remove_node(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+def remote_remove_node(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, 'Permission denied'
   end
   if params[:remove_nodename] != nil
-    retval, output = remove_node(session, params[:remove_nodename])
+    retval, output = remove_node(auth_user, params[:remove_nodename])
   else
     return 400, "No nodename specified"
   end
@@ -871,8 +831,8 @@ def remote_remove_node(params, request, session)
   return JSON.generate([retval,output])
 end
 
-def setup_cluster(params, request, session)
-  if not allowed_for_superuser(session)
+def setup_cluster(params, request, auth_user)
+  if not allowed_for_superuser(auth_user)
     return 403, 'Permission denied'
   end
   $logger.info("Setting up cluster: " + params.inspect)
@@ -921,7 +881,7 @@ def setup_cluster(params, request, session)
   nodes_options = nodes + options
   nodes_options += options_udp if transport_udp
   stdout, stderr, retval = run_cmd(
-    session, PCS, "cluster", "setup", "--enable", "--start",
+    auth_user, PCS, "cluster", "setup", "--enable", "--start",
     "--name", params[:clustername], *nodes_options
   )
   if retval != 0
@@ -933,22 +893,22 @@ def setup_cluster(params, request, session)
   return 200
 end
 
-def create_cluster(params, request, session)
-  if not allowed_for_superuser(session)
+def create_cluster(params, request, auth_user)
+  if not allowed_for_superuser(auth_user)
     return 403, 'Permission denied'
   end
-  if set_corosync_conf(params, request, session)
-    cluster_start(params, request, session)
+  if set_corosync_conf(params, request, auth_user)
+    cluster_start(params, request, auth_user)
   else
     return "Failed"
   end
 end
 
-def node_status(params, request, session)
+def node_status(params, request, auth_user)
   if params[:node] and params[:node] != '' and params[:node] !=
     $cur_node_name and !params[:redirected]
     return send_request_with_token(
-      session,
+      auth_user,
       params[:node],
       'status?redirected=1',
       false,
@@ -958,24 +918,24 @@ def node_status(params, request, session)
     )
   end
 
-  if not allowed_for_local_cluster(session, Permissions::READ)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
 
-  cib_dom = get_cib_dom(session)
-  crm_dom = get_crm_mon_dom(session)
+  cib_dom = get_cib_dom(auth_user)
+  crm_dom = get_crm_mon_dom(auth_user)
 
-  status = get_node_status(session, cib_dom)
+  status = get_node_status(auth_user, cib_dom)
   resources = get_resources(
     cib_dom,
     crm_dom,
     (params[:operations] and params[:operations] == '1')
   )
 
-  node = ClusterEntity::Node.load_current_node(session, crm_dom)
+  node = ClusterEntity::Node.load_current_node(crm_dom)
 
   _,_,not_authorized_nodes = check_gui_status_of_nodes(
-    session,
+    auth_user,
     status[:known_nodes],
     false,
     3
@@ -1057,7 +1017,7 @@ def node_status(params, request, session)
   return JSON.generate(old_status)
 end
 
-def status_all(params, request, session, nodes=[], dont_update_config=false)
+def status_all(params, request, auth_user, nodes=[], dont_update_config=false)
   if nodes == nil
     return JSON.generate({"error" => "true"})
   end
@@ -1067,7 +1027,7 @@ def status_all(params, request, session, nodes=[], dont_update_config=false)
   forbidden_nodes = {}
   nodes.each {|node|
     threads << Thread.new {
-      code, response = send_request_with_token(session, node, 'status')
+      code, response = send_request_with_token(auth_user, node, 'status')
       if 403 == code
         forbidden_nodes[node] = true
       end
@@ -1109,14 +1069,14 @@ def status_all(params, request, session, nodes=[], dont_update_config=false)
       Cfgsync::save_sync_new_version(
         sync_config, get_corosync_nodes(), $cluster_name, true
       )
-      return status_all(params, request, session, node_list, true)
+      return status_all(params, request, auth_user, node_list, true)
     end
   end
   $logger.debug("NODE LIST: " + node_list.inspect)
   return JSON.generate(final_response)
 end
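
status_all, like cluster_status_gui earlier in this diff, retries itself exactly once after syncing a corrected node list; the dont_update_config flag is what breaks the recursion. The same shape reduced to a schematic (do_fetch and sync_new_config are placeholder names, not pcsd functions):

    # Schematic of the single-retry-after-config-sync pattern.
    def fetch_with_refresh(already_refreshed = false)
      result, config_is_stale = do_fetch()
      if config_is_stale and not already_refreshed
        sync_new_config()
        return fetch_with_refresh(true)
      end
      result
    end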
 
-def clusters_overview(params, request, session)
+def clusters_overview(params, request, auth_user)
   cluster_map = {}
   forbidden_clusters = {}
   threads = []
@@ -1135,7 +1095,7 @@ def clusters_overview(params, request, session)
       }
       overview_cluster = nil
       online, offline, not_authorized_nodes = check_gui_status_of_nodes(
-        session,
+        auth_user,
         get_cluster_nodes(cluster.name),
         false,
         3
@@ -1147,7 +1107,7 @@ def clusters_overview(params, request, session)
       nodes_not_in_cluster = []
       for node in cluster_nodes_auth
         code, response = send_request_with_token(
-          session, node, 'cluster_status', true, {}, true, nil, 8
+          auth_user, node, 'cluster_status', true, {}, true, nil, 8
         )
         if code == 404
           not_supported = true
@@ -1283,7 +1243,7 @@ def clusters_overview(params, request, session)
   return JSON.generate(overview)
 end
 
-def auth(params, request, session)
+def auth(params, request, auth_user)
   token = PCSAuth.validUser(params['username'],params['password'], true)
   # If we authorized to this machine, attempt to authorize everywhere
   node_list = []
@@ -1295,7 +1255,7 @@ def auth(params, request, session)
     }
     if node_list.length > 0
       pcs_auth(
-        session, node_list, params['username'], params['password'],
+        auth_user, node_list, params['username'], params['password'],
         params["force"] == "1"
       )
     end
@@ -1304,7 +1264,7 @@ def auth(params, request, session)
 end
 
 # If we get here, we're already authorized
-def check_auth(params, request, session)
+def check_auth(params, request, auth_user)
   if params.include?("check_auth_only")
     return [200, "{\"success\":true}"]
   end
@@ -1315,12 +1275,12 @@ def check_auth(params, request, session)
 end
 
 # not used anymore, left here for backward compatibility reasons
-def resource_status(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::READ)
+def resource_status(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
   resource_id = params[:resource]
-  @resources, @groups = getResourcesGroups(session)
+  @resources, @groups = getResourcesGroups(auth_user)
   location = ""
   res_status = ""
   @resources.each {|r|
@@ -1342,12 +1302,12 @@ def resource_status(params, request, session)
   return JSON.generate(status)
 end
 
-def resource_stop(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def resource_stop(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   stdout, stderr, retval = run_cmd(
-    session, PCS, "resource", "disable", params[:resource]
+    auth_user, PCS, "resource", "disable", params[:resource]
   )
   if retval == 0
     return JSON.generate({"success" => "true"})
@@ -1356,12 +1316,12 @@ def resource_stop(params, request, session)
   end
 end
 
-def resource_cleanup(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def resource_cleanup(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   stdout, stderr, retval = run_cmd(
-    session, PCS, "resource", "cleanup", params[:resource]
+    auth_user, PCS, "resource", "cleanup", params[:resource]
   )
   if retval == 0
     return JSON.generate({"success" => "true"})
@@ -1370,12 +1330,12 @@ def resource_cleanup(params, request, session)
   end
 end
 
-def resource_start(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def resource_start(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   stdout, stderr, retval = run_cmd(
-    session, PCS, "resource", "enable", params[:resource]
+    auth_user, PCS, "resource", "enable", params[:resource]
   )
   if retval == 0
     return JSON.generate({"success" => "true"})
@@ -1384,12 +1344,12 @@ def resource_start(params, request, session)
   end
 end
 
-def resource_form(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::READ)
+def resource_form(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
 
-  cib_dom = get_cib_dom(session)
+  cib_dom = get_cib_dom(auth_user)
   @cur_resource = get_resource_by_id(params[:resource], cib_dom)
   @groups = get_resource_groups(cib_dom)
   @version = params[:version]
@@ -1400,11 +1360,11 @@ def resource_form(params, request, session)
     @cur_resource_ms = @cur_resource.get_master
     @resource = ResourceAgent.new(@cur_resource.agentname)
     if @cur_resource.provider == 'heartbeat'
-      @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, HEARTBEAT_AGENTS_DIR + @cur_resource.type)
+      @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(auth_user, HEARTBEAT_AGENTS_DIR + @cur_resource.type)
     elsif @cur_resource.provider == 'pacemaker'
-      @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, PACEMAKER_AGENTS_DIR + @cur_resource.type)
+      @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(auth_user, PACEMAKER_AGENTS_DIR + @cur_resource.type)
     elsif @cur_resource._class == 'nagios'
-      @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, NAGIOS_METADATA_DIR + @cur_resource.type + '.xml')
+      @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(auth_user, NAGIOS_METADATA_DIR + @cur_resource.type + '.xml')
     end
     @existing_resource = true
     if @resource
@@ -1417,15 +1377,15 @@ def resource_form(params, request, session)
   end
 end
 
-def fence_device_form(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::READ)
+def fence_device_form(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
 
-  @cur_resource = get_resource_by_id(params[:resource], get_cib_dom(session))
+  @cur_resource = get_resource_by_id(params[:resource], get_cib_dom(auth_user))
 
   if @cur_resource.instance_of?(ClusterEntity::Primitive) and @cur_resource.stonith
-    @resource_agents = getFenceAgents(session, @cur_resource.agentname)
+    @resource_agents = getFenceAgents(auth_user, @cur_resource.agentname)
     @existing_resource = true
     @fenceagent = @resource_agents[@cur_resource.type]
     erb :fenceagentform
@@ -1435,15 +1395,15 @@ def fence_device_form(params, request, session)
 end
 
 # Creates resource if params[:resource_id] is not set
-def update_resource (params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def update_resource (params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
 
   param_line = getParamList(params)
   if not params[:resource_id]
     out, stderr, retval = run_cmd(
-      session,
+      auth_user,
       PCS, "resource", "create", params[:name], params[:resource_type],
       *param_line
     )
@@ -1452,7 +1412,7 @@ def update_resource (params, request, session)
     end
     if params[:resource_group] and params[:resource_group] != ""
       run_cmd(
-        session,
+        auth_user,
         PCS, "resource","group", "add", params[:resource_group], params[:name]
       )
       resource_group = params[:resource_group]
@@ -1460,10 +1420,10 @@ def update_resource (params, request, session)
 
     if params[:resource_clone] and params[:resource_clone] != ""
       name = resource_group ? resource_group : params[:name]
-      run_cmd(session, PCS, "resource", "clone", name)
+      run_cmd(auth_user, PCS, "resource", "clone", name)
     elsif params[:resource_ms] and params[:resource_ms] != ""
       name = resource_group ? resource_group : params[:name]
-      run_cmd(session, PCS, "resource", "master", name)
+      run_cmd(auth_user, PCS, "resource", "master", name)
     end
 
     return JSON.generate({})
@@ -1475,7 +1435,7 @@ def update_resource (params, request, session)
       params[:resource_id].sub!(/(.*):.*/,'\1')
     end
     run_cmd(
-      session, PCS, "resource", "update", params[:resource_id], *param_line
+      auth_user, PCS, "resource", "update", params[:resource_id], *param_line
     )
   end
 
@@ -1483,41 +1443,41 @@ def update_resource (params, request, session)
     if params[:resource_group] == ""
       if params[:_orig_resource_group] != ""
         run_cmd(
-          session, PCS, "resource", "group", "remove",
+          auth_user, PCS, "resource", "group", "remove",
           params[:_orig_resource_group], params[:resource_id]
         )
       end
     else
       run_cmd(
-        session, PCS, "resource", "group", "add", params[:resource_group],
+        auth_user, PCS, "resource", "group", "add", params[:resource_group],
         params[:resource_id]
       )
     end
   end
 
   if params[:resource_clone] and params[:_orig_resource_clone] == "false"
-    run_cmd(session, PCS, "resource", "clone", params[:resource_id])
+    run_cmd(auth_user, PCS, "resource", "clone", params[:resource_id])
   end
   if params[:resource_ms] and params[:_orig_resource_ms] == "false"
-    run_cmd(session, PCS, "resource", "master", params[:resource_id])
+    run_cmd(auth_user, PCS, "resource", "master", params[:resource_id])
   end
 
   if params[:_orig_resource_clone] == "true" and not params[:resource_clone]
     run_cmd(
-      session, PCS, "resource", "unclone", params[:resource_id].sub(/:.*/,'')
+      auth_user, PCS, "resource", "unclone", params[:resource_id].sub(/:.*/,'')
     )
   end
   if params[:_orig_resource_ms] == "true" and not params[:resource_ms]
     run_cmd(
-      session, PCS, "resource", "unclone", params[:resource_id].sub(/:.*/,'')
+      auth_user, PCS, "resource", "unclone", params[:resource_id].sub(/:.*/,'')
     )
   end
 
   return JSON.generate({})
 end
 
-def update_fence_device(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def update_fence_device(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
 
@@ -1528,7 +1488,7 @@ def update_fence_device(params, request, session)
 
   if not params[:resource_id]
     out, stderr, retval = run_cmd(
-      session,
+      auth_user,
       PCS, "stonith", "create", params[:name], params[:resource_type],
       *param_line
     )
@@ -1540,7 +1500,7 @@ def update_fence_device(params, request, session)
 
   if param_line.length != 0
     out, stderr, retval = run_cmd(
-      session, PCS, "stonith", "update", params[:resource_id], *param_line
+      auth_user, PCS, "stonith", "update", params[:resource_id], *param_line
     )
     if retval != 0
       return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out})
@@ -1549,24 +1509,24 @@ def update_fence_device(params, request, session)
   return "{}"
 end
 
-def get_avail_resource_agents(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::READ)
+def get_avail_resource_agents(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
-  agents = getResourceAgents(session)
+  agents = getResourceAgents(auth_user)
   return JSON.generate(agents)
 end
 
-def get_avail_fence_agents(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::READ)
+def get_avail_fence_agents(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
-  agents = getFenceAgents(session)
+  agents = getFenceAgents(auth_user)
   return JSON.generate(agents)
 end
 
-def resource_metadata(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::READ)
+def resource_metadata(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
   return 200 if not params[:resourcename] or params[:resourcename] == ""
@@ -1575,32 +1535,32 @@ def resource_metadata(params, request, session)
 
   @resource = ResourceAgent.new(params[:resourcename])
   if class_provider == "ocf:heartbeat"
-    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, HEARTBEAT_AGENTS_DIR + resource_name)
+    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(auth_user, HEARTBEAT_AGENTS_DIR + resource_name)
   elsif class_provider == "ocf:pacemaker"
-    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, PACEMAKER_AGENTS_DIR + resource_name)
+    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(auth_user, PACEMAKER_AGENTS_DIR + resource_name)
   elsif class_provider == 'nagios'
-    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, NAGIOS_METADATA_DIR + resource_name + '.xml')
+    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(auth_user, NAGIOS_METADATA_DIR + resource_name + '.xml')
   end
   @new_resource = params[:new]
-  @resources, @groups = getResourcesGroups(session)
+  @resources, @groups = getResourcesGroups(auth_user)
 
   erb :resourceagentform
 end
 
-def fence_device_metadata(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::READ)
+def fence_device_metadata(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
   return 200 if not params[:resourcename] or params[:resourcename] == ""
   @fenceagent = FenceAgent.new(params[:resourcename])
-  @fenceagent.required_options, @fenceagent.optional_options, @fenceagent.advanced_options, @fenceagent.info = getFenceAgentMetadata(session, params[:resourcename])
+  @fenceagent.required_options, @fenceagent.optional_options, @fenceagent.advanced_options, @fenceagent.info = getFenceAgentMetadata(auth_user, params[:resourcename])
   @new_fenceagent = params[:new]
   
   erb :fenceagentform
 end
 
-def remove_resource(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def remove_resource(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   force = params['force']
@@ -1611,7 +1571,7 @@ def remove_resource(params, request, session)
       resid = k.gsub('resid-', '')
       command = [PCS, 'resource', 'delete', resid]
       command << '--force' if force
-      out, errout, retval = run_cmd(session, *command)
+      out, errout, retval = run_cmd(auth_user, *command)
       if retval != 0
         unless out.index(" does not exist.") != -1 and no_error_if_not_exists  
           errors += errout.join(' ').strip + "\n"
@@ -1628,12 +1588,12 @@ def remove_resource(params, request, session)
   end
 end
 
-def add_fence_level_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def add_fence_level_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   retval, stdout, stderr = add_fence_level(
-    session, params["level"], params["devices"], params["node"], params["remove"]
+    auth_user, params["level"], params["devices"], params["node"], params["remove"]
   )
   if retval == 0
     return [200, "Successfully added fence level"]
@@ -1642,12 +1602,12 @@ def add_fence_level_remote(params, request, session)
   end
 end
 
-def add_node_attr_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def add_node_attr_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   retval = add_node_attr(
-    session, params["node"], params["key"], params["value"]
+    auth_user, params["node"], params["key"], params["value"]
   )
   # retval = 2 if removing attr which doesn't exist
   if retval == 0 or retval == 2
@@ -1657,11 +1617,11 @@ def add_node_attr_remote(params, request, session)
   end
 end
 
-def add_acl_role_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::GRANT)
+def add_acl_role_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::GRANT)
     return 403, 'Permission denied'
   end
-  retval = add_acl_role(session, params["name"], params["description"])
+  retval = add_acl_role(auth_user, params["name"], params["description"])
   if retval == ""
     return [200, "Successfully added ACL role"]
   else
@@ -1672,15 +1632,15 @@ def add_acl_role_remote(params, request, session)
   end
 end
 
-def remove_acl_roles_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::GRANT)
+def remove_acl_roles_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::GRANT)
     return 403, 'Permission denied'
   end
   errors = ""
   params.each { |name, value|
     if name.index("role-") == 0
       out, errout, retval = run_cmd(
-        session, PCS, "acl", "role", "delete", value.to_s, "--autodelete"
+        auth_user, PCS, "acl", "role", "delete", value.to_s, "--autodelete"
       )
       if retval != 0
         errors += "Unable to remove role #{value}"
@@ -1699,18 +1659,18 @@ def remove_acl_roles_remote(params, request, session)
   end
 end
 
-def add_acl_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::GRANT)
+def add_acl_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::GRANT)
     return 403, 'Permission denied'
   end
   if params["item"] == "permission"
     retval = add_acl_permission(
-      session,
+      auth_user,
       params["role_id"], params["type"], params["xpath_id"], params["query_id"]
     )
   elsif (params["item"] == "user") or (params["item"] == "group")
     retval = add_acl_usergroup(
-      session, params["role_id"], params["item"], params["usergroup"]
+      auth_user, params["role_id"], params["item"], params["usergroup"]
     )
   else
     retval = "Error: Unknown adding request"
@@ -1726,15 +1686,15 @@ def add_acl_remote(params, request, session)
   end
 end
 
-def remove_acl_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::GRANT)
+def remove_acl_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::GRANT)
     return 403, 'Permission denied'
   end
   if params["item"] == "permission"
-    retval = remove_acl_permission(session, params["acl_perm_id"])
+    retval = remove_acl_permission(auth_user, params["acl_perm_id"])
   elsif params["item"] == "usergroup"
     retval = remove_acl_usergroup(
-      session, params["role_id"],params["usergroup_id"]
+      auth_user, params["role_id"],params["usergroup_id"]
     )
   else
     retval = "Error: Unknown removal request"
@@ -1747,12 +1707,12 @@ def remove_acl_remote(params, request, session)
   end
 end
 
-def add_meta_attr_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def add_meta_attr_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   retval = add_meta_attr(
-    session, params["res_id"], params["key"],params["value"]
+    auth_user, params["res_id"], params["key"],params["value"]
   )
   if retval == 0
     return [200, "Successfully added meta attribute"]
@@ -1761,14 +1721,14 @@ def add_meta_attr_remote(params, request, session)
   end
 end
 
-def add_constraint_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def add_constraint_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   case params["c_type"]
   when "loc"
     retval, error = add_location_constraint(
-      session,
+      auth_user,
       params["res_id"], params["node_id"], params["score"], params["force"],
       !params['disable_autocorrect']
     )
@@ -1783,7 +1743,7 @@ def add_constraint_remote(params, request, session)
     end
 
     retval, error = add_order_constraint(
-      session,
+      auth_user,
       resA, resB, actionA, actionB, params["score"], true, params["force"],
       !params['disable_autocorrect']
     )
@@ -1800,7 +1760,7 @@ def add_constraint_remote(params, request, session)
     end
 
     retval, error = add_colocation_constraint(
-      session,
+      auth_user,
       resA, resB, score, params["force"], !params['disable_autocorrect']
     )
   else
@@ -1814,13 +1774,13 @@ def add_constraint_remote(params, request, session)
   end
 end
 
-def add_constraint_rule_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def add_constraint_rule_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   if params["c_type"] == "loc"
     retval, error = add_location_constraint_rule(
-      session,
+      auth_user,
       params["res_id"], params["rule"], params["score"], params["force"],
       !params['disable_autocorrect']
     )
@@ -1835,14 +1795,14 @@ def add_constraint_rule_remote(params, request, session)
   end
 end
 
-def add_constraint_set_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def add_constraint_set_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   case params["c_type"]
   when "ord"
     retval, error = add_order_set_constraint(
-      session,
+      auth_user,
       params["resources"].values, params["force"], !params['disable_autocorrect']
     )
   else
@@ -1856,12 +1816,12 @@ def add_constraint_set_remote(params, request, session)
   end
 end
 
-def remove_constraint_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def remove_constraint_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   if params[:constraint_id]
-    retval = remove_constraint(session, params[:constraint_id])
+    retval = remove_constraint(auth_user, params[:constraint_id])
     if retval == 0
       return "Constraint #{params[:constraint_id]} removed"
     else
@@ -1872,12 +1832,12 @@ def remove_constraint_remote(params, request, session)
   end
 end
 
-def remove_constraint_rule_remote(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def remove_constraint_rule_remote(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   if params[:rule_id]
-    retval = remove_constraint_rule(session, params[:rule_id])
+    retval = remove_constraint_rule(auth_user, params[:rule_id])
     if retval == 0
       return "Constraint rule #{params[:rule_id]} removed"
     else
@@ -1888,14 +1848,14 @@ def remove_constraint_rule_remote(params, request, session)
   end
 end
 
-def add_group(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def add_group(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   rg = params["resource_group"]
   resources = params["resources"]
   output, errout, retval = run_cmd(
-    session, PCS, "resource", "group", "add", rg, *(resources.split(" "))
+    auth_user, PCS, "resource", "group", "add", rg, *(resources.split(" "))
   )
   if retval == 0
     return 200
@@ -1904,72 +1864,67 @@ def add_group(params, request, session)
   end
 end
 
-def update_cluster_settings(params, request, session)
-  settings = params["config"]
-  hidden_settings = params["hidden"]
-  hidden_settings.each{|name,val|
-    found = false
-    settings.each{|name2,val2|
-      if name == name2
-        found = true
-        break
-      end
-    }
-    if not found
-      settings[name] = val
-    end
-  }
-  settings.each { |_, val| val.strip!() }
-
-  binary_settings = []
-  changed_settings = []
-  old_settings = {}
-  getConfigOptions2(
-    PCSAuth.getSuperuserSession(), get_nodes().flatten()
-  ).values().flatten().each { |opt|
-    binary_settings << opt.configname if "check" == opt.type
-    # if we don't know current value of an option, consider it changed
-    next if opt.value.nil?
-    if "check" == opt.type
-      old_settings[opt.configname] = is_cib_true(opt.value)
-    else
-      old_settings[opt.configname] = opt.value
+def update_cluster_settings(params, request, auth_user)
+  properties = params['config']
+  to_update = []
+  current = getAllSettings(auth_user)
+
+  # We need to be able to set cluster properties from an older version of the GUI as well.
+  # This code handles proper processing of checkboxes.
+  # === backward compatibility layer start ===
+  params['hidden'].each { |prop, val|
+    next if prop == 'hidden_input'
+    unless properties.include?(prop)
+      properties[prop] = val
+      to_update << prop
     end
   }
-  settings.each { |key, val|
-    new_val = binary_settings.include?(key) ? is_cib_true(val) : val
-    # if we don't know current value of an option, consider it changed
-    if (not old_settings.key?(key)) or (old_settings[key] != new_val)
-      changed_settings << key.downcase()
+  # === backward compatibility layer end ===
+
+  properties.each { |prop, val|
+    val.strip!
+    if not current.include?(prop) and val != '' # add
+      to_update << prop
+    elsif current.include?(prop) and val == '' # remove
+      to_update << prop
+    elsif current.include?(prop) and current[prop] != val # update
+      to_update << prop
     end
   }
-  if changed_settings.include?('enable-acl')
-    if not allowed_for_local_cluster(session, Permissions::GRANT)
+
+  if to_update.count { |x| x.downcase == 'enable-acl' } > 0
+    if not allowed_for_local_cluster(auth_user, Permissions::GRANT)
       return 403, 'Permission denied'
     end
   end
-  if changed_settings.count { |x| x != 'enable-acl' } > 0
-    if not allowed_for_local_cluster(session, Permissions::WRITE)
+  if to_update.count { |x| x.downcase != 'enable-acl' } > 0
+    if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
       return 403, 'Permission denied'
     end
   end
 
-  changed_settings.each { |name|
-    val = settings[name]
-    if name == "enable-acl"
-      run_cmd(session, PCS, "property", "set", name + "=" + val, "--force")
-    else
-      run_cmd(session, PCS, "property", "set", name + "=" + val)
+  if to_update.empty?
+    $logger.info('No properties to update')
+  else
+    cmd_args = []
+    to_update.each { |prop|
+      cmd_args << "#{prop.downcase}=#{properties[prop]}"
+    }
+    stdout, stderr, retval = run_cmd(
+      auth_user, PCS, 'property', 'set', *cmd_args
+    )
+    if retval != 0
+      return [400, stderr.join('').gsub(', (use --force to override)', '')]
     end
-  }
+  end
   return [200, "Update Successful"]
 end
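
To make the add/remove/update classification above concrete, a worked example with invented property values:

    # Invented values, for illustration only.
    current    = { 'stonith-enabled' => 'true', 'no-quorum-policy' => 'stop' }
    properties = {
      'stonith-enabled'  => 'true',   # unchanged             -> skipped
      'no-quorum-policy' => '',       # set before, now empty -> remove
      'enable-acl'       => 'false',  # unset, non-empty      -> add
      'maintenance-mode' => '',       # unset and empty       -> skipped
    }
    # to_update == ['no-quorum-policy', 'enable-acl']; because it contains
    # 'enable-acl' the GRANT check applies, and everything is applied in a
    # single `pcs property set ...` run.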
 
-def cluster_destroy(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+def cluster_destroy(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, 'Permission denied'
   end
-  out, errout, retval = run_cmd(session, PCS, "cluster", "destroy")
+  out, errout, retval = run_cmd(auth_user, PCS, "cluster", "destroy")
   if retval == 0
     return [200, "Successfully destroyed cluster"]
   else
@@ -1977,8 +1932,8 @@ def cluster_destroy(params, request, session)
   end
 end
 
-def get_wizard(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::READ)
+def get_wizard(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
   wizard = PCSDWizard.getWizard(params["wizard"])
@@ -1989,8 +1944,8 @@ def get_wizard(params, request, session)
   end
 end
 
-def wizard_submit(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def wizard_submit(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
   wizard = PCSDWizard.getWizard(params["wizard"])
@@ -2002,62 +1957,18 @@ def wizard_submit(params, request, session)
 
 end
 
-def auth_gui_against_nodes(params, request, session)
-  node_auth_error = {}
-  new_tokens = {}
-  threads = []
-  params.each { |node|
-    threads << Thread.new {
-      if node[0].end_with?("-pass") and node[0].length > 5
-        nodename = node[0][0..-6]
-        if params.has_key?("all")
-          pass = params["pass-all"]
-        else
-          pass = node[1]
-        end
-        data = {
-          'node-0' => nodename,
-          'username' => SUPERUSER,
-          'password' => pass,
-          'force' => 1,
-        }
-        node_auth_error[nodename] = 1
-        code, response = send_request(session, nodename, 'auth', true, data)
-        if 200 == code
-          token = response.strip
-          if not token.empty?
-            new_tokens[nodename] = token
-            node_auth_error[nodename] = 0
-          end
-        end
-      end
-    }
-  }
-  threads.each { |t| t.join }
-
-  if not new_tokens.empty?
-    cluster_nodes = get_corosync_nodes()
-    tokens_cfg = Cfgsync::PcsdTokens.from_file('')
-    sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
-      tokens_cfg, new_tokens, cluster_nodes, $cluster_name
-    )
-  end
-
-  return [200, JSON.generate({'node_auth_error' => node_auth_error})]
-end
-
 # not used anymore, left here for backward compatibility reasons
-def get_tokens(params, request, session)
+def get_tokens(params, request, auth_user)
   # pcsd runs as root thus always returns hacluster's tokens
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, 'Permission denied'
   end
   return [200, JSON.generate(read_tokens)]
 end
 
-def get_cluster_tokens(params, request, session)
+def get_cluster_tokens(params, request, auth_user)
   # pcsd runs as root and thus always returns hacluster's tokens
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, "Permission denied"
   end
   on, off = get_nodes
@@ -2066,9 +1977,9 @@ def get_cluster_tokens(params, request, session)
   return [200, JSON.generate(get_tokens_of_nodes(nodes))]
 end
 
-def save_tokens(params, request, session)
+def save_tokens(params, request, auth_user)
   # pcsd runs as root and thus always returns hacluster's tokens
-  if not allowed_for_local_cluster(session, Permissions::FULL)
+  if not allowed_for_local_cluster(auth_user, Permissions::FULL)
     return 403, "Permission denied"
   end
 
@@ -2094,72 +2005,8 @@ def save_tokens(params, request, session)
   end
 end
 
-def add_node_to_cluster(params, request, session)
-  clustername = params["clustername"]
-  new_node = params["new_nodename"]
-
-  if clustername == $cluster_name
-    if not allowed_for_local_cluster(session, Permissions::FULL)
-      return 403, 'Permission denied'
-    end
-  end
-
-  tokens = read_tokens
-
-  if not tokens.include? new_node
-    return [400, "New node is not authenticated."]
-  end
-
-  # Save the new node token on all nodes in a cluster the new node is beeing
-  # added to. Send the token to one node and let the cluster nodes synchronize
-  # it by themselves.
-  token_data = {"node:#{new_node}" => tokens[new_node]}
-  retval, out = send_cluster_request_with_token(
-    # new node doesn't have config with permissions yet
-    PCSAuth.getSuperuserSession(), clustername, '/save_tokens', true, token_data
-  )
-  # If the cluster runs an old pcsd which doesn't support /save_tokens,
-  # ignore 404 in order to not prevent the node to be added.
-  if retval != 404 and retval != 200
-    return [400, 'Failed to save the token of the new node in target cluster.']
-  end
-
-  retval, out = send_cluster_request_with_token(
-    session, clustername, "/add_node_all", true, params
-  )
-  if 403 == retval
-    return [retval, out]
-  end
-  if retval != 200
-    return [400, "Failed to add new node '#{new_node}' into cluster '#{clustername}': #{out}"]
-  end
-
-  return [200, "Node added successfully."]
-end
-
-def fix_auth_of_cluster(params, request, session)
-  if not params["clustername"]
-    return [400, "cluster name not defined"]
-  end
-
-  clustername = params["clustername"]
-  nodes = get_cluster_nodes(clustername)
-  tokens_data = add_prefix_to_keys(get_tokens_of_nodes(nodes), "node:")
-
-  retval, out = send_cluster_request_with_token(
-    PCSAuth.getSuperuserSession(), clustername, "/save_tokens", true,
-    tokens_data, true
-  )
-  if retval == 404
-    return [400, "Old version of PCS/PCSD is running on cluster nodes. Fixing authentication is not supported. Use 'pcs cluster auth' command to authenticate the nodes."]
-  elsif retval != 200
-    return [400, "Authentication failed."]
-  end
-  return [200, "Auhentication of nodes in cluster should be fixed."]
-end
-
-def resource_master(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def resource_master(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
 
@@ -2167,7 +2014,7 @@ def resource_master(params, request, session)
     return [400, 'resource_id has to be specified.']
   end
   _, stderr, retval = run_cmd(
-    session, PCS, 'resource', 'master', params[:resource_id]
+    auth_user, PCS, 'resource', 'master', params[:resource_id]
   )
   if retval != 0
     return [400, 'Unable to create master/slave resource from ' +
@@ -2177,8 +2024,8 @@ def resource_master(params, request, session)
   return 200
 end
 
-def resource_change_group(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def resource_change_group(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
 
@@ -2188,7 +2035,7 @@ def resource_change_group(params, request, session)
   if params[:group_id].empty?
     if params[:old_group_id]
       _, stderr, retval = run_cmd(
-        session, PCS, 'resource', 'group', 'remove', params[:old_group_id],
+        auth_user, PCS, 'resource', 'group', 'remove', params[:old_group_id],
         params[:resource_id]
       )
       if retval != 0
@@ -2200,7 +2047,7 @@ def resource_change_group(params, request, session)
     return 200
   end
   _, stderr, retval = run_cmd(
-    session,
+    auth_user,
     PCS, 'resource', 'group', 'add', params[:group_id], params[:resource_id]
   )
   if retval != 0
@@ -2211,8 +2058,8 @@ def resource_change_group(params, request, session)
   return 200
 end
 
-def resource_ungroup(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def resource_ungroup(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
 
@@ -2221,7 +2068,7 @@ def resource_ungroup(params, request, session)
   end
   
   _, stderr, retval = run_cmd(
-    session, PCS, 'resource', 'ungroup', params[:group_id]
+    auth_user, PCS, 'resource', 'ungroup', params[:group_id]
   )
   if retval != 0
     return [400, 'Unable to ungroup group ' +
@@ -2231,8 +2078,8 @@ def resource_ungroup(params, request, session)
   return 200
 end
 
-def resource_clone(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def resource_clone(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
 
@@ -2241,7 +2088,7 @@ def resource_clone(params, request, session)
   end
   
   _, stderr, retval = run_cmd(
-    session, PCS, 'resource', 'clone', params[:resource_id]
+    auth_user, PCS, 'resource', 'clone', params[:resource_id]
   )
   if retval != 0
     return [400, 'Unable to create clone resource from ' +
@@ -2251,8 +2098,8 @@ def resource_clone(params, request, session)
   return 200
 end
 
-def resource_unclone(params, request, session)
-  if not allowed_for_local_cluster(session, Permissions::WRITE)
+def resource_unclone(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
 
@@ -2261,7 +2108,7 @@ def resource_unclone(params, request, session)
   end
 
   _, stderr, retval = run_cmd(
-    session, PCS, 'resource', 'unclone', params[:resource_id]
+    auth_user, PCS, 'resource', 'unclone', params[:resource_id]
   )
   if retval != 0
     return [400, 'Unable to unclone ' +
@@ -2271,8 +2118,8 @@ def resource_unclone(params, request, session)
   return 200
 end
 
-def set_resource_utilization(params, reqest, session)
-  unless allowed_for_local_cluster(session, Permissions::WRITE)
+def set_resource_utilization(params, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
 
@@ -2282,10 +2129,10 @@ def set_resource_utilization(params, reqest, session)
 
   res_id = params[:resource_id]
   name = params[:name]
-  value = params[:value] if params[:value] else ''
+  value = params[:value] || ''
 
   _, stderr, retval = run_cmd(
-    session, PCS, 'resource', 'utilization', res_id, "#{name}=#{value}"
+    auth_user, PCS, 'resource', 'utilization', res_id, "#{name}=#{value}"
   )
 
   if retval != 0
@@ -2296,8 +2143,8 @@ def set_resource_utilization(params, reqest, session)
   return 200
 end
 
-def set_node_utilization(params, reqest, session)
-  unless allowed_for_local_cluster(session, Permissions::WRITE)
+def set_node_utilization(params, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
 
@@ -2307,10 +2154,10 @@ def set_node_utilization(params, reqest, session)
 
   node = params[:node]
   name = params[:name]
-  value = params[:value] if params[:value] else ''
+  value = params[:value] || ''
 
   _, stderr, retval = run_cmd(
-    session, PCS, 'node', 'utilization', node, "#{name}=#{value}"
+    auth_user, PCS, 'node', 'utilization', node, "#{name}=#{value}"
   )
 
   if retval != 0
@@ -2320,3 +2167,16 @@ def set_node_utilization(params, reqest, session)
   end
   return 200
 end
+
+def get_cluster_properties_definition(params, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::READ)
+    return 403, 'Permission denied'
+  end
+  stdout, _, retval = run_cmd(
+    auth_user, PCS, 'property', 'get_cluster_properties_definition'
+  )
+  if retval == 0
+    return [200, stdout]
+  end
+  return [400, '{}']
+end
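
The hunks above all make the same change: every pcsd.rb request handler
now receives an auth_user hash (the structure returned by
PCSAuth.loginByToken, see the test_auth.rb changes below) instead of the
old mutable session, checks it with allowed_for_local_cluster(), and
passes it straight through to run_cmd(). A minimal sketch of a handler
in the new style; the endpoint name and command are illustrative only,
not part of this patch:

    # Sketch, not part of the patch: a hypothetical handler following
    # the auth_user convention used throughout pcsd.rb above.
    def example_cluster_status(params, request, auth_user)
      # auth_user is a plain hash such as
      #   {:username => 'user1', :usergroups => ['group1', 'haclient']}
      unless allowed_for_local_cluster(auth_user, Permissions::READ)
        return 403, 'Permission denied'
      end
      stdout, stderr, retval = run_cmd(auth_user, PCS, 'status')
      if retval != 0
        return [400, stderr.join('')]
      end
      return [200, stdout.join('')]
    end
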
diff --git a/pcsd/resource.rb b/pcsd/resource.rb
index aaf61c9..3a079c8 100644
--- a/pcsd/resource.rb
+++ b/pcsd/resource.rb
@@ -1,10 +1,10 @@
 require 'pathname'
 
-def getResourcesGroups(session, get_fence_devices = false, get_all_options = false,
+def getResourcesGroups(auth_user, get_fence_devices = false, get_all_options = false,
   get_operations=false
 )
   stdout, stderror, retval = run_cmd(
-    session, CRM_MON, "--one-shot", "-r", "--as-xml"
+    auth_user, CRM_MON, "--one-shot", "-r", "--as-xml"
   )
   if retval != 0
     return [],[], retval
@@ -60,7 +60,7 @@ def getResourcesGroups(session, get_fence_devices = false, get_all_options = fal
   resource_list = resource_list.sort_by{|a| (a.group ? "1" : "0").to_s + a.group.to_s + "-" +  a.id}
 
   if get_all_options or get_operations
-    stdout, stderror, retval = run_cmd(session, "cibadmin", "-Q", "-l")
+    stdout, stderror, retval = run_cmd(auth_user, "cibadmin", "-Q", "-l")
     cib_output = stdout
     resources_inst_attr_map = {}
     resources_meta_attr_map = {}
@@ -167,7 +167,7 @@ def getAllConstraints(constraints_dom)
   return constraints
 end
 
-def getResourceMetadata(session, resourcepath)
+def getResourceMetadata(auth_user, resourcepath)
   options_required = {}
   options_optional = {}
   long_desc = ""
@@ -193,7 +193,7 @@ def getResourceMetadata(session, resourcepath)
     end
   else
     ENV['OCF_ROOT'] = OCF_ROOT
-    stdout, stderr, retval = run_cmd(session, resourcepath, 'meta-data')
+    stdout, stderr, retval = run_cmd(auth_user, resourcepath, 'meta-data')
     metadata = stdout.join
   end
 
@@ -244,9 +244,11 @@ def getResourceMetadata(session, resourcepath)
   [options_required, options_optional, [short_desc, long_desc]]
 end
 
-def getResourceAgents(session)
+def getResourceAgents(auth_user)
   resource_agent_list = {}
-  stdout, stderr, retval = run_cmd(session, PCS, "resource", "list", "--nodesc")
+  stdout, stderr, retval = run_cmd(
+    auth_user, PCS, "resource", "list", "--nodesc"
+  )
   if retval != 0
     $logger.error("Error running 'pcs resource list --nodesc")
     $logger.error(stdout + stderr)
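
All of the resource.rb helpers consume the same (stdout, stderr, retval)
triple from run_cmd(), where stdout and stderr arrive as arrays of
lines. A sketch of that calling pattern, parsing the crm_mon XML with
REXML; the element and attribute names are assumptions based on
crm_mon's status output, not taken from this patch:

    # Sketch only: listing resources from 'crm_mon --as-xml' output.
    require 'rexml/document'

    stdout, stderr, retval = run_cmd(
      auth_user, CRM_MON, '--one-shot', '-r', '--as-xml'
    )
    if retval == 0
      doc = REXML::Document.new(stdout.join("\n"))
      # assumed schema: <resource id="..." role="..."/> elements
      doc.elements.each('//resource') { |res|
        $logger.info("#{res.attributes['id']}: #{res.attributes['role']}")
      }
    end
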
diff --git a/pcsd/session.rb b/pcsd/session.rb
new file mode 100644
index 0000000..c202632
--- /dev/null
+++ b/pcsd/session.rb
@@ -0,0 +1,71 @@
+require 'rack/session/pool'
+
+class SessionPoolLifetime < Rack::Session::Pool
+
+  def initialize(app, options={})
+    super
+    @pool_timestamp = Hash.new()
+  end
+
+  def call(env)
+    # save session storage to env so we can get it later
+    env[:__session_storage] = self
+    super
+  end
+
+  def get_session(env, sid)
+    with_lock(env) do
+      now = Time.now()
+      # delete the session if expired
+      if @default_options[:expire_after] and sid and @pool_timestamp[sid] and
+        @pool_timestamp[sid] < (now - @default_options[:expire_after])
+      then
+        delete_session(sid)
+      end
+      # create new session if nonexistent
+      unless sid and session = @pool[sid]
+        sid, session = generate_sid, {}
+        @pool.store sid, session
+      end
+      # bump session's access time
+      @pool_timestamp[sid] = now
+      [sid, session]
+    end
+  end
+
+  def set_session(env, session_id, new_session, options)
+    with_lock(env) do
+      @pool.store session_id, new_session
+      # bump session's access time
+      @pool_timestamp[session_id] = Time.now()
+      session_id
+    end
+  end
+
+  def destroy_session(env, session_id, options)
+    with_lock(env) do
+      delete_session(session_id)
+      generate_sid unless options[:drop]
+    end
+  end
+
+  def drop_expired(env)
+    return unless lifetime = @default_options[:expire_after]
+    with_lock(env) {
+      threshold = Time.now() - lifetime
+      @pool_timestamp.select { |sid, timestamp|
+        timestamp < threshold
+      }.keys.each { |sid|
+        delete_session(sid)
+      }
+    }
+  end
+
+  private
+
+  def delete_session(sid)
+    @pool.delete(sid)
+    @pool_timestamp.delete(sid)
+  end
+end
+
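
SessionPoolLifetime is ordinary Rack session middleware (it subclasses
Rack::Session::Pool), so mounting it amounts to a single 'use' line. A
minimal sketch, assuming a Sinatra app; the 30-minute timeout here is an
assumed value, not the one pcsd configures:

    # Sketch only: wiring the new session pool into a Rack/Sinatra app.
    require 'sinatra'
    require './session.rb'

    use SessionPoolLifetime, :expire_after => 60 * 30  # assumed timeout

    get '/example' do
      # call() stored the pool in the env, so expired sessions can also
      # be purged eagerly instead of waiting for their next access
      env[:__session_storage].drop_expired(env)
      session[:hits] = session[:hits].to_i + 1
      "seen #{session[:hits]} times"
    end
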
diff --git a/pcsd/settings.rb.x86_64-linux-gnu.debian b/pcsd/settings.rb.debian
similarity index 86%
rename from pcsd/settings.rb.x86_64-linux-gnu.debian
rename to pcsd/settings.rb.debian
index e67ed54..5fb28da 100644
--- a/pcsd/settings.rb.x86_64-linux-gnu.debian
+++ b/pcsd/settings.rb.debian
@@ -9,8 +9,8 @@ OCF_ROOT = "/usr/lib/ocf"
 HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/"
 PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/"
 NAGIOS_METADATA_DIR = '/usr/share/pacemaker/nagios/plugins-metadata/'
-PENGINE = "/usr/lib/x86_64-linux-gnu/pacemaker/pengine"
-CIB_BINARY = '/usr/lib/x86_64-linux-gnu/pacemaker/cib'
+PENGINE = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/pengine"
+CIB_BINARY = '/usr/lib/DEB_HOST_MULTIARCH/pacemaker/cib'
 CRM_MON = "/usr/sbin/crm_mon"
 CRM_NODE = "/usr/sbin/crm_node"
 CRM_ATTRIBUTE = "/usr/sbin/crm_attribute"
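
The architecture-specific settings files collapse into a single
settings.rb.debian: the literal multiarch triplet is replaced by a
DEB_HOST_MULTIARCH placeholder, presumably substituted at package build
time (for example with the value of
'dpkg-architecture -qDEB_HOST_MULTIARCH'), which is what allows the
i386 variant below to be dropped.
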
diff --git a/pcsd/settings.rb.i386-linux-gnu.debian b/pcsd/settings.rb.i386-linux-gnu.debian
deleted file mode 100644
index 73d2c80..0000000
--- a/pcsd/settings.rb.i386-linux-gnu.debian
+++ /dev/null
@@ -1,24 +0,0 @@
-PCSD_EXEC_LOCATION = '/usr/share/pcsd/'
-PCSD_VAR_LOCATION = '/var/lib/pcsd/'
-
-CRT_FILE = PCSD_VAR_LOCATION + 'pcsd.crt'
-KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
-COOKIE_FILE = PCSD_VAR_LOCATION + 'pcsd.cookiesecret'
-
-OCF_ROOT = "/usr/lib/ocf"
-HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/"
-PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/"
-NAGIOS_METADATA_DIR = '/usr/share/pacemaker/nagios/plugins-metadata/'
-PENGINE = "/usr/lib/i386-linux-gnu/pacemaker/pengine"
-CIB_BINARY = '/usr/lib/i386-linux-gnu/pacemaker/cib'
-CRM_MON = "/usr/sbin/crm_mon"
-CRM_NODE = "/usr/sbin/crm_node"
-CRM_ATTRIBUTE = "/usr/sbin/crm_attribute"
-COROSYNC_BINARIES = "/usr/sbin/"
-CMAN_TOOL = "/usr/sbin/cman_tool"
-PACEMAKERD = "/usr/sbin/pacemakerd"
-CIBADMIN = "/usr/sbin/cibadmin"
-
-SUPERUSER = 'hacluster'
-ADMIN_GROUP = 'haclient'
-$user_pass_file = "pcs_users.conf"
diff --git a/pcsd/test/test_all_suite.rb b/pcsd/test/test_all_suite.rb
index bc02ac8..804e702 100644
--- a/pcsd/test/test_all_suite.rb
+++ b/pcsd/test/test_all_suite.rb
@@ -5,6 +5,7 @@ require 'test_corosyncconf.rb'
 require 'test_cluster.rb'
 require 'test_cluster_entity.rb'
 require 'test_auth.rb'
+require 'test_session.rb'
 require 'test_permissions.rb'
 require 'test_config.rb'
 require 'test_cfgsync.rb'
diff --git a/pcsd/test/test_auth.rb b/pcsd/test/test_auth.rb
index 0e9b1c0..1d8f718 100644
--- a/pcsd/test/test_auth.rb
+++ b/pcsd/test/test_auth.rb
@@ -35,60 +35,48 @@ class TestAuth < Test::Unit::TestCase
     password_file.write(JSON.pretty_generate(users))
     password_file.close()
 
-    session = {}
     cookies = {}
-    result = PCSAuth.loginByToken(session, cookies)
-    assert_equal(false, result)
-    assert_equal({}, session)
+    result = PCSAuth.loginByToken(cookies)
+    assert_equal(nil, result)
 
-    session = {}
     cookies = {'token' => 'tokenX'}
-    result = PCSAuth.loginByToken(session, cookies)
-    assert_equal(false, result)
-    assert_equal({}, session)
+    result = PCSAuth.loginByToken(cookies)
+    assert_equal(nil, result)
 
-    session = {}
     cookies = {'token' => 'token1'}
-    result = PCSAuth.loginByToken(session, cookies)
-    assert_equal(true, result)
+    result = PCSAuth.loginByToken(cookies)
     assert_equal(
       {:username => 'user1', :usergroups => ['group1', 'haclient']},
-      session
+      result
     )
 
-    session = {}
     cookies = {
       'token' => 'token1',
       'CIB_user' => 'userX',
       'CIB_user_groups' => PCSAuth.cookieUserEncode('groupX')
     }
-    result = PCSAuth.loginByToken(session, cookies)
-    assert_equal(true, result)
+    result = PCSAuth.loginByToken(cookies)
     assert_equal(
       {:username => 'user1', :usergroups => ['group1', 'haclient']},
-      session
+      result
     )
 
-    session = {}
     cookies = {'token' => 'tokenS'}
-    result = PCSAuth.loginByToken(session, cookies)
-    assert_equal(true, result)
+    result = PCSAuth.loginByToken(cookies)
     assert_equal(
       {:username => SUPERUSER, :usergroups => []},
-      session
+      result
     )
 
-    session = {}
     cookies = {
       'token' => 'tokenS',
       'CIB_user' => 'userX',
       'CIB_user_groups' => PCSAuth.cookieUserEncode('groupX')
     }
-    result = PCSAuth.loginByToken(session, cookies)
-    assert_equal(true, result)
+    result = PCSAuth.loginByToken(cookies)
     assert_equal(
       {:username => 'userX', :usergroups => ['groupX']},
-      session
+      result
     )
   end
 
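As the reworked assertions show, PCSAuth.loginByToken no longer fills in
a caller-supplied session hash: it returns the auth_user hash on success
and nil on failure. A caller therefore reduces to roughly the following;
the 401 response is illustrative, not taken from this patch:

    # Sketch only: consuming the new loginByToken return value.
    auth_user = PCSAuth.loginByToken(cookies)
    return [401, 'Not authenticated'] unless auth_user
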
diff --git a/pcsd/test/test_session.rb b/pcsd/test/test_session.rb
new file mode 100644
index 0000000..e72bf01
--- /dev/null
+++ b/pcsd/test/test_session.rb
@@ -0,0 +1,71 @@
+require 'test/unit'
+
+require 'pcsd_test_utils.rb'
+require 'session.rb'
+
+class TestSessionPool < Test::Unit::TestCase
+
+  def setup()
+    @env = {
+      'rack.multithread' => true,
+    }
+  end
+
+  def fixture_get_pool(lifetime)
+    pool = SessionPoolLifetime.new(nil, {:expire_after => lifetime,})
+    (1..3).each { |i| pool.set_session(@env, "sid#{i}", {'value' => i}, {}) }
+    return pool
+  end
+
+  def test_drop_expired_on_get()
+    lifetime = 2
+    pool = fixture_get_pool(lifetime)
+    # touch sessions each second
+    lifetime.times {
+      sleep(1)
+      assert_equal({'value' => 1}, pool.get_session(@env, 'sid1')[1])
+      assert_equal({'value' => 3}, pool.get_session(@env, 'sid3')[1])
+    }
+    # after the configured lifetime passes, the unused session is
+    # removed on access
+    sleep(1)
+    assert_equal({'value' => 1}, pool.get_session(@env, 'sid1')[1])
+    assert_equal({'value' => 3}, pool.get_session(@env, 'sid3')[1])
+    assert_equal({}, pool.get_session(@env, 'sid2')[1])
+  end
+
+  def test_drop_expired_explicit()
+    lifetime = 2
+    pool = fixture_get_pool(lifetime)
+    # touch sessions each second (otherwise they will be removed on access)
+    lifetime.times {
+      sleep(1)
+      pool.get_session(@env, 'sid2')
+      pool.set_session(@env, 'sid3', {'value' => 33}, {})
+    }
+    sleep(1)
+
+    pool.drop_expired(@env)
+    assert_equal(
+      {
+        'sid2' => {'value' => 2,},
+        'sid3' => {'value' => 33,},
+      },
+      pool.pool
+    )
+  end
+
+  def test_no_lifetime()
+    pool = fixture_get_pool(nil)
+    sleep(1)
+    assert_equal({'value' => 1}, pool.get_session(@env, 'sid1')[1])
+    assert_equal({'value' => 2}, pool.get_session(@env, 'sid2')[1])
+    assert_equal({'value' => 3}, pool.get_session(@env, 'sid3')[1])
+    sleep(1)
+    pool.drop_expired(@env)
+    assert_equal({'value' => 1}, pool.get_session(@env, 'sid1')[1])
+    assert_equal({'value' => 2}, pool.get_session(@env, 'sid2')[1])
+    assert_equal({'value' => 3}, pool.get_session(@env, 'sid3')[1])
+  end
+
+end
+
diff --git a/pcsd/views/_configure.erb b/pcsd/views/_configure.erb
index 9331621..421f384 100644
--- a/pcsd/views/_configure.erb
+++ b/pcsd/views/_configure.erb
@@ -14,24 +14,32 @@
 </tr>
 <tr id="configure_list_row" {{bind-attr style="Pcs.configure_page"}}>
   <td id="config" colspan=3>
-    <form>
-      <% @config_options.each { |page, options| %>
-        <table>
-          <% options.each { |co| %>
-            <tr title="<%= h(co.desc) %>">
-              <td class="label"><%= co.name %>:</td>
-              <td><%= co.html %><span class="units"><%= co.units %></span></td>
-            </tr>
-          <% } %>
-        </table>
-      <br>
-      <% } %>
-      <% if @config_options.length != 0 %>
-        <input type="submit" style="margin-left:20px;" class="text_field"
-          onclick="update_cluster_settings($(this).parent('form')); return false;"
-          value="Apply Changes"
-        >
-      <% end %>
+    {{input
+        type="text"
+        value=Pcs.settingsController.filter
+        placeholder="Filter"
+    }}
+    {{#if Pcs.settingsController.show_advanced}}
+      <button onclick="Pcs.settingsController.set('show_advanced', false);">Hide advanced settings</button>
+    {{else}}
+      <button onclick="Pcs.settingsController.set('show_advanced', true);">Show advanced settings</button>
+    {{/if}}
+    <form id="cluster_properties">
+      <table>
+        {{#each property in Pcs.settingsController.filtered}}
+          {{cluster-property prop=property name=property.name}}
+        {{else}}
+        <tr><td>No cluster properties available.</td></tr>
+        {{/each}}
+      </table>
+      {{#if Pcs.settingsController.filtered}}
+        <button onclick="update_cluster_settings(); return false;">
+          Apply Changes
+        </button>
+      {{/if}}
+      <button onclick="show_loading_screen(); refresh_cluster_properties(); return false;">
+        Refresh
+      </button>
     </form>
   </td>
 </tr>
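
The static, server-rendered settings form is replaced here by an
Ember-driven one: the template is fed by Pcs.settingsController, whose
data presumably comes from the new get_cluster_properties_definition
handler added to pcsd.rb above, and it gains a filter box plus a toggle
for advanced properties.
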
diff --git a/pcsd/views/_dialogs.erb b/pcsd/views/_dialogs.erb
index 02d8eab..8bfa5c6 100644
--- a/pcsd/views/_dialogs.erb
+++ b/pcsd/views/_dialogs.erb
@@ -1,3 +1,22 @@
+<div id="dialog_login" style="display:none;">
+  <form id="login_form">
+    <div>Your session has expired. Please log in again.</div>
+    <div id="login_form_denied" style="color:red; display:none;">
+      Bad username or password
+    </div>
+    <table>
+      <tr>
+        <td><label for="login_form_username">Username:</label></td>
+        <td><input type="text" id="login_form_username" name="username"></td>
+      </tr>
+      <tr>
+        <td><label for="login_form_password">Password:</label></td>
+        <td><input type="password" id="login_form_password" name="password"></td>
+      </tr>
+    </table>
+  </form>
+</div>
+
 <div id="auth_nodes" style="display:none;">
   <form id="auth_nodes_form">
     Enter password for user 'hacluster' to authenticate nodes.<br>
@@ -18,7 +37,7 @@
 </div>
 
 <div id="dialog_verify_remove_nodes" style="display:none;">
-  <p style="font-size:12px;">Are you sure you want to remove the following nodes(s)?</p>
+  <p style="font-size:12px;">Are you sure you want to remove the following node(s)?</p>
   <span class="name_list"></span>
   {{#if Pcs.is_cman_with_udpu_transport}}
     <p style="color: orange">This is a CMAN cluster with UDPU transport, cluster restart is required to apply node removal.</p>
diff --git a/pcsd/views/_permissions_cluster.erb b/pcsd/views/_permissions_cluster.erb
index 4048366..2aae036 100644
--- a/pcsd/views/_permissions_cluster.erb
+++ b/pcsd/views/_permissions_cluster.erb
@@ -26,7 +26,7 @@
     <script type="text/javascript">
       permissions_dependencies["<%= h(@cluster_name) %>"] = <%= @permissions_dependencies.to_json %>;
     </script>
-    <form method="post" action="/permissions_save/">
+    <form method="post" action="/managec/<%= h(@cluster_name) %>/permissions_save/">
       <input type="hidden" name="cluster_name" value="<%= h(@cluster_name) %>">
       <table class="datatable">
         <tr>
diff --git a/pcsd/views/login.erb b/pcsd/views/login.erb
index f004c64..4a9eaa3 100644
--- a/pcsd/views/login.erb
+++ b/pcsd/views/login.erb
@@ -1,12 +1,26 @@
 <div style="width:960px;margin-top: 25px;text-align:center;">
-  <% if params["badlogin"] == "1" %>
+  <% if session[:bad_login_name] %>
     <span style="color:red;font-size:24px;">Bad username or password</span>
   <% end %>
   <form action="/login" method="post">
     <table style="margin-left: auto; margin-right: auto;">
-      <tr><td style="text-align:right;">Username:</td><td><input type=text <%= "value='" +session[:bad_login_name] + "'" if params["badlogin"] and session[:bad_login_name] %> name=username></td></tr>
-      <tr><td style="text-align:right;">Password:</td><td><input type=password name=password></td></tr>
-      <tr><td align=center colspan=2><input type=submit name=Login value=Login></td></tr>
+      <tr>
+        <td style="text-align:right;">Username:</td>
+        <td>
+          <input type="text" name="username" <%=
+            "value='#{h(session[:bad_login_name])}'" if session[:bad_login_name]
+          %>>
+        </td>
+      </tr>
+      <tr>
+        <td style="text-align:right;">Password:</td>
+        <td><input type="password" name="password"></td>
+      </tr>
+      <tr>
+        <td align="center" colspan="2">
+          <input type="submit" name="Login" value="Login">
+        </td>
+      </tr>
     </table>
   </form>
 </div>
diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
index 4e6aff3..ae3c478 100644
--- a/pcsd/views/main.erb
+++ b/pcsd/views/main.erb
@@ -551,6 +551,43 @@ Use the 'Add' button to submit the form.">
       {{/if}}
     </table>
   </script>
+
+  <script type="text/x-handlebars" data-template-name="components/value-selector">
+    {{selector-helper content value prompt}}
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/cluster-property">
+    <td>
+      <span style="float: left;">
+        {{prop.readable_name}}
+      </span>
+      <span style="margin-left: 0.5em" class="infoicon sprites" {{bind-attr title=prop.description}}></span>
+    </td>
+    <td>
+      {{#if prop.is_boolean}}
+        {{value-selector
+            prompt="(Default)"
+            content=boolean_options
+            value=prop.cur_val
+            name=prop.form_name
+        }}
+      {{else}}{{#if prop.is_enum}}
+        {{value-selector
+            prompt="(Default)"
+            content=prop.enum_show
+            value=prop.cur_val
+            name=prop.form_name
+        }}
+      {{else}}
+      {{input
+          type="text"
+          name=prop.form_name
+          value=prop.cur_val
+          placeholder=prop.default
+      }}
+      {{/if}}{{/if}}
+    </td>
+  </script>
   
   <script type="text/x-handlebars">
 <div id="wrapper">
diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb
index 1b53ec9..885b327 100644
--- a/pcsd/views/manage.erb
+++ b/pcsd/views/manage.erb
@@ -427,39 +427,3 @@ Specify ring 1 address for each node if you want to use RRP." %>
     </div>
   </form>
 </div>
-<div id="manage_error" style="display: none;">
-  <br>
-  <% if @error == "badclustername" %>
-    The cluster name, '<%=@errorval%>' has not yet been configured in pcsd.  Use 'Add Existing' to add a node which is part of the cluster.
-  <% end %>
-  <% if @error == "noname" %>
-    The node, '<%= @errorval %>', does not currently have a cluster configured.  You must create a cluster using this node before adding it to pcsd.
-  <% end %>
-  <% if @error == "duplicatename" %>
-    The cluster name, '<%=@errorval%>' has already been added to pcsd.  You may not add two clusters with the same name into pcsd.
-  <% end %>
-  <% if @error == "duplicatenodename" %>
-    The node, '<%=@errorval%>' is already configured in pcsd.  You may not add a node to two different clusters in pcsd.
-  <% end %>
-  <% if @error == "cannotgettokens" %>
-    Unable to get authentication info from cluster '<%=@errorval%>'.
-  <% end %>
-  <% if @error == "cannotsavetokens" %>
-    Unable to authenticate all nodes on node '<%=@errorval%>'.
-  <% end %>
-  <% if @error == "authimposible" %>
-    Operation successful.<br>Unable to do correct authentication of cluster because it is running old version of pcs/pcsd.
-  <% end %>
-  <% if @error == "unabletocreate" %>
-    Unable to create new cluster. If cluster already exists on one or more of the nodes run 'pcs cluster destroy' on all nodes to remove current cluster configuration.<br><br><%=nl2br(@errorval)%>
-  <% end %>
-  <% if @error == "configversionsconflict" %>
-    Configuration conflict detected.<br><br>Some nodes had a newer configuration than the local node.  Local node's configuration was updated.  Please repeat the last action if appropriate.
-  <% end %>
-  <% if @error == "permissiondenied" %>
-    Permission denied.
-  <% end %>
-  <% if @error == "genericerror" %>
-    <%=nl2br(@errorval)%>
-  <% end %>
-</div>
diff --git a/pcsd/views/permissions.erb b/pcsd/views/permissions.erb
index 1e38d7e..bfa5638 100644
--- a/pcsd/views/permissions.erb
+++ b/pcsd/views/permissions.erb
@@ -52,3 +52,4 @@
     </td>
   </tr>
 </table>
+<%= erb :_dialogs %>
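
Rendering the shared _dialogs partial on the permissions page makes the
new session-expired login dialog from _dialogs.erb above available there
as well, so a timed-out session can re-authenticate in place.
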
diff --git a/setup.py b/setup.py
index c76d2b2..5c08e07 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
 from distutils.core import setup
 
 setup(name='pcs',
-    version='0.9.148',
+    version='0.9.149',
     description='Pacemaker Configuration System',
     author='Chris Feist',
     author_email='cfeist at redhat.com',

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/pcs.git



More information about the Debian-HA-Commits mailing list