[Debian-ha-commits] [pcs] 01/04: New upstream version 0.9.161

Valentin Vidic vvidic-guest at moszumanska.debian.org
Fri Nov 3 17:35:17 UTC 2017


This is an automated email from the git hooks/post-receive script.

vvidic-guest pushed a commit to branch master
in repository pcs.

commit f341b6cbc7517fc95d375c473805df1e89de3c17
Author: Valentin Vidic <Valentin.Vidic at CARNet.hr>
Date:   Fri Nov 3 10:21:40 2017 +0100

    New upstream version 0.9.161
---
 CHANGELOG.md                                       |   24 +
 Makefile                                           |    3 +
 README.md                                          |   12 +-
 pcs/app.py                                         |   20 +-
 pcs/cli/common/capabilities.py                     |   51 +
 pcs/cli/common/test/test_capabilities.py           |   75 +
 pcs/common/tools.py                                |    6 +-
 pcs/lib/commands/test/test_resource_agent.py       |   84 ++
 pcs/pcs.8                                          |    4 +-
 pcs/resource.py                                    |   55 +-
 pcs/settings_default.py                            |    2 +-
 pcs/test/cib_resource/test_create.py               |   24 +-
 pcs/test/cib_resource/test_stonith_create.py       |   21 +-
 pcs/test/resources/capabilities.xml                |   31 +
 .../resource_agent_ocf_heartbeat_dummy_utf8.xml    |   29 +
 pcs/test/test_cluster_pcmk_remote.py               |   13 +
 pcs/test/test_resource.py                          |   51 +-
 pcs/test/test_stonith.py                           |   42 +-
 pcs/test/tools/cib.py                              |   24 +-
 pcs/usage.py                                       |    3 +-
 pcs/utils.py                                       |   13 +-
 pcsd/bootstrap.rb                                  |   12 +-
 pcsd/capabilities.xml                              | 1536 ++++++++++++++++++++
 pcsd/pcs.rb                                        |   27 +-
 pcsd/pcsd.8                                        |    2 +-
 pcsd/pcsd.rb                                       |   40 +-
 pcsd/pcsd.service                                  |    2 +
 pcsd/pcsd.service.debian                           |    2 +
 pcsd/public/js/nodes-ember.js                      |   48 +-
 pcsd/remote.rb                                     |    7 +
 pcsd/settings.rb.debian                            |    1 +
 setup.py                                           |    2 +-
 32 files changed, 2166 insertions(+), 100 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 751d1aa..7990e68 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,29 @@
 # Change Log
 
+## [0.9.161] - 2017-11-02
+
+### Added
+- List of pcs and pcsd capabilities ([rhbz#1230919])
+
+### Fixed
+- Fixed `pcs cluster auth` when already authenticated and using different port
+  ([rhbz#1415197])
+- It is now possible to restart a bundle resource on one node ([rhbz#1501274])
+- `resource update` no longer exits with an error when the `remote-node` meta
+  attribute is set to the same value that it already has
+  ([rhbz#1502715], [ghissue#145])
+- Listing and describing resource and stonith agents no longer crashes when
+  agents' metadata contain non-ascii characters ([rhbz#1503110], [ghissue#151])
+
+[ghissue#145]: https://github.com/ClusterLabs/pcs/issues/145
+[ghissue#151]: https://github.com/ClusterLabs/pcs/issues/151
+[rhbz#1230919]: https://bugzilla.redhat.com/show_bug.cgi?id=1230919
+[rhbz#1415197]: https://bugzilla.redhat.com/show_bug.cgi?id=1415197
+[rhbz#1501274]: https://bugzilla.redhat.com/show_bug.cgi?id=1501274
+[rhbz#1502715]: https://bugzilla.redhat.com/show_bug.cgi?id=1502715
+[rhbz#1503110]: https://bugzilla.redhat.com/show_bug.cgi?id=1503110
+
+
 ## [0.9.160] - 2017-10-09
 
 ### Added
diff --git a/Makefile b/Makefile
index ba23723..6423be3 100644
--- a/Makefile
+++ b/Makefile
@@ -28,6 +28,9 @@ endif
 ifeq ($(PYTHON_SITELIB), /usr/lib/python2.7/dist-packages)
   EXTRA_SETUP_OPTS="--install-layout=deb"
 endif
+ifeq ($(PYTHON_SITELIB), /usr/lib/python3/dist-packages)
+  EXTRA_SETUP_OPTS="--install-layout=deb"
+endif
 
 # Check for systemd presence
 ifeq ($(SYSTEMCTL_OVERRIDE),true)
diff --git a/README.md b/README.md
index b45ea12..d46aead 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@ pcs daemon, which operates as a remote server for pcs and provides a web UI.
 ### Installation from Source
 
 These are the runtime dependencies of pcs and pcsd:
-* python 2.6+
+* python 2.7+
 * python-lxml / python3-lxml
 * python-pycurl / python3-pycurl
 * python-setuptools / python3-setuptools
@@ -30,6 +30,8 @@ If you plan to manage Corosync 1.x based clusters, you will also need:
 * ccs
 
 It is however highly recommended for new clusters to use Corosync 2.x.
+Support for Corosync 1.x and CMAN has been deprecated in 0.9.160 and will be
+removed.
 
 Apart from the dependencies listed above, these are also required for
 installation:
@@ -51,8 +53,8 @@ compiled.
 
 To install pcs and pcsd run the following in terminal:
 ```shell
-# tar -xzvf pcs-0.9.159.tar.gz
-# cd pcs-0.9.159
+# tar -xzvf pcs-0.9.160.tar.gz
+# cd pcs-0.9.160
 # make install
 # make install_pcsd
 ```
@@ -73,10 +75,10 @@ Start pcsd and make it start on boot:
 ### Packages
 
 Currently this is built into Fedora, RHEL and its clones and Debian and its
-clones.
+derivatives.
 * [Fedora package git repositories](http://pkgs.fedoraproject.org/cgit/rpms/pcs.git/)
 * [Current Fedora .spec](http://pkgs.fedoraproject.org/cgit/rpms/pcs.git/tree/pcs.spec)
-* [Debian-HA project home page](http://debian-ha.alioth.debian.org/)
+* [Debian-HA project home page](https://wiki.debian.org/Debian-HA)
 
 ---
 
diff --git a/pcs/app.py b/pcs/app.py
index f5cd2a8..6a8e650 100644
--- a/pcs/app.py
+++ b/pcs/app.py
@@ -29,7 +29,11 @@ from pcs import (
     alert,
 )
 
-from pcs.cli.common import completion, parse_args
+from pcs.cli.common import (
+    capabilities,
+    completion,
+    parse_args,
+)
 
 
 logging.basicConfig()
@@ -75,6 +79,13 @@ def main(argv=None):
         usage.main()
         sys.exit(1)
     argv = parse_args.filter_out_options(argv)
+
+    full = False
+    for option, dummy_value in pcs_options:
+        if option == "--full":
+            full = True
+            break
+
     for o, a in pcs_options:
         if not o in utils.pcs_options:
             if o in ["--watchdog", "--device"]:
@@ -104,6 +115,13 @@ def main(argv=None):
             settings.cluster_conf_file = a
         elif o == "--version":
             print(settings.pcs_version)
+            if full:
+                print(" ".join(
+                    sorted([
+                        feat["id"]
+                        for feat in capabilities.get_pcs_capabilities()
+                    ])
+                ))
             sys.exit()
         elif o == "--fullhelp":
             usage.full_usage()
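
For context, `pcs --version --full` is the user-facing entry point for the capability
listing wired in above: it prints the pcs version followed by a space-separated, sorted
list of capability ids. A minimal sketch of that formatting step; the sample ids come
from the test fixture added later in this commit (pcs/test/resources/capabilities.xml),
not from the real pcsd/capabilities.xml:

```python
# Sketch only: mirrors the sort/join done in app.py above.
# The ids below stand in for whatever get_pcs_capabilities() returns on a
# real installation.
pcs_capabilities = [
    {"id": "test.in-pcs", "description": "This capability is available in pcs."},
    {"id": "test.both", "description": "This capability is available in both pcs and pcsd."},
]

print("0.9.161")
print(" ".join(sorted(feat["id"] for feat in pcs_capabilities)))
# Prints:
# 0.9.161
# test.both test.in-pcs
```
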
diff --git a/pcs/cli/common/capabilities.py b/pcs/cli/common/capabilities.py
new file mode 100644
index 0000000..90d7ff1
--- /dev/null
+++ b/pcs/cli/common/capabilities.py
@@ -0,0 +1,51 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+)
+
+from lxml import etree
+import os.path
+from textwrap import dedent
+
+from pcs.cli.common.console_report import error
+from pcs.common.tools import xml_fromstring
+from pcs.utils import get_pcsd_dir
+
+
+def get_capabilities_definition():
+    """
+    Read and parse capabilities file
+
+    The point is to return all data in python structures for further processing.
+    """
+    filename = os.path.join(get_pcsd_dir(), "capabilities.xml")
+    try:
+        with open(filename, mode="r") as file_:
+            capabilities_xml = xml_fromstring(file_.read())
+    except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e:
+        raise error(
+            "Cannot read capabilities definition file '{0}': '{1}'"
+            .format(filename, str(e))
+        )
+    capabilities = []
+    for feat_xml in capabilities_xml.findall(".//capability"):
+        feat = dict(feat_xml.attrib)
+        desc = feat_xml.find("./description")
+        # dedent and strip remove indentation in the XML file
+        feat["description"] = "" if desc is None else dedent(desc.text).strip()
+        capabilities.append(feat)
+    return capabilities
+
+def get_pcs_capabilities():
+    """
+    Get pcs capabilities from the capabilities file
+    """
+    return [
+        {
+            "id": feat["id"],
+            "description": feat["description"],
+        }
+        for feat in get_capabilities_definition()
+        if feat["in-pcs"] == "1"
+    ]
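
As a self-contained illustration of what the module above does, here is a sketch that
parses a capability list in the same XML format as the test fixture further down
(pcs/test/resources/capabilities.xml) and filters it the way get_pcs_capabilities()
does; unlike the real code it works on an inline string rather than the installed
pcsd/capabilities.xml:

```python
from textwrap import dedent
from lxml import etree

CAPABILITIES_XML = """\
<pcs-capabilities>
  <capability-list>
    <capability id="test.in-pcs" in-pcs="1" in-pcsd="0">
      <description>
        This capability is available in pcs.
      </description>
    </capability>
    <capability id="test.in-pcsd" in-pcs="0" in-pcsd="1"/>
  </capability-list>
</pcs-capabilities>
"""

def parse_capabilities(xml_text):
    # Mirror get_capabilities_definition(): one dict per <capability> element,
    # with the dedented and stripped description text added.
    root = etree.fromstring(xml_text.encode("utf-8"))
    capabilities = []
    for feat_xml in root.findall(".//capability"):
        feat = dict(feat_xml.attrib)
        desc = feat_xml.find("./description")
        feat["description"] = "" if desc is None else dedent(desc.text).strip()
        capabilities.append(feat)
    return capabilities

# Mirror get_pcs_capabilities(): keep only entries flagged as available in pcs.
pcs_only = [
    {"id": feat["id"], "description": feat["description"]}
    for feat in parse_capabilities(CAPABILITIES_XML)
    if feat["in-pcs"] == "1"
]
print(pcs_only)
# [{'id': 'test.in-pcs', 'description': 'This capability is available in pcs.'}]
```
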
diff --git a/pcs/cli/common/test/test_capabilities.py b/pcs/cli/common/test/test_capabilities.py
new file mode 100644
index 0000000..af23485
--- /dev/null
+++ b/pcs/cli/common/test/test_capabilities.py
@@ -0,0 +1,75 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+)
+
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_unittest import mock, TestCase
+
+from pcs.cli.common import capabilities
+
+
+@mock.patch("pcs.cli.common.capabilities.get_pcsd_dir", lambda: rc(""))
+class Capabilities(TestCase):
+    def test_get_definition(self):
+        self.assertEqual(
+            capabilities.get_capabilities_definition(),
+            [
+                {
+                    "id": "test.in-pcs",
+                    "in-pcs": "1",
+                    "in-pcsd": "0",
+                    "description": "This capability is available in pcs.",
+                },
+                {
+                    "id": "test.in-pcsd",
+                    "in-pcs": "0",
+                    "in-pcsd": "1",
+                    "description": "This capability is available in pcsd.",
+                },
+                {
+                    "id": "test.both",
+                    "in-pcs": "1",
+                    "in-pcsd": "1",
+                    "description":
+                        "This capability is available in both pcs and pcsd.",
+                },
+                {
+                    "id": "test.empty-description",
+                    "in-pcs": "1",
+                    "in-pcsd": "1",
+                    "description": "",
+                },
+                {
+                    "id": "test.no-description",
+                    "in-pcs": "1",
+                    "in-pcsd": "1",
+                    "description": "",
+                },
+            ]
+        )
+
+    def test_get_pcs(self):
+        self.assertEqual(
+            capabilities.get_pcs_capabilities(),
+            [
+                {
+                    "id": "test.in-pcs",
+                    "description": "This capability is available in pcs.",
+                },
+                {
+                    "id": "test.both",
+                    "description":
+                        "This capability is available in both pcs and pcsd.",
+                },
+                {
+                    "id": "test.empty-description",
+                    "description": "",
+                },
+                {
+                    "id": "test.no-description",
+                    "description": "",
+                },
+            ]
+        )
diff --git a/pcs/common/tools.py b/pcs/common/tools.py
index 8b46208..ac7a0d4 100644
--- a/pcs/common/tools.py
+++ b/pcs/common/tools.py
@@ -4,9 +4,11 @@ from __future__ import (
     print_function,
 )
 
+import sys
 from lxml import etree
 import threading
 
+_PYTHON2 = sys.version[0] == "2"
 
 def simple_cache(func):
     cache = {
@@ -68,4 +70,6 @@ def xml_fromstring(xml):
     # ValueError: Unicode strings with encoding declaration are not supported.
     # Please use bytes input or XML fragments without declaration.
     # So we encode the string to bytes.
-    return etree.fromstring(xml.encode("utf-8"))
+    # In python2 we cannot do that as it causes a UnicodeDecodeError if the xml
+    # contains a non-ascii character.
+    return etree.fromstring(xml if _PYTHON2 else xml.encode("utf-8"))
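
The comment above is easiest to see with a tiny standalone example (Python 3): lxml
rejects unicode input that carries an encoding declaration, while the same document
passed as bytes parses fine. On Python 2 the .encode("utf-8") step is skipped, likely
because callers may pass byte strings there, and calling .encode() on a byte string
first triggers an implicit ascii decode, hence the UnicodeDecodeError mentioned above.

```python
from lxml import etree

xml = u'<?xml version="1.0" encoding="utf-8"?><agent shortdesc="\u00ae"/>'

try:
    etree.fromstring(xml)              # unicode string + encoding declaration
except ValueError as e:
    print("rejected:", e)              # "Unicode strings with encoding declaration are not supported..."

root = etree.fromstring(xml.encode("utf-8"))   # bytes input is accepted
print(root.get("shortdesc"))           # ®
```
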
diff --git a/pcs/lib/commands/test/test_resource_agent.py b/pcs/lib/commands/test/test_resource_agent.py
index 6e02941..4230cbc 100644
--- a/pcs/lib/commands/test/test_resource_agent.py
+++ b/pcs/lib/commands/test/test_resource_agent.py
@@ -1,3 +1,4 @@
+# coding=utf-8
 from __future__ import (
     absolute_import,
     division,
@@ -8,6 +9,7 @@ import logging
 from lxml import etree
 
 from pcs.test.tools.assertions import assert_raise_library_error, start_tag_error_text
+from pcs.test.tools.command_env import get_env_tools
 from pcs.test.tools.custom_mock import MockLibraryReportProcessor
 from pcs.test.tools.pcs_unittest import mock, TestCase
 
@@ -380,3 +382,85 @@ class TestDescribeAgent(TestCase):
 
         self.assertEqual(len(mock_metadata.mock_calls), 1)
         mock_guess.assert_not_called()
+
+
+class DescribeAgentUtf8(TestCase):
+    def setUp(self):
+        self.env_assist, self.config = get_env_tools(test_case=self)
+        self.config.runner.pcmk.load_agent(
+            agent_filename="resource_agent_ocf_heartbeat_dummy_utf8.xml"
+        )
+
+    def test_describe(self):
+        name = "ocf:heartbeat:Dummy"
+        self.assertEqual(
+            lib.describe_agent(self.env_assist.get_env(), name),
+            {
+                "name": name,
+                "shortdesc": u"Example stateless resource agent: ®",
+                "longdesc": u"This is a Dummy Resource Agent for testing utf-8"
+                    u" in metadata: ®"
+                ,
+                "parameters": [
+                    {
+                        "advanced": False,
+                        "default": u"/var/run/resource-agents/Dummy-®.state",
+                        "deprecated": False,
+                        "longdesc":
+                            u"Location to store the resource state in: ®",
+                        "name": u"state-®",
+                        "obsoletes": None,
+                        "pcs_deprecated_warning": "",
+                        "required": False,
+                        "shortdesc": u"State file: ®",
+                        "type": "string",
+                    },
+                    {
+                        "advanced": True,
+                        "default": 0,
+                        "deprecated": False,
+                        "longdesc": "Set to 1 to turn on resource agent tracing"
+                            " (expect large output) The trace output will be "
+                            "saved to trace_file, if set, or by default to "
+                            "$HA_VARRUN/ra_trace/<type>/<id>.<action>."
+                            "<timestamp> e.g. $HA_VARRUN/ra_trace/oracle/db."
+                            "start.2012-11-27.08:37:08",
+                        "name": "trace_ra",
+                        "obsoletes": None,
+                        "pcs_deprecated_warning": "",
+                        "required": False,
+                        "shortdesc": "Set to 1 to turn on resource agent "
+                            "tracing (expect large output)",
+                        "type": "integer",
+                    },
+                    {
+                        "advanced": True,
+                        "default": "",
+                        "deprecated": False,
+                        "longdesc": "Path to a file to store resource agent "
+                            "tracing log",
+                        "name": "trace_file",
+                        "obsoletes": None,
+                        "pcs_deprecated_warning": "",
+                        "required": False,
+                        "shortdesc": "Path to a file to store resource agent "
+                            "tracing log",
+                        "type": "string",
+                    }
+                ],
+                "actions": [
+                    {"name": "start", "timeout": "20"},
+                    {"name": "stop", "timeout": "20"},
+                    {"name": "monitor", "interval": "10", "timeout": "20"},
+                    {"name": "meta-data", "timeout": "5"},
+                    {"name": "validate-all", "timeout": "20"},
+                    {"name": u"custom-®", "timeout": "20"},
+                ],
+                "default_actions": [
+                    {"name": "start", "interval": "0s", "timeout": "20"},
+                    {"name": "stop", "interval": "0s", "timeout": "20"},
+                    {"name": "monitor", "interval": "10", "timeout": "20"},
+                    {"name": u"custom-®", "interval": "0s", "timeout": "20"},
+                ],
+            }
+        )
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 1944212..6c47103 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "October 2017" "pcs 0.9.160" "System Administration Utilities"
+.TH PCS "8" "November 2017" "pcs 0.9.161" "System Administration Utilities"
 .SH NAME
 pcs \- pacemaker/corosync configuration system
 .SH SYNOPSIS
@@ -18,7 +18,7 @@ Perform actions on file instead of active CIB.
 Print all network traffic and external commands run.
 .TP
 \fB\-\-version\fR
-Print pcs version information.
+Print pcs version information. List pcs capabilities if \fB\-\-full\fR is specified.
 .TP
 \fB\-\-request\-timeout\fR=<timeout>
 Timeout for each outgoing request to another node in seconds. Default is 60s.
diff --git a/pcs/resource.py b/pcs/resource.py
index 3812cb7..1a46ac7 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -281,10 +281,12 @@ def resource_list_options(lib, argv, modifiers):
 
 
 def _format_agent_description(description, stonith=False, show_advanced=False):
+    # We are getting data from XML which may contain unicode strings, hence
+    # we must use unicode format strings (u"...".format(...))
     output = []
 
     if description.get("name") and description.get("shortdesc"):
-        output.append("{0} - {1}".format(
+        output.append(u"{0} - {1}".format(
             description["name"],
             _format_desc(
                 len(description["name"] + " - "),
@@ -319,7 +321,7 @@ def _format_agent_description(description, stonith=False, show_advanced=False):
                     param_desc = "No description available"
             if param.get("pcs_deprecated_warning"):
                 param_desc += " WARNING: " + param["pcs_deprecated_warning"]
-            output_params.append("  {0}: {1}".format(
+            output_params.append(u"  {0}: {1}".format(
                 param_title,
                 _format_desc(len(param_title) + 4, param_desc)
             ))
@@ -334,9 +336,9 @@ def _format_agent_description(description, stonith=False, show_advanced=False):
     if description.get("actions"):
         output_actions = []
         for action in description["default_actions"]:
-            parts = ["  {0}:".format(action.get("name", ""))]
+            parts = [u"  {0}:".format(action.get("name", ""))]
             parts.extend([
-                "{0}={1}".format(name, value)
+                u"{0}={1}".format(name, value)
                 for name, value in sorted(action.items())
                 if name != "name"
             ])
@@ -585,6 +587,7 @@ def resource_move(argv,clear=False,ban=False):
 
     if clear:
         if dest_node:
+            # both --host and --node work, but --host is deprecated
             output,ret = utils.run(["crm_resource", "--resource", resource_id, "--clear", "--host", dest_node] + other_options)
         else:
             output,ret = utils.run(["crm_resource", "--resource", resource_id, "--clear"] + other_options)
@@ -709,11 +712,6 @@ def resource_update(res_id,args, deal_with_guest_change=True):
 
 # Extract operation arguments
     ra_values, op_values, meta_values = parse_resource_options(args)
-    if deal_with_guest_change:
-        _detect_guest_change(
-            prepare_options(meta_values),
-            "--force" in utils.pcs_options,
-        )
 
     wait = False
     wait_timeout = None
@@ -797,6 +795,27 @@ def resource_update(res_id,args, deal_with_guest_change=True):
             instance_attributes.appendChild(ia)
 
     remote_node_name = utils.dom_get_resource_remote_node_name(resource)
+
+    if remote_node_name == guest_node.get_guest_option_value(prepare_options(meta_values)):
+        deal_with_guest_change = False
+
+    # The "remote-node" meta attribute makes sense (and causes creation of
+    # inner pacemaker resource) only for primitive. The meta attribute
+    # "remote-node" has no special meaining for clone/master. So there is no
+    # need for checking this attribute in clone/master.
+    #
+    # It is ok to not to check it until this point in this function:
+    # 1) Only master/clone element is updated if the parameter "res_id" is an id
+    # of the clone/master element. In that case another function is called and
+    # the code path does not reach this point.
+    # 2) No persistent changes happened until this line if the parameter
+    # "res_id" is an id of the primitive.
+    if deal_with_guest_change:
+        _detect_guest_change(
+            prepare_options(meta_values),
+            "--force" in utils.pcs_options,
+        )
+
     utils.dom_update_meta_attr(
         resource,
         utils.convert_args_to_tuples(meta_values)
@@ -2043,16 +2062,26 @@ def resource_restart(argv):
     node = None
     resource = argv.pop(0)
 
-    real_res = utils.dom_get_resource_clone_ms_parent(dom, resource)
+    real_res = (
+        utils.dom_get_resource_clone_ms_parent(dom, resource)
+        or
+        utils.dom_get_resource_bundle_parent(dom, resource)
+    )
     if real_res:
-        print("Warning: using %s... (if a resource is a clone or master/slave you must use the clone or master/slave name" % real_res.getAttribute("id"))
+        print("Warning: using %s... (if a resource is a clone, master/slave or bundle you must use the clone, master/slave or bundle name)" % real_res.getAttribute("id"))
         resource = real_res.getAttribute("id")
 
     args = ["crm_resource", "--restart", "--resource", resource]
     if len(argv) > 0:
         node = argv.pop(0)
-        if not utils.dom_get_clone(dom,resource) and not utils.dom_get_master(dom,resource):
-            utils.err("can only restart on a specific node for a clone or master/slave resource")
+        if not (
+            utils.dom_get_clone(dom, resource)
+            or
+            utils.dom_get_master(dom, resource)
+            or
+            utils.dom_get_bundle(dom, resource)
+        ):
+            utils.err("can only restart on a specific node for a clone, master/slave or bundle resource")
         args.extend(["--node", node])
 
     if "--wait" in utils.pcs_options:
diff --git a/pcs/settings_default.py b/pcs/settings_default.py
index 85117d0..ac2f02d 100644
--- a/pcs/settings_default.py
+++ b/pcs/settings_default.py
@@ -27,7 +27,7 @@ pengine_binary = "/usr/libexec/pacemaker/pengine"
 crmd_binary = "/usr/libexec/pacemaker/crmd"
 cib_binary = "/usr/libexec/pacemaker/cib"
 stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.160"
+pcs_version = "0.9.161"
 crm_report = pacemaker_binaries + "crm_report"
 crm_verify = pacemaker_binaries + "crm_verify"
 crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng'
diff --git a/pcs/test/cib_resource/test_create.py b/pcs/test/cib_resource/test_create.py
index eecc9b6..cfb2e64 100644
--- a/pcs/test/cib_resource/test_create.py
+++ b/pcs/test/cib_resource/test_create.py
@@ -4,6 +4,8 @@ from __future__ import (
     print_function,
 )
 
+import re
+
 from pcs.test.tools.misc import (
     get_test_resource as rc,
     skip_unless_pacemaker_supports_bundle,
@@ -1084,10 +1086,14 @@ class FailOrWarn(ResourceTest):
     def test_fail_when_nonexisting_agent(self):
         self.assert_pcs_fail(
             "resource create R ocf:heartbeat:NoExisting",
-            "Error: Agent 'ocf:heartbeat:NoExisting' is not installed or does"
-                " not provide valid metadata: Metadata query for"
-                " ocf:heartbeat:NoExisting failed: -5, use --force to"
-                " override\n"
+            # pacemaker 1.1.18 changes -5 to Input/output error
+            stdout_regexp=re.compile("^"
+                "Error: Agent 'ocf:heartbeat:NoExisting' is not installed or "
+                "does not provide valid metadata: Metadata query for "
+                "ocf:heartbeat:NoExisting failed: (-5|Input/output error), use "
+                "--force to override\n"
+                "$", re.MULTILINE
+            )
         )
 
     def test_warn_when_forcing_noexistent_agent(self):
@@ -1104,9 +1110,13 @@ class FailOrWarn(ResourceTest):
                     </operations>
                 </primitive>
             </resources>""",
-            "Warning: Agent 'ocf:heartbeat:NoExisting' is not installed or does"
-            " not provide valid metadata: Metadata query for"
-            " ocf:heartbeat:NoExisting failed: -5\n"
+            # pacemaker 1.1.18 changes -5 to Input/output error
+            output_regexp=re.compile("^"
+                "Warning: Agent 'ocf:heartbeat:NoExisting' is not installed or "
+                    "does not provide valid metadata: Metadata query for "
+                    "ocf:heartbeat:NoExisting failed: (-5|Input/output error)\n"
+                "$", re.MULTILINE
+            )
         )
 
 
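Several test files in this commit switch from exact output comparison to a compiled
regexp so that both the old and the new pacemaker wording are accepted. A minimal,
self-contained illustration of that pattern; the two message texts are the variants
the hunk above allows for:

```python
import re

# Output as produced by pacemaker < 1.1.18 ...
old_style = (
    "Error: Agent 'ocf:heartbeat:NoExisting' is not installed or does not "
    "provide valid metadata: Metadata query for ocf:heartbeat:NoExisting "
    "failed: -5, use --force to override\n"
)
# ... and the wording pacemaker 1.1.18 switched to.
new_style = old_style.replace("-5", "Input/output error")

pattern = re.compile("^"
    "Error: Agent 'ocf:heartbeat:NoExisting' is not installed or "
    "does not provide valid metadata: Metadata query for "
    "ocf:heartbeat:NoExisting failed: (-5|Input/output error), use "
    "--force to override\n"
    "$", re.MULTILINE
)

assert pattern.search(old_style)
assert pattern.search(new_style)
```
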
diff --git a/pcs/test/cib_resource/test_stonith_create.py b/pcs/test/cib_resource/test_stonith_create.py
index 356319e..fcc361e 100644
--- a/pcs/test/cib_resource/test_stonith_create.py
+++ b/pcs/test/cib_resource/test_stonith_create.py
@@ -4,6 +4,8 @@ from __future__ import (
     print_function,
 )
 
+import re
+
 from pcs import utils
 from pcs.test.cib_resource.common import ResourceTest
 from pcs.test.tools import pcs_unittest as unittest
@@ -62,9 +64,13 @@ class PlainStonith(ResourceTest):
     def test_error_when_not_valid_agent(self):
         self.assert_pcs_fail(
             "stonith create S absent",
-            "Error: Agent 'absent' is not installed or does not provide valid"
-                " metadata: Metadata query for stonith:absent failed: -5, use"
-                " --force to override\n"
+            # pacemaker 1.1.18 changes -5 to Input/output error
+            stdout_regexp=re.compile("^"
+                "Error: Agent 'absent' is not installed or does not provide "
+                "valid metadata: Metadata query for stonith:absent failed: "
+                "(-5|Input/output error), use --force to override\n"
+                "$", re.MULTILINE
+            )
         )
 
     def test_warning_when_not_valid_agent(self):
@@ -79,8 +85,13 @@ class PlainStonith(ResourceTest):
                     </operations>
                 </primitive>
             </resources>""",
-            "Warning: Agent 'absent' is not installed or does not provide valid"
-                " metadata: Metadata query for stonith:absent failed: -5\n"
+            # pacemaker 1.1.18 changes -5 to Input/output error
+            output_regexp=re.compile("^"
+                "Warning: Agent 'absent' is not installed or does not provide "
+                    "valid metadata: Metadata query for stonith:absent failed: "
+                    "(-5|Input/output error)\n"
+                "$", re.MULTILINE
+            )
         )
 
     @need_load_xvm_fence_agent
diff --git a/pcs/test/resources/capabilities.xml b/pcs/test/resources/capabilities.xml
new file mode 100644
index 0000000..4758ee2
--- /dev/null
+++ b/pcs/test/resources/capabilities.xml
@@ -0,0 +1,31 @@
+<pcs-capabilities>
+  <capability-list>
+
+    <capability id="test.in-pcs" in-pcs="1" in-pcsd="0">
+      <description>
+        This capability is available in pcs.
+      </description>
+    </capability>
+
+    <capability id="test.in-pcsd" in-pcs="0" in-pcsd="1">
+      <description>
+        This capability is available in pcsd.
+      </description>
+    </capability>
+
+    <capability id="test.both" in-pcs="1" in-pcsd="1">
+      <description>
+        This capability is available in both pcs and pcsd.
+      </description>
+    </capability>
+
+    <capability id="test.empty-description" in-pcs="1" in-pcsd="1">
+      <description>
+      </description>
+    </capability>
+
+    <capability id="test.no-description" in-pcs="1" in-pcsd="1">
+    </capability>
+
+  </capability-list>
+</pcs-capabilities>
diff --git a/pcs/test/resources/resource_agent_ocf_heartbeat_dummy_utf8.xml b/pcs/test/resources/resource_agent_ocf_heartbeat_dummy_utf8.xml
new file mode 100644
index 0000000..a312101
--- /dev/null
+++ b/pcs/test/resources/resource_agent_ocf_heartbeat_dummy_utf8.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="Dummy®" version="0.9"
+  shortdesc="Example stateless resource agent: ®"
+>
+<longdesc lang="en">
+This is a Dummy Resource Agent for testing utf-8 in metadata: ®
+</longdesc>
+<version>1.0</version>
+
+<parameters>
+  <parameter name="state-®" unique="1">
+    <longdesc lang="en">
+      Location to store the resource state in: ®
+    </longdesc>
+    <shortdesc lang="en">State file: ®</shortdesc>
+    <content type="string" default="/var/run/resource-agents/Dummy-®.state" />
+  </parameter>
+</parameters>
+
+<actions>
+  <action name="start"        timeout="20" />
+  <action name="stop"         timeout="20" />
+  <action name="monitor"      timeout="20" interval="10" depth="0" />
+  <action name="meta-data"    timeout="5" />
+  <action name="validate-all" timeout="20" />
+  <action name="custom-®"     timeout="20" />
+</actions>
+</resource-agent>
diff --git a/pcs/test/test_cluster_pcmk_remote.py b/pcs/test/test_cluster_pcmk_remote.py
index e504fd8..e5ce410 100644
--- a/pcs/test/test_cluster_pcmk_remote.py
+++ b/pcs/test/test_cluster_pcmk_remote.py
@@ -299,6 +299,19 @@ class NodeAddGuest(ResourceTest):
             output=fixture_nolive_add_report
         )
 
+    def test_success_when_guest_node_matches_with_existing_guest(self):
+        # This test belongs to pcs/test/test_resource.py as it tests
+        # "resource update". But due to some fixtures it is more practical to
+        # keep it here.
+        self.create_resource()
+        self.assert_pcs_success(
+            "cluster node add-guest node-host G",
+            fixture_nolive_add_report
+        )
+        self.assert_pcs_success(
+            "resource update G meta remote-node=node-host",
+        )
+
     def test_success_with_options(self):
         self.create_resource()
         self.assert_effect(
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index 29413cc..bd596f6 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -94,10 +94,12 @@ class ResourceDescribeTest(unittest.TestCase, AssertPcsMixin):
     def test_nonextisting_agent(self):
         self.assert_pcs_fail(
             "resource describe ocf:pacemaker:nonexistent",
-            (
-                "Error: Agent 'ocf:pacemaker:nonexistent' is not installed or"
-                " does not provide valid metadata: Metadata query for"
-                " ocf:pacemaker:nonexistent failed: -5\n"
+            # pacemaker 1.1.18 changes -5 to Input/output error
+            stdout_regexp=re.compile("^"
+                "Error: Agent 'ocf:pacemaker:nonexistent' is not installed or "
+                "does not provide valid metadata: Metadata query for "
+                "ocf:pacemaker:nonexistent failed: (-5|Input/output error)\n"
+                "$", re.MULTILINE
             )
         )
 
@@ -270,6 +272,10 @@ class ResourceTest(unittest.TestCase, AssertPcsMixin):
         ))
 
     def testResourceUpdate(self):
+        # The idempotency with remote-node is tested in
+        # pcs/test/test_cluster_pcmk_remote.py in
+        # NodeAddGuest.test_success_when_guest_node_matches_with_existing_guest
+
         # see also BundleMiscCommands
         self.assert_pcs_success(
             "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2"
@@ -2841,9 +2847,21 @@ Ticket Constraints:
         ac(output,"Error: when specifying --master you must use the master id (D2-master)\n")
         assert returnVal == 1
 
-        output, returnVal  = pcs(temp_cib, "resource move D2-master --master")
-        ac(output,"Error: error moving/banning/clearing resource\nResource 'D2-master' not moved: active in 0 locations (promoted in 0).\nYou can prevent 'D2-master' from running on a specific location with: --ban --host <name>\nYou can prevent 'D2-master' from being promoted at a specific location with: --ban --master --host <name>\nError performing operation: Invalid argument\n\n")
-        assert returnVal == 1
+        self.assert_pcs_fail(
+            "resource move D2-master --master",
+            # pacemaker 1.1.18 changes --host to --node
+            stdout_regexp=re.compile("^"
+                "Error: error moving/banning/clearing resource\n"
+                "Resource 'D2-master' not moved: active in 0 locations "
+                    "\(promoted in 0\).\n"
+                "You can prevent 'D2-master' from running on a specific "
+                    "location with: --ban --(host|node) <name>\n"
+                "You can prevent 'D2-master' from being promoted at a specific "
+                    "location with: --ban --master --(host|node) <name>\n"
+                "Error performing operation: Invalid argument\n\n"
+                "$", re.MULTILINE
+            )
+        )
 
         self.assert_pcs_success("resource --full", outdent(
             """\
@@ -2871,9 +2889,22 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource move group1-master --master")
-        ac(o,"Error: error moving/banning/clearing resource\nResource 'group1-master' not moved: active in 0 locations (promoted in 0).\nYou can prevent 'group1-master' from running on a specific location with: --ban --host <name>\nYou can prevent 'group1-master' from being promoted at a specific location with: --ban --master --host <name>\nError performing operation: Invalid argument\n\n")
-        assert r == 1
+        self.assert_pcs_fail(
+            "resource move group1-master --master",
+            # pacemaker 1.1.18 changes --host to --node
+            stdout_regexp=re.compile("^"
+                "Error: error moving/banning/clearing resource\n"
+                "Resource 'group1-master' not moved: active in 0 locations "
+                    "\(promoted in 0\).\n"
+                "You can prevent 'group1-master' from running on a specific "
+                    "location with: --ban --(host|node) <name>\n"
+                "You can prevent 'group1-master' from being promoted at a "
+                    "specific location with: --ban --master --(host|node) "
+                    "<name>\n"
+                "Error performing operation: Invalid argument\n\n"
+                "$", re.MULTILINE
+            )
+        )
 
     def testDebugStartCloneGroup(self):
         o,r = pcs("resource create D0 ocf:heartbeat:Dummy --group DGroup")
diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
index 5bd13eb..becc1a1 100644
--- a/pcs/test/test_stonith.py
+++ b/pcs/test/test_stonith.py
@@ -4,6 +4,7 @@ from __future__ import (
     print_function,
 )
 
+import re
 import shutil
 
 from pcs import utils
@@ -72,10 +73,12 @@ Stonith options:
     def test_nonextisting_agent(self):
         self.assert_pcs_fail(
             "stonith describe fence_noexist",
-            (
-                "Error: Agent 'fence_noexist' is not installed or does not"
-                " provide valid metadata: Metadata query for"
-                " stonith:fence_noexist failed: -5\n"
+            # pacemaker 1.1.18 changes -5 to Input/output error
+            stdout_regexp=re.compile("^"
+                "Error: Agent 'fence_noexist' is not installed or does not "
+                "provide valid metadata: Metadata query for "
+                "stonith:fence_noexist failed: (-5|Input/output error)\n"
+                "$", re.MULTILINE
             )
         )
 
@@ -98,13 +101,28 @@ class StonithTest(TestCase, AssertPcsMixin):
         shutil.copy(empty_cib, temp_cib)
 
     def testStonithCreation(self):
-        output, returnVal = pcs(temp_cib, "stonith create test1 fence_noxist")
-        ac(output, "Error: Agent 'fence_noxist' is not installed or does not provide valid metadata: Metadata query for stonith:fence_noxist failed: -5, use --force to override\n")
-        assert returnVal == 1
+        self.assert_pcs_fail(
+            "stonith create test1 fence_noexist",
+            # pacemaker 1.1.18 changes -5 to Input/output error
+            stdout_regexp=re.compile("^"
+                "Error: Agent 'fence_noexist' is not installed or does not "
+                "provide valid metadata: Metadata query for "
+                "stonith:fence_noexist failed: (-5|Input/output error), use "
+                "--force to override\n"
+                "$", re.MULTILINE
+            )
+        )
 
-        output, returnVal = pcs(temp_cib, "stonith create test1 fence_noxist --force")
-        ac(output, "Warning: Agent 'fence_noxist' is not installed or does not provide valid metadata: Metadata query for stonith:fence_noxist failed: -5\n")
-        self.assertEqual(returnVal, 0)
+        self.assert_pcs_success(
+            "stonith create test1 fence_noexist --force",
+            # pacemaker 1.1.18 changes -5 to Input/output error
+            stdout_regexp=re.compile("^"
+                "Warning: Agent 'fence_noexist' is not installed or does not "
+                "provide valid metadata: Metadata query for "
+                "stonith:fence_noexist failed: (-5|Input/output error)\n"
+                "$", re.MULTILINE
+            )
+        )
 
         self.assert_pcs_fail(
             "stonith create test2 fence_apc",
@@ -196,7 +214,7 @@ class StonithTest(TestCase, AssertPcsMixin):
 
         output, returnVal = pcs(temp_cib, "stonith show --full")
         ac(output, """\
- Resource: test1 (class=stonith type=fence_noxist)
+ Resource: test1 (class=stonith type=fence_noexist)
   Operations: monitor interval=60s (test1-monitor-interval-60s)
  Resource: test2 (class=stonith type=fence_apc)
   Operations: monitor interval=60s (test2-monitor-interval-60s)
@@ -221,7 +239,7 @@ class StonithTest(TestCase, AssertPcsMixin):
             Resources:
 
             Stonith Devices:
-             Resource: test1 (class=stonith type=fence_noxist)
+             Resource: test1 (class=stonith type=fence_noexist)
               Operations: monitor interval=60s (test1-monitor-interval-60s)
              Resource: test2 (class=stonith type=fence_apc)
               Operations: monitor interval=60s (test2-monitor-interval-60s)
diff --git a/pcs/test/tools/cib.py b/pcs/test/tools/cib.py
index 69e0fea..45ded43 100644
--- a/pcs/test/tools/cib.py
+++ b/pcs/test/tools/cib.py
@@ -45,19 +45,33 @@ def get_assert_pcs_effect_mixin(get_cib_part):
                     )
                 )
 
-        def assert_effect_single(self, command, expected_xml, output=""):
-            self.assert_pcs_success(command, output)
+        def assert_effect_single(
+            self, command, expected_xml, output=None, output_start=None,
+            output_regexp=None
+        ):
+            self.assert_pcs_success(
+                command, output, output_start, output_regexp
+            )
             self.assert_resources_xml_in_cib(expected_xml)
 
-        def assert_effect(self, alternative_cmds, expected_xml, output=""):
+        def assert_effect(
+            self, alternative_cmds, expected_xml, output=None,
+            output_start=None, output_regexp=None
+        ):
             alternative_list = (
                 alternative_cmds if isinstance(alternative_cmds, list)
                 else [alternative_cmds]
             )
             cib_content = open(self.temp_cib).read()
             for alternative in alternative_list[:-1]:
-                self.assert_effect_single(alternative, expected_xml, output)
+                self.assert_effect_single(
+                    alternative, expected_xml,
+                    output, output_start, output_regexp
+                )
                 open(self.temp_cib, "w").write(cib_content)
 
-            self.assert_effect_single(alternative_list[-1], expected_xml, output)
+            self.assert_effect_single(
+                alternative_list[-1], expected_xml,
+                output, output_start, output_regexp
+            )
     return AssertPcsEffectMixin
diff --git a/pcs/usage.py b/pcs/usage.py
index 96b98fb..7481014 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -157,7 +157,8 @@ Options:
     -h, --help         Display usage and exit.
     -f file            Perform actions on file instead of active CIB.
     --debug            Print all network traffic and external commands run.
-    --version          Print pcs version information.
+    --version          Print pcs version information. List pcs capabilities if
+                       --full is specified.
     --request-timeout  Timeout for each outgoing request to another node in
                        seconds. Default is 60s.
     --force            Override checks and errors, the exact behavior depends on
diff --git a/pcs/utils.py b/pcs/utils.py
index 174f8e7..cbaa7a4 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -1048,6 +1048,13 @@ def cmd_runner():
         env_vars
     )
 
+def get_pcsd_dir():
+    pcs_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
+    if pcs_dir == "/usr/sbin":
+        return settings.pcsd_exec_location
+    else:
+        return os.path.join(pcs_dir, '../pcsd')
+
 def run_pcsdcli(command, data=None):
     if not data:
         data = dict()
@@ -1058,11 +1065,7 @@ def run_pcsdcli(command, data=None):
         env_var["PCSD_NETWORK_TIMEOUT"] = str(pcs_options["--request-timeout"])
     else:
         env_var["PCSD_NETWORK_TIMEOUT"] = str(settings.default_request_timeout)
-    pcs_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
-    if pcs_dir == "/usr/sbin":
-        pcsd_dir_path = settings.pcsd_exec_location
-    else:
-        pcsd_dir_path = os.path.join(pcs_dir, '../pcsd')
+    pcsd_dir_path = get_pcsd_dir()
     pcsdcli_path = os.path.join(pcsd_dir_path, 'pcsd-cli.rb')
     gem_home = os.path.join(pcsd_dir_path, 'vendor/bundle/ruby')
     env_var["GEM_HOME"] = gem_home
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
index a97cc29..3fac787 100644
--- a/pcsd/bootstrap.rb
+++ b/pcsd/bootstrap.rb
@@ -34,10 +34,14 @@ def is_systemctl()
   return false
 end
 
-def get_pcs_path()
-  pcsd_path = Pathname.new(
+def get_pcsd_path()
+  return Pathname.new(
       File.expand_path(File.dirname(__FILE__))
-    ).realpath.to_s
+    ).realpath
+end
+
+def get_pcs_path()
+  pcsd_path = get_pcsd_path().to_s
   if PCSD_EXEC_LOCATION == pcsd_path or PCSD_EXEC_LOCATION == (pcsd_path + '/')
     return PCS_EXEC
   else
@@ -45,7 +49,7 @@ def get_pcs_path()
   end
 end
 
-PCS_VERSION = '0.9.160'
+PCS_VERSION = '0.9.161'
 # unique instance signature, allows detection of daemon restarts
 DAEMON_INSTANCE_SIGNATURE = Digest::SHA2.hexdigest("#{Time.now} #{rand()}")
 COROSYNC = COROSYNC_BINARIES + "corosync"
diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml
new file mode 100644
index 0000000..ee9a822
--- /dev/null
+++ b/pcsd/capabilities.xml
@@ -0,0 +1,1536 @@
+<pcs-capabilities>
+  <capability-list>
+    <!--
+      "optionally" means a user or client may but is not required to specify
+      given parameters. The provider of a feature/capability is required to
+      implement means for specifying and processing these parameters.
+    -->
+
+    <capability id="booth" in-pcs="1" in-pcsd="0">
+      <description>
+        Supports booth cluster ticket manager:
+        * create and delete a booth config file and keyfile
+        * configure booth tickets in the config file
+        * grant and revoke booth tickets
+        * manage booth service for running booth on a single host: start, stop,
+          enable, disable
+        * manage booth resource for running booth as a cluster resource:
+          create, delete, restart
+        * pull a config from a host, sync a config to the local cluster
+        * show booth status
+        All changes are done on the local host only and have to be manually
+        pushed (synchronized) to all cluster nodes and other arbitrators /
+        sites (unless a feature which does this automatically is implemented and
+        added to this list).
+
+        pcs commands: booth
+      </description>
+    </capability>
+    <capability id="booth.set-config" in-pcs="0" in-pcsd="1">
+      <description>
+        Save a booth config and key file to the local host.
+
+        daemon urls: booth_set_config
+      </description>
+    </capability>
+    <capability id="booth.set-config.multiple" in-pcs="0" in-pcsd="1">
+      <description>
+        Save multiple booth configs and keys to the local host.
+
+        daemon urls: booth_save_files
+      </description>
+    </capability>
+    <capability id="booth.get-config" in-pcs="0" in-pcsd="1">
+      <description>
+        Provide the local booth config and key file.
+
+        daemon urls: booth_get_config
+      </description>
+    </capability>
+
+
+
+    <capability id="cluster.config.backup-local" in-pcs="1" in-pcsd="0">
+      <description>
+        Create a tarball containing cluster configuration files (corosync,
+        pacemaker, pcsd) from the local host.
+
+        pcs commands: config backup
+      </description>
+    </capability>
+    <capability id="cluster.config.restore-cluster" in-pcs="1" in-pcsd="0">
+      <description>
+        Restore the cluster configuration on all cluster nodes from a tarball
+        created by pcs backup. List of cluster nodes is extracted from the
+        tarball.
+
+        pcs commands: config restore
+      </description>
+    </capability>
+    <capability id="cluster.config.restore-local" in-pcs="1" in-pcsd="1">
+      <description>
+        Restore the cluster configuration on the local host from a tarball
+        created by pcs backup.
+
+        pcs commands: config restore --local
+        daemon urls: config_restore
+      </description>
+    </capability>
+
+
+
+    <capability id="cluster.config.export.to-pcs-commands" in-pcs="1" in-pcsd="0">
+      <description>
+        Create a list of pcs commands which, upon execution, recreate the
+        current cluster running on the local host.
+
+        pcs commands: config export pcs-commands[-verbose]
+      </description>
+    </capability>
+    <capability id="cluster.config.import-cman" in-pcs="1" in-pcsd="0">
+      <description>
+        Convert CMAN cluster configuration files to Corosync/Pacemaker
+        cluster config files or pcs commands.
+
+        pcs commands: config import-cman
+      </description>
+    </capability>
+
+
+
+    <capability id="cluster.create" in-pcs="1" in-pcsd="1">
+      <description>
+        Set up a new cluster.
+
+        pcs commands: cluster setup
+        daemon urls: setup_cluster
+      </description>
+    </capability>
+    <capability id="cluster.create.enable" in-pcs="1" in-pcsd="0">
+      <description>
+        Enable cluster services on cluster nodes during cluster setup.
+
+        pcs commands: cluster setup --enable
+      </description>
+    </capability>
+    <capability id="cluster.create.start" in-pcs="1" in-pcsd="0">
+      <description>
+        Start cluster services on cluster nodes during cluster setup.
+
+        pcs commands: cluster setup --start
+      </description>
+    </capability>
+    <capability id="cluster.create.start.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for cluster nodes to start during cluster setup if start requested.
+
+        pcs commands: cluster setup --start --wait[=timeout]
+      </description>
+    </capability>
+    <capability id="cluster.destroy" in-pcs="1" in-pcsd="1">
+      <description>
+        Destroy cluster on the local host.
+
+        pcs commands: cluster destroy
+        daemon urls: cluster_destroy
+      </description>
+    </capability>
+    <capability id="cluster.destroy.all" in-pcs="1" in-pcsd="0">
+      <description>
+        Destroy cluster on all nodes defined in a cluster which the local host
+        belongs to.
+
+        pcs commands: cluster destroy --all
+      </description>
+    </capability>
+    <capability id="cluster.report" in-pcs="1" in-pcsd="0">
+      <description>
+        Create a tarball containing everything needed when reporting cluster
+        problems.
+
+        pcs commands: cluster report
+      </description>
+    </capability>
+    <capability id="cluster.verify" in-pcs="1" in-pcsd="0">
+      <description>
+        Check cluster configuration for errors.
+      </description>
+    </capability>
+
+
+
+    <capability id="corosync.config.get" in-pcs="1" in-pcsd="1">
+      <description>
+        Provide the local corosync.conf file.
+
+        pcs commands: cluster corosync
+        daemon urls: get_corosync_conf
+      </description>
+    </capability>
+    <capability id="corosync.config.set" in-pcs="0" in-pcsd="1">
+      <description>
+        Save a corosync.conf file to the local host.
+
+        daemon urls: set_corosync_conf
+      </description>
+    </capability>
+    <capability id="corosync.config.reload" in-pcs="1" in-pcsd="0">
+      <description>
+        Instruct corosync to reload its configuration from its config files.
+
+        pcs commands: cluster reload corosync
+      </description>
+    </capability>
+    <capability id="corosync.config.sync-to-local-cluster" in-pcs="1" in-pcsd="0">
+      <description>
+        Push corosync configuration from the local host to all cluster nodes.
+
+        pcs commands: cluster sync
+      </description>
+    </capability>
+    <capability id="corosync.qdevice" in-pcs="1" in-pcsd="0">
+      <description>
+        Set up, manage and show status of a corosync qdevice. Specific
+        functionality depends on supported model.
+
+        pcs commands: pcs qdevice
+      </description>
+    </capability>
+    <capability id="corosync.qdevice.model.net" in-pcs="1" in-pcsd="0">
+      <description>
+        Set up and manage a corosync qdevice model "net": setup, destroy, start,
+        stop, kill, enable, disable, show status.
+
+        pcs commands: pcs qdevice
+      </description>
+    </capability>
+    <capability id="corosync.qdevice.model.net.certificates" in-pcs="0" in-pcsd="1">
+      <description>
+        Provide the certificate of the qdevice instance's certificate authority
+        and sign certificate requests.
+
+        daemon urls: qdevice_net_get_ca_certificate,
+          qdevice_net_sign_node_certificate
+      </description>
+    </capability>
+    <capability id="corosync.quorum" in-pcs="1" in-pcsd="0">
+      <description>
+        Show and change corosync quorum configuration.
+
+        pcs commands: quorum config, quorum update
+      </description>
+    </capability>
+    <capability id="corosync.quorum.status" in-pcs="1" in-pcsd="1">
+      <description>
+        Show corosync quorum status on a node.
+
+        pcs commands: quorum status
+        daemon urls: get_quorum_info
+      </description>
+    </capability>
+    <capability id="corosync.quorum.device" in-pcs="1" in-pcsd="0">
+      <description>
+        Show and change corosync qdevice configuration.
+
+        pcs commands: quorum device (add | remove | update | status)
+      </description>
+    </capability>
+    <capability id="corosync.quorum.device.client" in-pcs="0" in-pcsd="1">
+      <description>
+        Control qdevice client service: start, stop, enable, disable
+
+        daemon urls: qdevice_client_start, qdevice_client_stop,
+          qdevice_client_enable, qdevice_client_disable
+      </description>
+    </capability>
+    <capability id="corosync.quorum.device.model.net" in-pcs="1" in-pcsd="0">
+      <description>
+        Show and change corosync qdevice model "net" configuration.
+
+        pcs commands: quorum device (add | remove | update | status)
+      </description>
+    </capability>
+    <capability id="corosync.quorum.device.client.model.net.certificates" in-pcs="0" in-pcsd="1">
+      <description>
+        Set up and destroy certificates in qdevice model net client.
+
+        daemon urls: qdevice_net_client_init_certificate_storage,
+          qdevice_net_client_import_certificate, qdevice_net_client_destroy
+      </description>
+    </capability>
+    <capability id="corosync.quorum.set-expected-votes-runtime" in-pcs="1" in-pcsd="0">
+      <description>
+        Set expected votes in the live cluster to a specified value.
+
+        pcs commands: quorum expected-votes
+      </description>
+    </capability>
+    <capability id="corosync.quorum.unblock" in-pcs="1" in-pcsd="0">
+      <description>
+        Cancel waiting for all nodes when establishing quorum.
+
+        pcs commands: quorum unblock
+      </description>
+    </capability>
+    <capability id="corosync.uidgid" in-pcs="1" in-pcsd="0">
+      <description>
+        Show and change configured uids and gids allowed to connect to corosync.
+      </description>
+    </capability>
+
+
+
+    <capability id="node.add" in-pcs="1" in-pcsd="1">
+      <description>
+        Add a new node to a cluster.
+
+        pcs commands: cluster node add
+        daemon urls: add_node_all
+      </description>
+    </capability>
+    <capability id="node.add.local" in-pcs="1" in-pcsd="1">
+      <description>
+        Add a new node to a cluster on the local host only (pcs/pcsd internal
+        function).
+
+        pcs commands: cluster localnode add
+        daemon urls: add_node
+      </description>
+    </capability>
+    <capability id="node.add.enable" in-pcs="1" in-pcsd="0">
+      <description>
+        Enable cluster services on the new cluster node during adding a node.
+
+        pcs commands: cluster node add --enable
+      </description>
+    </capability>
+    <capability id="node.add.start" in-pcs="1" in-pcsd="0">
+      <description>
+        Start cluster services on the new cluster node during adding a node.
+
+        pcs commands: cluster node add --start
+      </description>
+    </capability>
+    <capability id="node.add.start.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for the new node to start cluster services during adding a node
+        if start requested.
+
+        pcs commands: cluster node add --start --wait[=timeout]
+      </description>
+    </capability>
+    <capability id="node.add.enable-and-start" in-pcs="0" in-pcsd="1">
+      <description>
+        Enable and start cluster services on the new cluster node during
+        adding a node; "enable" and "start" cannot be done separately.
+
+        daemon urls: add_node, add_node_all (param: auto_start)
+      </description>
+    </capability>
+    <capability id="node.remove" in-pcs="1" in-pcsd="1">
+      <description>
+        Remove a node from a cluster.
+
+        pcs commands: cluster node remove
+        daemon urls: remove_nodes
+      </description>
+    </capability>
+    <capability id="node.remove.local" in-pcs="1" in-pcsd="1">
+      <description>
+        Remove a node from a cluster on the local host only (pcs/pcsd internal
+        function).
+
+        pcs commands: cluster localnode remove
+        daemon urls: remove_node
+      </description>
+    </capability>
+    <capability id="node.remove.list" in-pcs="0" in-pcsd="1">
+      <description>
+        Remove several nodes from a cluster at once.
+
+        daemon urls: remove_nodes
+      </description>
+    </capability>
+    <capability id="node.remove-from-caches" in-pcs="1" in-pcsd="0">
+      <description>
+        Remove specified node from various cluster caches.
+
+        pcs commands: cluster node clear
+      </description>
+    </capability>
+    <capability id="node.guest" in-pcs="1" in-pcsd="0">
+      <description>
+        Add a guest node (based on an existing cluster resource) to a cluster
+        and remove it. When adding, it is possible to wait for the node to
+        start.
+
+        pcs commands: cluster node ( add-guest | remove-guest )
+      </description>
+    </capability>
+    <capability id="node.remote" in-pcs="1" in-pcsd="0">
+      <description>
+        Add and remove a remote node to a cluster. When adding, it is possible
+        to specify operations and instance and meta attributes for the newly
+        created pcmk_remote resource and wait for the new remote node to start.
+
+        pcs commands: cluster node ( add-remote | remove-remote )
+      </description>
+    </capability>
+
+
+
+    <capability id="node.start-stop-enable-disable" in-pcs="1" in-pcsd="1">
+      <description>
+        Allow starting, stopping, enabling and disabling cluster services on
+        one node, or on the local host if no node is specified.
+
+        pcs commands: cluster (start | stop | enable | disable)
+        daemon urls: cluster_start, cluster_stop, cluster_enable,
+          cluster_disable
+      </description>
+    </capability>
+    <capability id="node.start-stop-enable-disable.all" in-pcs="1" in-pcsd="0">
+      <description>
+        Allow starting, stopping, enabling and disabling cluster services on all
+        nodes in the cluster.
+
+        pcs commands: cluster (start | stop | enable | disable) --all
+      </description>
+    </capability>
+    <capability id="node.start-stop-enable-disable.list" in-pcs="1" in-pcsd="0">
+      <description>
+        Allow starting, stopping, enabling and disabling cluster services on
+        specified nodes in a cluster.
+
+        pcs commands: cluster (start | stop | enable | disable) node1 node2 ...
+      </description>
+    </capability>
+    <capability id="node.start-stop-enable-disable.start-wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for the cluster nodes to start.
+
+        pcs commands: cluster start --wait[=timeout]
+      </description>
+    </capability>
+    <capability id="node.start-stop-enable-disable.stop-component" in-pcs="0" in-pcsd="1">
+      <description>
+        At the node level, provide means for stopping services separately so
+        that the stopping of nodes can be synchronized at the cluster level.
+
+        daemon urls: cluster_stop (param: component)
+      </description>
+    </capability>
+    <capability id="node.kill" in-pcs="1" in-pcsd="0">
+      <description>
+        Kill cluster services on the local node.
+
+        pcs commands: cluster kill
+      </description>
+    </capability>
+    <capability id="node.restart" in-pcs="0" in-pcsd="1">
+      <description>
+        Restart one host machine, or the local host machine if no host is
+        specified.
+
+        daemon urls: node_restart
+      </description>
+    </capability>
+
+
+
+    <capability id="node.attributes" in-pcs="1" in-pcsd="1">
+      <description>
+        Show node attributes, add and remove a node attribute.
+
+        pcs commands: node attribute
+        daemon urls: add_node_attr_remote
+      </description>
+    </capability>
+    <capability id="node.attributes.set-list-for-node" in-pcs="1" in-pcsd="0">
+      <description>
+        Set several node attributes for a single node at once.
+
+        pcs commands: node attribute
+      </description>
+    </capability>
+    <capability id="node.maintenance" in-pcs="1" in-pcsd="0">
+      <description>
+        Put one node, or the local host if no node is specified, into and out
+        of maintenance mode. If this functionality is not available,
+        node.attributes can be used instead to set the maintenance attribute.
+
+        pcs commands: node ( maintenance | unmaintenance )
+      </description>
+    </capability>
+    <capability id="node.maintenance.all" in-pcs="1" in-pcsd="0">
+      <description>
+        Put all nodes of the cluster which the local host belongs to into and
+        out of maintenance mode.
+
+        pcs commands: node ( maintenance | unmaintenance ) --all
+      </description>
+    </capability>
+    <capability id="node.maintenance.list" in-pcs="1" in-pcsd="0">
+      <description>
+        Put specified nodes of a cluster into and out of maintenance mode.
+
+        pcs commands: node ( maintenance | unmaintenance ) node1 node2 ...
+      </description>
+    </capability>
+    <capability id="node.maintenance.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for nodes to finish transition to or from maintenance mode.
+
+        pcs commands: node ( maintenance | unmaintenance ) --wait[=timeout]
+      </description>
+    </capability>
+    <capability id="node.standby" in-pcs="1" in-pcsd="1">
+      <description>
+        Put one node, or the local host if no node is specified, into and out
+        of standby mode. If this functionality is not available,
+        node.attributes can be used instead to set the standby attribute.
+
+        pcs commands: node ( standby | unstandby )
+        daemon urls: node_standby, node_unstandby
+      </description>
+    </capability>
+    <capability id="node.standby.all" in-pcs="1" in-pcsd="0">
+      <description>
+        Put all nodes of the cluster which the local host belongs to into and
+        out of standby mode.
+
+        pcs commands: node ( standby | unstandby ) --all
+      </description>
+    </capability>
+    <capability id="node.standby.list" in-pcs="1" in-pcsd="0">
+      <description>
+        Put specified nodes of a cluster into and out of standby mode.
+
+        pcs commands: node ( standby | unstandby ) node1 node2 ...
+      </description>
+    </capability>
+    <capability id="node.standby.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for nodes to finish transition to or from standby mode.
+
+        pcs commands: node ( standby | unstandby ) --wait[=timeout]
+      </description>
+    </capability>
+    <capability id="node.utilization" in-pcs="1" in-pcsd="1">
+      <description>
+        Show node utilization attributes, add and remove a node utilization
+        attribute.
+
+        pcs commands: node utilization
+        daemon urls: set_node_utilization
+      </description>
+    </capability>
+    <capability id="node.utilization.set-list-for-node" in-pcs="1" in-pcsd="0">
+      <description>
+        Set several node utilization attributes for a single node at once.
+
+        pcs commands: node utilization
+      </description>
+    </capability>
+
+
+
+    <capability id="node.confirm-off" in-pcs="1" in-pcsd="0">
+      <description>
+        Confirm to the cluster that the specified node is powered off.
+
+        pcs commands: stonith confirm
+      </description>
+    </capability>
+    <capability id="node.fence" in-pcs="1" in-pcsd="0">
+      <description>
+        Fence the specified node.
+
+        pcs commands: stonith fence
+      </description>
+    </capability>
+
+
+
+    <capability id="pcmk.acl.enable-disable" in-pcs="1" in-pcsd="0">
+      <description>
+        Enable and disable pacemaker ACLs. If this capability is not available,
+        pcmk.properties.cluster can be used instead to set the enable-acl
+        property.
+
+        pcs commands: acl ( enable | disable )
+      </description>
+    </capability>
+    <capability id="pcmk.acl.group" in-pcs="1" in-pcsd="0">
+      <description>
+        Create (with or without role ids) and delete ACL groups one at a time.
+        Show ACL groups.
+
+        pcs commands: acl group ( create | delete ), acl show
+      </description>
+    </capability>
+    <capability id="pcmk.acl.role" in-pcs="1" in-pcsd="1">
+      <description>
+        Create, delete, assign, unassign and show ACL roles, add and delete
+        roles' permissions, all one at a time.
+
+        pcs commands: acl role ( create | delete | assign | unassign ),
+          acl permission ( add | delete )
+        daemon urls: add_acl_role, remove_acl_role
+      </description>
+    </capability>
+    <capability id="pcmk.acl.role.create-with-permissions" in-pcs="1" in-pcsd="0">
+      <description>
+        Create an ACL role and its permissions in one command.
+
+        pcs commands: acl role create
+      </description>
+    </capability>
+    <capability id="pcmk.acl.role.delete-with-users-groups" in-pcs="1" in-pcsd="0">
+      <description>
+        When deleting a role, optionally delete users and groups to which the
+        role is assigned, if those users and groups do not have any other roles
+        assigned.
+      </description>
+    </capability>
+    <capability id="pcmk.acl.role.delete-with-users-groups-implicit" in-pcs="0" in-pcsd="1">
+      <description>
+        When deleting a role, automatically delete users and groups to which the
+        role is assigned, if those users and groups do not have any other roles
+        assigned.
+      </description>
+    </capability>
+    <capability id="pcmk.acl.user" in-pcs="1" in-pcsd="0">
+      <description>
+        Create (with or without role ids) and delete ACL users one at a time.
+        Show ACL users.
+
+        pcs commands: acl user ( create | delete ), acl show
+      </description>
+    </capability>
+
+
+
+    <capability id="pcmk.alert" in-pcs="1" in-pcsd="1">
+      <description>
+        Set and show pacemaker alerts configuration:
+        * create an alert including its id, description, options and meta
+          attributes
+        * delete a list of alerts specified by their ids
+        * update an alert including its description, options and meta attributes
+        * create an alert recipient in the specified alert including its id,
+          description, options and meta attributes
+        * delete a list of recipients specified by their ids
+        * update an alert recipient including its description, options and meta
+          attributes
+      </description>
+    </capability>
+
+
+
+    <capability id="pcmk.cib.edit" in-pcs="1" in-pcsd="0">
+      <description>
+        Edit the CIB XML (as plain text), supporting a CIB scope and editing
+        just the configuration section.
+
+        pcs commands: cluster edit
+      </description>
+    </capability>
+    <capability id="pcmk.cib.get" in-pcs="1" in-pcsd="1">
+      <description>
+        Provide the CIB XML as plain text.
+
+        pcs commands: cluster cib
+        daemon urls: get_cib
+      </description>
+    </capability>
+    <capability id="pcmk.cib.get.scope" in-pcs="1" in-pcsd="0">
+      <description>
+        Support a CIB scope when providing a CIB, including providing just
+        the configuration section.
+
+        pcs commands: cluster cib
+      </description>
+    </capability>
+    <capability id="pcmk.cib.checkpoints" in-pcs="1" in-pcsd="0">
+      <description>
+        List, view (in a human-readable format) and restore CIB checkpoints.
+
+        pcs commands: config checkpoint ( | view | restore )
+      </description>
+    </capability>
+    <capability id="pcmk.cib.set" in-pcs="1" in-pcsd="0">
+      <description>
+        Push a CIB XML to a cluster; support a CIB scope, pushing just the
+        configuration section and pushing a CIB diff. Optionally wait for the
+        changes to take effect.
+
+        pcs commands: cluster cib-push
+      </description>
+    </capability>
+
+
+
+    <capability id="pcmk.constraint.location.simple" in-pcs="1" in-pcsd="1">
+      <description>
+        Create and delete a location constraint for one resource.
+
+        pcs commands: constraint location ( prefers | avoids ),
+          constraint location remove, constraint remove
+        daemon urls: add_constraint_remote (param: c_type=loc),
+          remove_constraint_remote
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.location.simple.options" in-pcs="1" in-pcsd="0">
+      <description>
+        Constraint options (resource-discovery) and id can be specified when
+        creating a constraint and are shown in constraint listing.
+
+        pcs commands: constraint location add
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.location.simple.resource-regexp" in-pcs="1" in-pcsd="0">
+      <description>
+        Resources can be specified by regular expressions.
+
+        pcs commands: regexp%resource_id
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.location.simple.rule" in-pcs="1" in-pcsd="1">
+      <description>
+        Can create rule location constraints.
+
+        pcs commands: constraint location rule
+        daemon urls: add_constraint_rule_remote (param: c_type=loc)
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.location.simple.rule.options" in-pcs="1" in-pcsd="0">
+      <description>
+        Constraint options (resource-discovery, role, rule-id, score-attribute)
+        and id can be specified when creating a constraint and are shown in
+        constraint listing.
+
+        pcs commands: constraint location rule
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.location.simple.rule.rule-add-remove" in-pcs="1" in-pcsd="0">
+      <description>
+        Add (including options role, rule-id, score-attribute) and remove rules
+        to an existing rule location constraint.
+
+        pcs commands: constraint rule ( add | remove )
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.colocation.simple" in-pcs="1" in-pcsd="1">
+      <description>
+        Create and delete a colocation constraint for a pair of resources.
+
+        pcs commands: constraint colocation add, constraint colocation remove,
+          constraint remove
+        daemon urls: add_constraint_remote (param: c_type=col),
+          remove_constraint_remote
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.colocation.simple.options" in-pcs="1" in-pcsd="0">
+      <description>
+        Can set constraint options, constraint id and resources' roles.
+
+        pcs commands: constraint colocation add
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.colocation.set" in-pcs="1" in-pcsd="1">
+      <description>
+        Create and delete a colocation constraint for sets of resources.
+
+        pcs commands: constraint colocation set, constraint remove
+        daemon urls: add_constraint_set_remote (param: c_type=col),
+          remove_constraint_remote
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.colocation.set.options" in-pcs="1" in-pcsd="0">
+      <description>
+        Can set resource set options and constraint options.
+
+        pcs commands: constraint colocation set
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.order.simple" in-pcs="1" in-pcsd="1">
+      <description>
+        Create and delete an order constraint for a pair of resources,
+        including specifying the resources' actions.
+
+        pcs commands: constraint order add, constraint order remove,
+          constraint remove
+        daemon urls: add_constraint_remote (param: c_type=ord),
+          remove_constraint_remote
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.order.simple.options" in-pcs="1" in-pcsd="0">
+      <description>
+        Can set constraint options and constraint id.
+
+        pcs commands: constraint order add
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.order.set" in-pcs="1" in-pcsd="1">
+      <description>
+        Create and delete an order constraint for sets of resources.
+
+        pcs commands: constraint order set, constraint remove
+        daemon urls: add_constraint_set_remote (param: c_type=ord),
+          remove_constraint_remote
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.order.set.options" in-pcs="1" in-pcsd="0">
+      <description>
+        Can set resource set options and constraint options.
+
+        pcs commands: constraint order set
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.ticket.simple" in-pcs="1" in-pcsd="1">
+      <description>
+        Create and delete a ticket constraint for one resource, including
+        specifying a resource role and loss policy.
+
+        pcs commands: constraint ticket add, constraint ticket remove,
+          constraint remove
+        daemon urls: add_constraint_remote (param: c_type=ticket),
+          remove_constraint_remote
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.ticket.simple.constraint-id" in-pcs="1" in-pcsd="0">
+      <description>
+        Can set constraint id when creating a ticket constraint.
+
+        pcs commands: constraint ticket add
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.ticket.set" in-pcs="1" in-pcsd="1">
+      <description>
+        Create and delete a ticket constraint for sets of resources, including
+        specifying loss policy.
+
+        pcs commands: constraint ticket set, constraint remove
+        daemon urls: add_constraint_set_remote (param: c_type=ticket),
+          remove_constraint_remote
+      </description>
+    </capability>
+    <capability id="pcmk.constraint.ticket.set.options" in-pcs="1" in-pcsd="0">
+      <description>
+        Can set resource set options and constraint options (resource roles,
+        a constraint id).
+
+        pcs commands: constraint ticket set
+      </description>
+    </capability>
+
+
+
+    <capability id="pcmk.properties.cluster" in-pcs="1" in-pcsd="1">
+      <description>
+        Show and set pacemaker cluster properties, can set multiple properties
+        at once.
+
+        pcs commands: prop ( | set | unset )
+        daemon urls: update_cluster_settings
+      </description>
+    </capability>
+    <capability id="pcmk.properties.cluster.describe" in-pcs="0" in-pcsd="1">
+      <description>
+        Show cluster properties definition and description (meta data).
+
+        daemon urls: get_cluster_properties_definition
+      </description>
+    </capability>
+    <capability id="pcmk.properties.operation-defaults" in-pcs="1" in-pcsd="0">
+      <description>
+        Show and set resource operations defaults, can set multiple defaults at
+        once.
+
+        pcs commands: resource op defaults
+      </description>
+    </capability>
+    <capability id="pcmk.properties.resource-defaults" in-pcs="1" in-pcsd="0">
+      <description>
+        Show and set resource defaults, can set multiple defaults at once.
+
+        pcs commands: resource defaults
+      </description>
+    </capability>
+
+
+
+    <capability id="pcmk.resource.create" in-pcs="1" in-pcsd="1">
+      <description>
+        Create a resource (including specifying its options). Optionally put
+        the resource into a new clone or master/slave, or into an existing or
+        new group. When putting the resource into a group, optionally specify
+        the resource's position in the group. Optionally disable the resource
+        upon its creation.
+
+        pcs commands: resource create
+        daemon urls: update_resource (param: resource_id not set)
+      </description>
+    </capability>
+    <capability id="pcmk.resource.create.in-existing-bundle" in-pcs="1" in-pcsd="0">
+      <description>
+        Put a newly created resource into an existing bundle.
+
+        pcs commands: resource create ... bundle
+      </description>
+    </capability>
+    <capability id="pcmk.resource.create.meta" in-pcs="1" in-pcsd="0">
+      <description>
+        Set resource meta attributes when creating a resource. When creating
+        a resource as a clone resource, it is possible to set clone meta
+        attributes. When creating a resource as a master/slave resource, it
+        is possible to set master/slave meta attributes.
+
+        pcs commands: resource create
+      </description>
+    </capability>
+    <capability id="pcmk.resource.create.operations" in-pcs="1" in-pcsd="0">
+      <description>
+        Set resource operations when creating a resource, also allow disabling
+        the copying of operations from a resource agent.
+
+        pcs commands: resource create ... op
+      </description>
+    </capability>
+    <capability id="pcmk.resource.create.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for the created resource to start.
+
+        pcs commands: resource create ... --wait
+      </description>
+    </capability>
+
+    <capability id="pcmk.resource.delete" in-pcs="1" in-pcsd="1">
+      <description>
+        Delete a resource.
+
+        pcs commands: resource delete
+        daemon urls: remove_resource
+      </description>
+    </capability>
+    <capability id="pcmk.resource.delete.list" in-pcs="0" in-pcsd="1">
+      <description>
+        Delete several resources at once.
+
+        daemon urls: remove_resource
+      </description>
+    </capability>
+
+    <capability id="pcmk.resource.update" in-pcs="1" in-pcsd="1">
+      <description>
+        Update instance options of a resource. Several options can be updated at
+        once.
+
+        pcs commands: resource update
+        daemon urls: update_resource (param: resource_id is set)
+      </description>
+    </capability>
+    <capability id="pcmk.resource.update.meta" in-pcs="1" in-pcsd="0">
+      <description>
+        Can update both instance and meta attributes of a resource, several
+        instance and meta attributes can be specified.
+
+        pcs commands: resource update ... meta
+      </description>
+    </capability>
+    <capability id="pcmk.resource.update.operations" in-pcs="1" in-pcsd="0">
+      <description>
+        Can update both instance attributes and operations of a resource,
+        several instance attributes and operations can be specified.
+
+        pcs commands: resource update ... op
+      </description>
+    </capability>
+    <capability id="pcmk.resource.update.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for the changes to take effect.
+
+        pcs commands: resource update --wait
+      </description>
+    </capability>
+    <capability id="pcmk.resource.update-meta" in-pcs="1" in-pcsd="1">
+      <description>
+        Update a meta attribute of a (primitive, clone, master/slave, group)
+        resource.
+
+        pcs commands: resource meta
+        daemon urls: add_meta_attr_remote
+      </description>
+    </capability>
+    <capability id="pcmk.resource.update-meta.list" in-pcs="1" in-pcsd="0">
+      <description>
+        Update several meta attributes of a resource at once.
+
+        pcs commands: resource update meta, resource meta
+      </description>
+    </capability>
+    <capability id="pcmk.resource.update-meta.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for the changes to take effect.
+
+        pcs commands: resource update --wait, resource meta --wait
+      </description>
+    </capability>
+    <capability id="pcmk.resource.update-operations" in-pcs="1" in-pcsd="0">
+      <description>
+        Create and delete an operation of an existing resource. An operation
+        can be deleted based on its id or definition (resource, action and
+        options).
+
+        pcs commands: resource op ( add | remove )
+      </description>
+    </capability>
+
+    <capability id="pcmk.resource.group" in-pcs="1" in-pcsd="1">
+      <description>
+        Support resource groups:
+        * Create a group containing specified resources
+        * Add a resource to a group, optionally specifying the resource's
+          position
+        * Remove a resource from a group
+        * Ungroup - get all resources out of a group and delete the (now empty)
+          group
+
+        pcs commands: resource group ( add | remove ), resource ungroup
+        daemon urls: add_group, resource_ungroup, resource_change_group,
+          update_resource
+      </description>
+    </capability>
+    <capability id="pcmk.resource.group.add-remove-list" in-pcs="1" in-pcsd="0">
+      <description>
+        Add or remove several resources from a group at once.
+
+        pcs commands: resource group ( add | remove )
+      </description>
+    </capability>
+    <capability id="pcmk.resource.group.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for the changes to take effect.
+
+        pcs commands: resource group ( add | remove ) --wait,
+          resource ungroup --wait
+      </description>
+    </capability>
+    <capability id="pcmk.resource.clone" in-pcs="1" in-pcsd="1">
+      <description>
+        Turn an existing resource into a clone resource and the other way
+        around.
+
+        pcs commands: resource clone, resource unclone
+        daemon urls: update_resource, resource_clone, resource_unclone
+      </description>
+    </capability>
+    <capability id="pcmk.resource.clone.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for the changes to take effect.
+
+        pcs commands: resource clone --wait, resource unclone --wait
+      </description>
+    </capability>
+    <capability id="pcmk.resource.clone.meta-in-create" in-pcs="1" in-pcsd="0">
+      <description>
+        Set clone meta attributes when cloning a resource.
+
+        pcs commands: resource clone
+      </description>
+    </capability>
+    <capability id="pcmk.resource.master" in-pcs="1" in-pcsd="1">
+      <description>
+        Turn an existing resource into a master/slave resource and the other
+        way around.
+
+        pcs commands: resource master, resource unclone
+        daemon urls: update_resource, resource_master, resource_unclone
+      </description>
+    </capability>
+    <capability id="pcmk.resource.master.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for the changes to take effect.
+
+        pcs commands: resource master --wait, resource unclone --wait
+      </description>
+    </capability>
+    <capability id="pcmk.resource.master.meta-in-create" in-pcs="1" in-pcsd="0">
+      <description>
+        Set master/slave meta attributes when turning an existing resource to a
+        master/slave resource.
+
+        pcs commands: resource master
+      </description>
+    </capability>
+    <capability id="pcmk.resource.bundle" in-pcs="1" in-pcsd="0">
+      <description>
+        Support bundle resources: mainly create, update and delete them, as
+        well as put a resource into a bundle. Includes support in other
+        resource-related commands.
+
+        pcs commands: resource bundle ( create | update )
+      </description>
+    </capability>
+    <capability id="pcmk.resource.bundle.container-docker" in-pcs="1" in-pcsd="0">
+      <description>
+        Support docker containers in bundle resources.
+
+        pcs commands: resource bundle
+      </description>
+    </capability>
+    <capability id="pcmk.resource.bundle.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for the changes to take effect when creating or updating a bundle
+        resource.
+
+        pcs commands: resource bundle ( create | update ) --wait
+      </description>
+    </capability>
+
+    <capability id="pcmk.resource.enable-disable" in-pcs="1" in-pcsd="1">
+      <description>
+        Disable and enable a resource.
+
+        pcs commands: resource disable, resource enable
+        daemon urls: resource_stop, resource_start
+      </description>
+    </capability>
+    <capability id="pcmk.resource.enable-disable.list" in-pcs="1" in-pcsd="0">
+      <description>
+        Disable or enable several resources at once.
+
+        pcs commands: resource disable, resource enable
+      </description>
+    </capability>
+    <capability id="pcmk.resource.enable-disable.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for resources to start or stop.
+
+        pcs commands: resource disable --wait, resource enable --wait
+      </description>
+    </capability>
+    <capability id="pcmk.resource.manage-unmanage" in-pcs="1" in-pcsd="1">
+      <description>
+        Put a resource into unmanaged or managed mode.
+
+        pcs commands: resource unmanage, resource manage
+        daemon urls: manage_resource, unmanage_resource
+      </description>
+    </capability>
+    <capability id="pcmk.resource.manage-unmanage.list" in-pcs="1" in-pcsd="1">
+      <description>
+        Put several resources into unmanaged or managed mode at once.
+
+        pcs commands: resource unmanage, resource manage
+        daemon urls: manage_resource, unmanage_resource
+      </description>
+    </capability>
+    <capability id="pcmk.resource.manage-unmanage.with-monitor" in-pcs="1" in-pcsd="0">
+      <description>
+        Optionally disable or enable monitor operations when putting resources
+        into unmanaged or managed mode respectively.
+
+        pcs commands: resource ( unmanage | manage ) --with-monitor
+      </description>
+    </capability>
+    <capability id="pcmk.resource.utilization" in-pcs="1" in-pcsd="1">
+      <description>
+        Show resource utilization attributes, add and remove a resource
+        utilization attribute.
+
+        pcs commands: resource utilization
+        daemon urls: set_resource_utilization
+      </description>
+    </capability>
+    <capability id="pcmk.resource.utilization-set-list-for-resource" in-pcs="1" in-pcsd="0">
+      <description>
+        Set several resource utilization attributes for a single resource at
+        once.
+
+        pcs commands: resource utilization
+      </description>
+    </capability>
+
+    <capability id="pcmk.resource.ban-move-clear" in-pcs="1" in-pcsd="0">
+      <description>
+        Ban a resource on a specified node and move a resource to a specified
+        node by creating constraints. Provide means to remove the constraints.
+        Applies to one specified resource. Optional parameters: a node, limiting
+        to a master role, a constraint lifetime. Optionally wait for the
+        constraints to take effect.
+      </description>
+    </capability>
+    <capability id="pcmk.resource.debug" in-pcs="1" in-pcsd="0">
+      <description>
+        Force a resource operation to happen on the local node and show its
+        debug log. Support start, stop, promote, demote and monitor
+        operations.
+
+        pcs commands: resource ( debug-start | debug-stop | debug-promote
+          | debug-demote | debug-monitor )
+      </description>
+    </capability>
+    <capability id="pcmk.resource.cleanup" in-pcs="1" in-pcsd="0">
+      <description>
+        Forget history of resources and redetect their current state. Optionally
+        specify a resource and/or a node.
+
+        pcs commands: resource cleanup
+      </description>
+    </capability>
+    <capability id="pcmk.resource.cleanup.one-resource" in-pcs="1" in-pcsd="1">
+      <description>
+        Forget history of a specified resource on all nodes and redetect its
+        current state.
+
+        pcs commands: resource cleanup
+        daemon urls: resource_cleanup
+      </description>
+    </capability>
+    <capability id="pcmk.resource.failcount" in-pcs="1" in-pcsd="0">
+      <description>
+        Show or reset failcount of a specified resource on all nodes or on
+        a specified node.
+
+        pcs commands: resource failcount ( show | reset )
+      </description>
+    </capability>
+    <capability id="pcmk.resource.relocate" in-pcs="1" in-pcsd="0">
+      <description>
+        Relocate all or specified resources to their preferred nodes. This
+        also includes a dry-run mode, means for displaying resources' optimal
+        placement and means for cleaning up related temporary settings.
+
+        pcs commands: resource relocate ( dry-run | run | show | clear )
+      </description>
+    </capability>
+    <capability id="pcmk.resource.restart" in-pcs="1" in-pcsd="0">
+      <description>
+        Restart a resource, allow specifying a node for multi-node resources
+        (clones, master/slaves, bundles), allow waiting for the resource to
+        restart.
+
+        pcs commands: resource restart
+      </description>
+    </capability>
+
+
+
+    <capability id="pcmk.stonith.create" in-pcs="1" in-pcsd="1">
+      <description>
+        Create a stonith resource (including specifying its options). Optionally
+        disable the resource upon its creation.
+
+        pcs commands: stonith create
+        daemon urls: update_fence_device (param: resource_id not set)
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.create.in-group" in-pcs="1" in-pcsd="0">
+      <description>
+        Put a new stonith resource into an existing or new group upon its
+        creation. Optionally specify the resource's position in the group.
+
+        pcs commands: stonith create ... group
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.create.meta" in-pcs="1" in-pcsd="0">
+      <description>
+        Set resource meta attributes when creating a stonith resource.
+
+        pcs commands: stonith create ... meta
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.create.operations" in-pcs="1" in-pcsd="0">
+      <description>
+        Set resource operations when creating a stonith resource.
+
+        pcs commands: stonith create ... op
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.create.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for the created resource to start.
+
+        pcs commands: stonith create ... --wait
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.delete" in-pcs="1" in-pcsd="0">
+      <description>
+        Delete a stonith resource.
+
+        pcs commands: stonith delete
+        daemon urls: currently missing, remove_resource is used instead which
+          works for now
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.update" in-pcs="1" in-pcsd="1">
+      <description>
+        Update instance options of a stonith resource.
+
+        pcs commands: stonith update
+        daemon urls: update_fence_device (param: resource_id is set)
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.enable-disable" in-pcs="1" in-pcsd="0">
+      <description>
+        Disable and enable a stonith resource.
+
+        pcs commands: stonith disable, stonith enable
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.enable-disable.list" in-pcs="1" in-pcsd="0">
+      <description>
+        Disable or enable several stonith resources at once.
+
+        pcs commands: stonith disable, stonith enable
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.enable-disable.wait" in-pcs="1" in-pcsd="0">
+      <description>
+        Wait for resources to start or stop.
+
+        pcs commands: stonith disable --wait, stonith enable --wait
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.cleanup" in-pcs="1" in-pcsd="0">
+      <description>
+        Forget history of stonith resources and redetect their current state.
+        Optionally specify a resource and/or a node.
+
+        pcs commands: stonith cleanup
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.levels" in-pcs="1" in-pcsd="1">
+      <description>
+        Basic stonith levels configuration:
+        * add a new stonith level with specified level, node and stonith device
+        * remove a stonith level with specified level, node and stonith device
+
+        pcs commands: stonith level ( | add | remove )
+        daemon urls: add_fence_level_remote
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.levels.add-remove-devices-list" in-pcs="1" in-pcsd="0">
+      <description>
+        Add or remove a list of stonith devices to or from a fencing level.
+
+        pcs commands: stonith level ( add | remove )
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.levels.node-attr" in-pcs="1" in-pcsd="0">
+      <description>
+        Support specifying a target by a node attribute.
+
+        pcs commands: attrib%name=value
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.levels.node-regexp" in-pcs="1" in-pcsd="0">
+      <description>
+        Support specifying a target by a regular expression applied to node
+        name.
+
+        pcs commands: regexp%pattern
+      </description>
+    </capability>
+
+
+
+    <capability id="pcs.auth" in-pcs="1" in-pcsd="1">
+      <description>
+        Authenticate pcsd on a list of hosts against each other by a username
+        and password, generating tokens which are then used to connect to
+        pcsd on the hosts.
+
+        pcs commands: cluster auth
+        daemon urls: auth
+      </description>
+    </capability>
+    <capability id="pcs.auth.clear" in-pcs="1" in-pcsd="0">
+      <description>
+        Remove tokens created during node authorization on the local and/or
+        remote hosts.
+      </description>
+    </capability>
+    <capability id="pcs.automatic-pcs-configs-sync" in-pcs="0" in-pcsd="1">
+      <description>
+        Automatically synchronize pcs/pcsd configuration files across the local
+        cluster. This consists of the synchronization itself, done by the
+        daemon, and commands to configure it (daemon and client).
+
+        daemon urls: get_sync_capabilities, set_sync_options, get_configs,
+          set_configs
+      </description>
+    </capability>
+    <capability id="pcs.cfg-in-file.cib" in-pcs="1" in-pcsd="0">
+      <description>
+        Can work with a CIB provided in a file instead of the live CIB of the
+        local cluster.
+
+        pcs commands: -f
+      </description>
+    </capability>
+    <capability id="pcs.permissions" in-pcs="0" in-pcsd="1">
+      <description>
+        Configure, list and enforce permissions for pcs/pcsd commands.
+
+        daemon urls: get_permissions, set_permissions
+      </description>
+    </capability>
+    <capability id="pcs.request-timeout" in-pcs="1" in-pcsd="0">
+      <description>
+        It is possible to set a request timeout applied in node-to-node
+        communication at the pcs level.
+
+        pcs commands: --request-timeout
+      </description>
+    </capability>
+    <capability id="pcs.daemon-ssl-cert.set" in-pcs="1" in-pcsd="1">
+      <description>
+        Set an SSL certificate (a certificate-key pair) to be used by pcsd on
+        the local host.
+
+        pcs commands: pcsd certkey
+        daemon urls: set_certs
+      </description>
+    </capability>
+    <capability id="pcs.daemon-ssl-cert.sync-to-local-cluster" in-pcs="1" in-pcsd="0">
+      <description>
+        Push the daemon's SSL certificate from the local host to all nodes in
+        the local cluster.
+      </description>
+    </capability>
+
+
+
+    <capability id="resource-agents.describe" in-pcs="1" in-pcsd="1">
+      <description>
+        Describe a resource agent - present its metadata.
+
+        pcs commands: resource describe
+        daemon urls: get_resource_agent_metadata
+      </description>
+    </capability>
+    <capability id="resource-agents.list" in-pcs="1" in-pcsd="1">
+      <description>
+        List resource agents available on the local host.
+
+        pcs commands: resource list
+        daemon urls: get_avail_resource_agents
+      </description>
+    </capability>
+    <capability id="resource-agents.list.detailed" in-pcs="1" in-pcsd="0">
+      <description>
+        List resource agents available on the local host including their
+        description, list available standards and providers, filter agents by
+        name.
+
+        pcs commands: resource list, resource standards, resource providers,
+          resource agents
+      </description>
+    </capability>
+    <capability id="stonith-agents.describe" in-pcs="1" in-pcsd="1">
+      <description>
+        Describe a stonith agent - present its metadata.
+
+        pcs commands: stonith describe
+        daemon urls: get_fence_agent_metadata
+      </description>
+    </capability>
+    <capability id="stonith-agents.list" in-pcs="1" in-pcsd="1">
+      <description>
+        List stonith agents available on the local host.
+
+        pcs commands: stonith list
+        daemon urls: get_avail_fence_agents
+      </description>
+    </capability>
+    <capability id="stonith-agents.list.detailed" in-pcs="1" in-pcsd="0">
+      <description>
+        List stonith agents available on the local host including their
+        description, filter agents by name.
+
+        pcs commands: stonith list
+      </description>
+    </capability>
+
+
+
+    <capability id="sbd" in-pcs="1" in-pcsd="1">
+      <description>
+        Enable and disable SBD in the local cluster, show cluster SBD
+        configuration and status. Can set SBD with a watchdog.
+
+        pcs commands: stonith sbd ( enable | disable ), stonith sbd status,
+          stonith sbd config
+        daemon urls: remote_enable_sbd, remote_disable_sbd
+      </description>
+    </capability>
+    <capability id="sbd.shared-block-device" in-pcs="1" in-pcsd="0">
+      <description>
+        Support SBD with shared block devices:
+        * set up a device
+        * set a device message
+        * optionally use up to 3 devices when enabling SBD in a cluster
+
+        pcs commands: stonith sbd device ( setup | message ), stonith sbd enable
+      </description>
+    </capability>
+    <capability id="sbd-node" in-pcs="0" in-pcsd="1">
+      <description>
+        Configure SBD on the node level:
+        * check if node can run SBD
+        * set and get SBD config
+        * set stonith watchdog timeout
+        * enable and disable SBD service
+
+        daemon urls: check_sbd, set_sbd_config, get_sbd_config,
+          set_stonith_watchdog_timeout_to_zero, remove_stonith_watchdog_timeout,
+          sbd_enable, sbd_disable
+      </description>
+    </capability>
+    <capability id="sbd-node.shared-block-device" in-pcs="0" in-pcsd="1">
+      <description>
+        Configure SBD on the node level with shared block devices:
+        * check if node can run SBD and has access to the specified SBD devices
+
+        daemon urls: check_sbd
+      </description>
+    </capability>
+
+
+
+    <capability id="status.corosync.membership" in-pcs="1" in-pcsd="0">
+      <description>
+        Show corosync membership on a node.
+
+        pcs commands: status corosync
+      </description>
+    </capability>
+    <capability id="status.pcmk.local-node" in-pcs="0" in-pcsd="1">
+      <description>
+        Show pacemaker status of the local node.
+
+        daemon urls: pacemaker_node_status
+      </description>
+    </capability>
+    <capability id="status.pcmk.resources.hide-inactive" in-pcs="1" in-pcsd="0">
+      <description>
+        Can hide inactive resources when showing resource status.
+
+        pcs commands: status resources --hide-inactive
+      </description>
+    </capability>
+    <capability id="status.pcmk.resources.orphaned" in-pcs="1" in-pcsd="0">
+      <description>
+        Display orphaned resources when showing resource status.
+
+        pcs commands: status resources
+      </description>
+    </capability>
+    <capability id="status.pcmk.xml" in-pcs="1" in-pcsd="0">
+      <description>
+        Display pacemaker status in XML format.
+
+        pcs commands: status xml
+      </description>
+    </capability>
+  </capability-list>
+</pcs-capabilities>
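
As a rough sketch (not part of the patch; the file path below is only an
example), the capability list added above can be inspected with REXML, the
same library the pcsd startup code further down uses:

    require 'rexml/document'

    # example path; pcsd resolves the real location via get_pcsd_path()
    filename = '/usr/share/pcsd/capabilities.xml'
    doc = REXML::Document.new(File.new(filename))

    pcsd_capabilities = []
    doc.elements.each('.//capability') { |cap|
      # keep only capabilities flagged as implemented in the daemon
      if cap.attributes['in-pcsd'] == '1'
        pcsd_capabilities << cap.attributes['id']
      end
    }

    puts pcsd_capabilities.sort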
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 1fd5167..68d2e7e 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -1165,16 +1165,21 @@ def add_prefix_to_keys(hash, prefix)
   return new_hash
 end
 
-def check_gui_status_of_nodes(auth_user, nodes, check_mutuality=false, timeout=10)
+def check_gui_status_of_nodes(auth_user, nodes, check_mutuality=false, timeout=10, ports=nil)
   options = {}
   options[:check_auth_only] = '' if not check_mutuality
   threads = []
   not_authorized_nodes = []
   online_nodes = []
   offline_nodes = []
+  token_file = read_token_file()
 
   nodes = nodes.uniq.sort
   nodes.each { |node|
+    if ports and ports[node] != token_file.ports[node]
+      not_authorized_nodes << node
+      next
+    end
     threads << Thread.new {
       code, response = send_request_with_token(
         auth_user, node, 'check_auth', false, options, true, nil, timeout
@@ -1203,6 +1208,7 @@ def check_gui_status_of_nodes(auth_user, nodes, check_mutuality=false, timeout=1
             offline_nodes << node
           end
         rescue JSON::ParserError
+          not_authorized_nodes << node
         end
       end
     }
@@ -1220,7 +1226,7 @@ def pcs_auth(auth_user, nodes, username, password, force=false, local=true)
   # check for already authorized nodes
   if not force
     online, offline, not_authenticated = check_gui_status_of_nodes(
-      auth_user, nodes.keys, true
+      auth_user, nodes.keys, true, 10, nodes
     )
     if not_authenticated.length < 1
       result = {}
@@ -1588,7 +1594,10 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
     :status => 'unknown',
     :node_list => [],
     :resource_list => [],
+    # deprecated, kept for backward compatibility
+    # use pcsd_capabilities instead
     :available_features => [],
+    :pcsd_capabilities => [],
   }
 
   threads = []
@@ -1618,6 +1627,7 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
       begin
         parsed_response = JSON.parse(response, {:symbolize_names => true})
         parsed_response[:available_features] ||= []
+        parsed_response[:pcsd_capabilities] ||= []
         if parsed_response[:noresponse]
           node_map[node][:node] = {}
           node_map[node][:node].update(node_status_unknown)
@@ -1708,11 +1718,15 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
   node_map.each { |_, cluster_status|
     node_status = cluster_status[:node][:status]
     node_name = cluster_status[:node][:name]
-    # create set of available features on all nodes
-    # it is intersection of available features from all nodes
+    # Create a set of available features on all nodes as an intersection of
+    # available features from all nodes. Do it for both the old deprecated list
+    # (available_features) and the new one (pcsd_capabilities)
     if node_status != 'unknown' and cluster_status[:available_features]
       status[:available_features] &= cluster_status[:available_features]
     end
+    if node_status != 'unknown' and cluster_status[:pcsd_capabilities]
+      status[:pcsd_capabilities] &= cluster_status[:pcsd_capabilities]
+    end
     if (
       cluster_status[:node][:services] and
       cluster_status[:node][:services][:sbd]
@@ -1855,6 +1869,8 @@ def get_node_status(auth_user, cib_dom)
       :nodes_utilization => get_nodes_utilization(cib_dom),
       :alerts => get_alerts(auth_user),
       :known_nodes => [],
+      # deprecated, kept for backward compatibility
+      # use pcsd_capabilities instead
       :available_features => [
         'constraint_colocation_set',
         'sbd',
@@ -1863,7 +1879,8 @@ def get_node_status(auth_user, cib_dom)
         'unmanaged_resource',
         'alerts',
         'hardened_cluster',
-      ]
+      ],
+      :pcsd_capabilities => CAPABILITIES_PCSD
   }
 
   nodes = get_nodes_status()
diff --git a/pcsd/pcsd.8 b/pcsd/pcsd.8
index f8e04ed..9e14b60 100644
--- a/pcsd/pcsd.8
+++ b/pcsd/pcsd.8
@@ -1,4 +1,4 @@
-.TH PCSD "8" "October 2017" "pcs 0.9.160" "System Administration Utilities"
+.TH PCSD "8" "November 2017" "pcs 0.9.161" "System Administration Utilities"
 .SH NAME
 pcsd \- pacemaker/corosync configuration system daemon
 
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index ad0370c..f2dff9d 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -115,6 +115,35 @@ configure do
   STDERR.sync = true
   $logger = configure_logger('/var/log/pcsd/pcsd.log')
   $semaphore_cfgsync = Mutex.new
+
+  capabilities = []
+  capabilities_pcsd = []
+  begin
+    filename = (get_pcsd_path() + Pathname.new('capabilities.xml')).to_s
+    capabilities_xml = REXML::Document.new(File.new(filename))
+    capabilities_xml.elements.each('.//capability') { |feat_xml|
+      feat = {}
+      feat_xml.attributes.each() { |name, value|
+        feat[name] = value
+      }
+      feat['description'] = ''
+      if feat_xml.elements['description']
+        feat['description'] = feat_xml.elements['description'].text.strip
+      end
+      capabilities << feat
+    }
+    capabilities.each { |feat|
+      if feat['in-pcsd'] == '1'
+        capabilities_pcsd << feat['id']
+      end
+    }
+  rescue => e
+    $logger.error(
+      "Cannot read capabilities definition file '#{filename}': '#{e}'"
+    )
+  end
+  CAPABILITIES = capabilities.freeze
+  CAPABILITIES_PCSD = capabilities_pcsd.freeze
 end
 
 set :logging, true
@@ -646,17 +675,8 @@ already been added to pcsd.  You may not add two clusters with the same name int
       ports[node] = (params["port-#{node}"] || '').strip
     }
     node_results = {}
-    node_list_to_check = []
-    token_file = read_token_file()
-    ports.each { |node, port|
-      if port != (token_file.ports[node] || '')
-        node_results[node] = 'Unable to authenticate'
-      else
-        node_list_to_check << node
-      end
-    }
     online, offline, notauthorized = check_gui_status_of_nodes(
-      auth_user, node_list_to_check
+      auth_user, node_list, false, 10, ports
     )
     online.each { |node|
       node_results[node] = 'Online'
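
For reference, a minimal sketch of the port check that now lives in
check_gui_status_of_nodes() (names simplified: stored_ports stands in for
read_token_file().ports, ports for the ports submitted by the caller):

    def port_mismatch?(node, ports, stored_ports)
      # a node whose submitted port differs from the stored one is reported
      # as not authorized without contacting it
      ports && ports[node] != stored_ports[node]
    end

    stored_ports = { 'node1' => '2224' }
    puts port_mismatch?('node1', { 'node1' => '3000' }, stored_ports)  # => true
    puts port_mismatch?('node1', { 'node1' => '2224' }, stored_ports)  # => false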
diff --git a/pcsd/pcsd.service b/pcsd/pcsd.service
index 20bc9ab..a28faab 100644
--- a/pcsd/pcsd.service
+++ b/pcsd/pcsd.service
@@ -1,5 +1,7 @@
 [Unit]
 Description=PCS GUI and remote configuration interface
+Documentation=man:pcsd(8)
+Documentation=man:pcs(8)
 
 [Service]
 EnvironmentFile=/etc/sysconfig/pcsd
diff --git a/pcsd/pcsd.service.debian b/pcsd/pcsd.service.debian
index 0892773..d37ed5a 100644
--- a/pcsd/pcsd.service.debian
+++ b/pcsd/pcsd.service.debian
@@ -1,5 +1,7 @@
 [Unit]
 Description=PCS GUI and remote configuration interface
+Documentation=man:pcsd(8)
+Documentation=man:pcs(8)
 
 [Service]
 EnvironmentFile=/etc/default/pcsd
diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
index 239b915..953ba60 100644
--- a/pcsd/public/js/nodes-ember.js
+++ b/pcsd/public/js/nodes-ember.js
@@ -38,28 +38,48 @@ Pcs = Ember.Application.createWithMixins({
     if (this.cur_page == "wizards") return "display: table-row;";
     else return "display: none;";
   }.property("cur_page"),
-  available_features: [],
+  available_features: [], /* deprecated capability list */
+  pcsd_capabilities: [], /* new capability list */
   is_sbd_supported: function() {
-    return (this.get("available_features").indexOf("sbd") != -1);
-  }.property("available_features"),
+    return (
+      (this.get("available_features").indexOf("sbd") != -1)
+      ||
+      (this.get("pcsd_capabilities").indexOf("sbd") != -1)
+    );
+  }.property("available_features", "pcsd_capabilities"),
   is_ticket_constraints_supported: function(){
     return (
-      this.get("available_features").indexOf("ticket_constraints") != -1
+      (this.get("available_features").indexOf("ticket_constraints") != -1)
+      ||
+      (
+        (this.get("pcsd_capabilities").indexOf("pcmk.constraint.ticket.simple") != -1)
+        &&
+        (this.get("pcsd_capabilities").indexOf("pcmk.constraint.ticket.set") != -1)
+      )
     );
-  }.property("available_features"),
+  }.property("available_features", "pcsd_capabilities"),
   is_supported_constraint_colocation_set: function() {
     return (
-      this.get("available_features").indexOf("constraint_colocation_set") != -1
+      (this.get("available_features").indexOf("constraint_colocation_set") != -1)
+      ||
+      (this.get("pcsd_capabilities").indexOf("pcmk.constraint.colocation.set") != -1)
     );
-  }.property("available_features"),
+  }.property("available_features", "pcsd_capabilities"),
   is_supported_moving_resource_in_group: function() {
     return (
-      this.get("available_features").indexOf("moving_resource_in_group") != -1
+      (this.get("available_features").indexOf("moving_resource_in_group") != -1)
+      ||
+      /* ability to set a position in a group is mandatory in pcmk.resource.group */
+      (this.get("pcsd_capabilities").indexOf("pcmk.resource.group") != -1)
     );
-  }.property("available_features"),
+  }.property("available_features", "pcsd_capabilities"),
   is_supported_unmanaged_resource: function() {
-    return (this.get("available_features").indexOf("unmanaged_resource") != -1);
-  }.property("available_features"),
+    return (
+      (this.get("available_features").indexOf("unmanaged_resource") != -1)
+      ||
+      (this.get("pcsd_capabilities").indexOf("pcmk.resource.manage-unmanage") != -1)
+    );
+  }.property("available_features", "pcsd_capabilities"),
   is_sbd_running: false,
   is_sbd_enabled: false,
   is_sbd_enabled_or_running: function() {
@@ -156,10 +176,16 @@ Pcs = Ember.Application.createWithMixins({
         Pcs.set("cluster_settings",data.cluster_settings);
         Pcs.set('need_ring1_address', false);
         Pcs.set('is_cman_with_udpu_transport', false);
+        /* deprecated capability list */
         Pcs.set(
           'available_features',
           data['available_features'] ? data['available_features'] : []
         );
+        /* new capability list */
+        Pcs.set(
+          'pcsd_capabilities',
+          data['pcsd_capabilities'] ? data['pcsd_capabilities'] : []
+        );
         if (data['need_ring1_address']) {
           Pcs.set('need_ring1_address', true);
         }
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index 34d0318..cd7ec1f 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -23,6 +23,7 @@ require 'pcsd_exchange_format.rb'
 # Commands for remote access
 def remote(params, request, auth_user)
   remote_cmd_without_pacemaker = {
+      :capabilities => method(:capabilities),
       :status => method(:node_status),
       :status_all => method(:status_all),
       :cluster_status => method(:cluster_status_remote),
@@ -159,6 +160,12 @@ def remote(params, request, auth_user)
   end
 end
 
+def capabilities(params, request, auth_user)
+  return JSON.generate({
+    :pcsd_capabilities => CAPABILITIES_PCSD,
+  })
+end
+
 # provides remote cluster status to a local gui
 def cluster_status_gui(auth_user, cluster_name, dont_update_config=false)
   cluster_nodes = get_cluster_nodes(cluster_name)
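
A hedged sketch of how another pcsd instance could query a node for its
capability list through the new remote command (the call mirrors the
check_auth query in pcs.rb above; 'node1' is just an example node name):

    code, response = send_request_with_token(
      auth_user, 'node1', 'capabilities', false, {}, true, nil, 10
    )
    if code == 200
      caps = JSON.parse(response)['pcsd_capabilities']
      puts caps.include?('pcmk.constraint.ticket.set')
    end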
diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian
index 8790bd1..61d03ac 100644
--- a/pcsd/settings.rb.debian
+++ b/pcsd/settings.rb.debian
@@ -1,6 +1,7 @@
 PCS_EXEC = '/usr/sbin/pcs'
 PCSD_EXEC_LOCATION = '/usr/share/pcsd/'
 PCSD_VAR_LOCATION = '/var/lib/pcsd/'
+PCSD_DEFAULT_PORT = 2224
 
 CRT_FILE = PCSD_VAR_LOCATION + 'pcsd.crt'
 KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
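
A possible use of the new constant, shown only as an assumption about how a
caller might fall back to it when no per-node port has been stored (this
helper is hypothetical and not part of the patch):

    def effective_port(stored_ports, node)
      port = (stored_ports[node] || '').strip
      # fall back to the default pcsd port when nothing is configured
      port.empty? ? PCSD_DEFAULT_PORT : port.to_i
    end

    puts effective_port({}, 'node1')                     # => 2224
    puts effective_port({ 'node1' => '3000' }, 'node1')  # => 3000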
diff --git a/setup.py b/setup.py
index 5ba9b27..4fa23ef 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ class CleanCommand(Command):
 
 setup(
     name='pcs',
-    version='0.9.160',
+    version='0.9.161',
     description='Pacemaker Configuration System',
     author='Chris Feist',
     author_email='cfeist at redhat.com',

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/pcs.git


