[Debian-ha-commits] [pcs] 02/08: Imported Upstream version 0.9.153
Valentin Vidic
vvidic-guest at moszumanska.debian.org
Tue Jul 5 13:00:13 UTC 2016
This is an automated email from the git hooks/post-receive script.
vvidic-guest pushed a commit to branch master
in repository pcs.
commit 12263e711c1ad6558e37bccb04f5d3163ea713b9
Author: Valentin Vidic <Valentin.Vidic at CARNet.hr>
Date: Tue Jul 5 13:27:20 2016 +0200
Imported Upstream version 0.9.153
---
.pylintrc | 2 +-
Makefile | 10 +-
pcs/alert.py | 237 ++
pcs/app.py | 6 +
pcs/cli/common/env.py | 1 +
pcs/cli/common/lib_wrapper.py | 39 +-
pcs/cli/common/middleware.py | 2 +-
pcs/cluster.py | 138 +-
pcs/common/report_codes.py | 38 +-
pcs/config.py | 4 +
pcs/lib/cib/alert.py | 281 +++
pcs/lib/cib/nvpair.py | 90 +
pcs/lib/cib/test/test_alert.py | 931 ++++++++
pcs/lib/cib/test/test_nvpair.py | 206 ++
pcs/lib/cib/tools.py | 127 +
pcs/lib/commands/alert.py | 169 ++
pcs/lib/commands/qdevice.py | 88 +-
pcs/lib/commands/quorum.py | 238 +-
pcs/lib/commands/test/test_alert.py | 639 +++++
pcs/lib/commands/test/test_ticket.py | 2 +-
pcs/lib/corosync/config_facade.py | 98 +-
pcs/lib/corosync/live.py | 30 +
pcs/lib/corosync/qdevice_client.py | 93 +
pcs/lib/corosync/qdevice_net.py | 314 ++-
pcs/lib/env.py | 43 +-
pcs/lib/errors.py | 6 +-
pcs/lib/external.py | 44 +-
pcs/lib/nodes_task.py | 69 +-
pcs/lib/pacemaker.py | 17 +-
pcs/lib/reports.py | 329 ++-
pcs/lib/sbd.py | 14 +-
pcs/pcs.8 | 63 +-
pcs/qdevice.py | 71 +
pcs/quorum.py | 41 +-
pcs/resource.py | 3 +-
pcs/settings_default.py | 9 +-
pcs/test/resources/cib-empty-2.5.xml | 10 +
pcs/test/resources/qdevice-certs/qnetd-cacert.crt | 1 +
pcs/test/suite.py | 6 +-
pcs/test/test_alert.py | 363 +++
pcs/test/test_lib_cib_tools.py | 181 +-
pcs/test/test_lib_commands_qdevice.py | 255 ++
pcs/test/test_lib_commands_quorum.py | 1161 ++++++++-
pcs/test/test_lib_corosync_config_facade.py | 367 ++-
pcs/test/test_lib_corosync_live.py | 104 +-
pcs/test/test_lib_corosync_qdevice_client.py | 60 +
pcs/test/test_lib_corosync_qdevice_net.py | 965 +++++++-
pcs/test/test_lib_env.py | 282 ++-
pcs/test/test_lib_external.py | 126 +-
pcs/test/test_lib_nodes_task.py | 168 +-
pcs/test/test_lib_pacemaker.py | 24 +-
pcs/test/test_lib_sbd.py | 10 +-
pcs/test/test_quorum.py | 9 +-
pcs/test/test_resource.py | 6 +
pcs/test/test_stonith.py | 3 +
pcs/test/test_utils.py | 2628 +++++++++++----------
pcs/test/tools/color_text_runner.py | 10 +
pcs/usage.py | 109 +-
pcs/utils.py | 156 +-
pcsd/bootstrap.rb | 2 +-
pcsd/pcs.rb | 17 +
pcsd/pcsd.service | 2 +-
pcsd/pcsd.service-runner | 13 +
pcsd/remote.rb | 234 +-
pcsd/settings.rb | 6 +
pcsd/settings.rb.debian | 10 +-
pcsd/views/main.erb | 13 +-
pcsd/views/nodes.erb | 14 +-
setup.py | 2 +-
69 files changed, 10126 insertions(+), 1683 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index 661f3d2..e378e6a 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -92,7 +92,7 @@ dummy-variables-rgx=_$|dummy
[FORMAT]
# Maximum number of lines in a module
-max-module-lines=4571
+max-module-lines=4577
# Maximum number of characters on a single line.
max-line-length=1291
diff --git a/Makefile b/Makefile
index de216ce..cbbeb85 100644
--- a/Makefile
+++ b/Makefile
@@ -76,11 +76,16 @@ ifndef install_settings
endif
endif
+
+ifndef BASH_COMPLETION_DIR
+ BASH_COMPLETION_DIR=${DESTDIR}/etc/bash_completion.d
+endif
+
install:
$(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS}
mkdir -p ${DESTDIR}${PREFIX}/sbin/
mv ${DESTDIR}${PREFIX}/bin/pcs ${DESTDIR}${PREFIX}/sbin/pcs
- install -D pcs/bash_completion.sh ${DESTDIR}/etc/bash_completion.d/pcs
+ install -D pcs/bash_completion.sh ${BASH_COMPLETION_DIR}/pcs
install -m644 -D pcs/pcs.8 ${DESTDIR}/${MANDIR}/man8/pcs.8
ifeq ($(IS_DEBIAN),true)
ifeq ($(install_settings),true)
@@ -126,6 +131,9 @@ else
ifeq ($(IS_SYSTEMCTL),true)
install -d ${DESTDIR}/${systemddir}/system/
install -m 644 pcsd/pcsd.service ${DESTDIR}/${systemddir}/system/
+# ${DESTDIR}${PREFIX}/lib/pcsd/pcsd holds the selinux context
+ install -m 755 pcsd/pcsd.service-runner ${DESTDIR}${PREFIX}/lib/pcsd/pcsd
+ rm ${DESTDIR}${PREFIX}/lib/pcsd/pcsd.service-runner
else
install -m 755 -D pcsd/pcsd ${DESTDIR}/${initdir}/pcsd
endif
diff --git a/pcs/alert.py b/pcs/alert.py
new file mode 100644
index 0000000..d3a6e28
--- /dev/null
+++ b/pcs/alert.py
@@ -0,0 +1,237 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+import sys
+
+from pcs import (
+ usage,
+ utils,
+)
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.cli.common.parse_args import prepare_options
+from pcs.cli.common.console_report import indent
+from pcs.lib.errors import LibraryError
+
+
def alert_cmd(*args):
    """
    Entry point of the 'pcs alert' command; dispatches to a sub-command.

    args -- (lib, argv, modifiers) as wired up in pcs.app
    """
    argv = args[1]
    # no sub-command means "config", i.e. show the current alert setup
    sub_cmd = argv.pop(0) if argv else "config"
    dispatch = {
        "create": alert_add,
        "update": alert_update,
        "remove": alert_remove,
        "config": print_alert_config,
        "show": print_alert_config,
        "recipient": recipient_cmd,
    }
    try:
        if sub_cmd == "help":
            usage.alert(argv)
        elif sub_cmd in dispatch:
            dispatch[sub_cmd](*args)
        else:
            raise CmdLineInputError()
    except LibraryError as e:
        utils.process_library_reports(e.args)
    except CmdLineInputError as e:
        utils.exit_on_cmdline_input_errror(e, "alert", sub_cmd)
+
+
def recipient_cmd(*args):
    """
    Dispatch 'pcs alert recipient <sub_cmd>' to the matching handler.

    args -- (lib, argv, modifiers) as passed through by alert_cmd
    """
    argv = args[1]

    if not argv:
        usage.alert(["recipient"])
        sys.exit(1)

    sub_cmd = argv.pop(0)
    try:
        if sub_cmd == "help":
            usage.alert(["recipient"])
        elif sub_cmd == "add":
            recipient_add(*args)
        elif sub_cmd == "update":
            recipient_update(*args)
        elif sub_cmd == "remove":
            recipient_remove(*args)
        else:
            # an unknown sub-command used to fall through silently;
            # report it as a usage error, consistent with alert_cmd
            raise CmdLineInputError()
    except CmdLineInputError as e:
        utils.exit_on_cmdline_input_errror(
            e, "alert", "recipient {0}".format(sub_cmd)
        )
+
+
def parse_cmd_sections(arg_list, section_list):
    """
    Split a flat argument list into named sections.

    A token matching a name in section_list starts (or re-opens) that
    section; all other tokens are appended to the currently open section.
    Tokens before any section keyword go to "main".

    arg_list -- command line arguments to split
    section_list -- section keyword names
    Returns a dict mapping each section name (plus "main") to its tokens.
    """
    output = {name: [] for name in list(section_list) + ["main"]}
    current = "main"
    for token in arg_list:
        if token in section_list:
            current = token
        else:
            output[current].append(token)
    return output
+
+
def ensure_only_allowed_options(parameter_dict, allowed_list):
    """
    Validate that parameter_dict contains only keys from allowed_list.

    Raises CmdLineInputError naming the first unexpected parameter found
    (in the dict's iteration order).
    """
    unexpected = next(
        (name for name in parameter_dict if name not in allowed_list),
        None,
    )
    if unexpected is not None:
        raise CmdLineInputError(
            "Unexpected parameter '{0}={1}'".format(
                unexpected, parameter_dict[unexpected]
            )
        )
+
+
def alert_add(lib, argv, modifiers):
    """
    Create a new alert: 'pcs alert create'.

    lib -- library wrapper
    argv -- command line arguments: [id=<id>] [description=<desc>]
        path=<path> plus 'options' and 'meta' sections
    modifiers -- command modifiers (unused)
    """
    if not argv:
        raise CmdLineInputError()

    sections = parse_cmd_sections(argv, ["options", "meta"])
    main_args = prepare_options(sections["main"])
    ensure_only_allowed_options(main_args, ["id", "description", "path"])

    alert_id = main_args.get("id", None)
    path = main_args.get("path", None)
    description = main_args.get("description", None)
    instance_attributes = prepare_options(sections["options"])
    meta_attributes = prepare_options(sections["meta"])

    lib.alert.create_alert(
        alert_id, path, instance_attributes, meta_attributes, description
    )
+
+
def alert_update(lib, argv, modifiers):
    """
    Update an existing alert: 'pcs alert update <alert-id> ...'.

    lib -- library wrapper
    argv -- command line arguments; first one is the alert id, the rest
        are [description=<desc>] [path=<path>] plus 'options'/'meta' sections
    modifiers -- command modifiers (unused)
    """
    if not argv:
        raise CmdLineInputError()

    alert_id = argv[0]

    sections = parse_cmd_sections(argv[1:], ["options", "meta"])
    main_args = prepare_options(sections["main"])
    ensure_only_allowed_options(main_args, ["description", "path"])

    path = main_args.get("path", None)
    description = main_args.get("description", None)
    instance_attributes = prepare_options(sections["options"])
    meta_attributes = prepare_options(sections["meta"])

    lib.alert.update_alert(
        alert_id, path, instance_attributes, meta_attributes, description
    )
+
+
def alert_remove(lib, argv, modifiers):
    """
    Remove an alert: 'pcs alert remove <alert-id>'.

    Expects exactly one positional argument, the alert id.
    """
    if len(argv) != 1:
        raise CmdLineInputError()
    alert_id = argv[0]
    lib.alert.remove_alert(alert_id)
+
+
def recipient_add(lib, argv, modifiers):
    """
    Add a recipient to an alert: 'pcs alert recipient add <alert-id> <value>'.

    lib -- library wrapper
    argv -- alert id, recipient value, then [description=<desc>] plus
        'options' and 'meta' sections
    modifiers -- command modifiers (unused)
    """
    if len(argv) < 2:
        raise CmdLineInputError()

    alert_id, recipient_value = argv[0], argv[1]

    sections = parse_cmd_sections(argv[2:], ["options", "meta"])
    main_args = prepare_options(sections["main"])
    ensure_only_allowed_options(main_args, ["description"])

    lib.alert.add_recipient(
        alert_id,
        recipient_value,
        prepare_options(sections["options"]),
        prepare_options(sections["meta"]),
        main_args.get("description", None),
    )
+
+
def recipient_update(lib, argv, modifiers):
    """
    Update a recipient: 'pcs alert recipient update <alert-id> <value>'.

    lib -- library wrapper
    argv -- alert id, recipient value, then [description=<desc>] plus
        'options' and 'meta' sections
    modifiers -- command modifiers (unused)
    """
    if len(argv) < 2:
        raise CmdLineInputError()

    alert_id, recipient_value = argv[0], argv[1]

    sections = parse_cmd_sections(argv[2:], ["options", "meta"])
    main_args = prepare_options(sections["main"])
    ensure_only_allowed_options(main_args, ["description"])

    lib.alert.update_recipient(
        alert_id,
        recipient_value,
        prepare_options(sections["options"]),
        prepare_options(sections["meta"]),
        main_args.get("description", None),
    )
+
+
def recipient_remove(lib, argv, modifiers):
    """
    Remove a recipient: 'pcs alert recipient remove <alert-id> <value>'.

    Expects exactly two positional arguments.
    """
    if len(argv) != 2:
        raise CmdLineInputError()
    alert_id, recipient_value = argv
    lib.alert.remove_recipient(alert_id, recipient_value)
+
+
def _nvset_to_str(nvset_obj):
    """
    Render an nvset (iterable of {"name": ..., "value": ...} dicts) as a
    single space-separated 'name=value' string.
    """
    return " ".join(
        "{key}={value}".format(key=nvpair["name"], value=nvpair["value"])
        for nvpair in nvset_obj
    )
+
+
def __description_attributes_to_str(obj):
    """
    Return display lines for the description, instance attributes and meta
    attributes of an alert/recipient dict, skipping the parts that are
    missing or empty.
    """
    lines = []
    description = obj.get("description")
    if description:
        lines.append("Description: {desc}".format(desc=description))
    instance_attributes = obj.get("instance_attributes")
    if instance_attributes:
        lines.append("Options: {attributes}".format(
            attributes=_nvset_to_str(instance_attributes)
        ))
    meta_attributes = obj.get("meta_attributes")
    if meta_attributes:
        lines.append("Meta options: {attributes}".format(
            attributes=_nvset_to_str(meta_attributes)
        ))
    return lines
+
+
def _alert_to_str(alert):
    """
    Build the list of display lines for one alert dict: a header line
    'Alert: <id> (path=<path>)' followed by its indented description,
    attributes and recipients.
    """
    lines = list(__description_attributes_to_str(alert))

    recipient_lines = []
    for recipient in alert.get("recipient_list", []):
        recipient_lines.extend(_recipient_to_str(recipient))
    if recipient_lines:
        lines.append("Recipients:")
        lines.extend(indent(recipient_lines, 1))

    header = "Alert: {alert_id} (path={path})".format(
        alert_id=alert["id"], path=alert["path"]
    )
    return [header] + indent(lines, 1)
+
+
def _recipient_to_str(recipient):
    """
    Build the list of display lines for one recipient dict: a header line
    'Recipient: <value>' followed by its indented description/attributes.
    """
    header = "Recipient: {value}".format(value=recipient["value"])
    details = indent(__description_attributes_to_str(recipient), 1)
    return [header] + details
+
+
def print_alert_config(lib, argv, modifiers):
    """
    Print the configuration of all alerts: 'pcs alert config'.

    Takes no positional arguments; raises CmdLineInputError if any given.
    """
    if argv:
        raise CmdLineInputError()

    print("Alerts:")
    alert_list = lib.alert.get_all_alerts()
    if not alert_list:
        print(" No alerts defined")
        return
    for alert in alert_list:
        print("\n".join(indent(_alert_to_str(alert), 1)))
diff --git a/pcs/app.py b/pcs/app.py
index 3c4865f..3758ee4 100644
--- a/pcs/app.py
+++ b/pcs/app.py
@@ -27,6 +27,7 @@ from pcs import (
stonith,
usage,
utils,
+ alert,
)
from pcs.cli.common import completion
@@ -193,6 +194,11 @@ def main(argv=None):
argv,
utils.get_modificators()
),
+ "alert": lambda args: alert.alert_cmd(
+ utils.get_library_wrapper(),
+ args,
+ utils.get_modificators()
+ ),
}
if command not in cmd_map:
usage.main()
diff --git a/pcs/cli/common/env.py b/pcs/cli/common/env.py
index f407981..2ba4f70 100644
--- a/pcs/cli/common/env.py
+++ b/pcs/cli/common/env.py
@@ -8,6 +8,7 @@ from __future__ import (
class Env(object):
def __init__(self):
self.cib_data = None
+ self.cib_upgraded = False
self.user = None
self.groups = None
self.corosync_conf_data = None
diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
index 909b435..c4b8342 100644
--- a/pcs/cli/common/lib_wrapper.py
+++ b/pcs/cli/common/lib_wrapper.py
@@ -19,6 +19,7 @@ from pcs.lib.commands import (
quorum,
qdevice,
sbd,
+ alert,
)
from pcs.cli.common.reports import (
LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
@@ -42,6 +43,14 @@ def cli_env_to_lib_env(cli_env):
cli_env.auth_tokens_getter,
)
+def lib_env_to_cli_env(lib_env, cli_env):
+ if not lib_env.is_cib_live:
+ cli_env.cib_data = lib_env._get_cib_xml()
+ cli_env.cib_upgraded = lib_env.cib_upgraded
+ if not lib_env.is_corosync_conf_live:
+ cli_env.corosync_conf_data = lib_env.get_corosync_conf_data()
+ return cli_env
+
def bind(cli_env, run_with_middleware, run_library_command):
def run(cli_env, *args, **kwargs):
lib_env = cli_env_to_lib_env(cli_env)
@@ -50,10 +59,7 @@ def bind(cli_env, run_with_middleware, run_library_command):
#midlewares needs finish its work and they see only cli_env
#so we need reflect some changes to cli_env
- if not lib_env.is_cib_live:
- cli_env.cib_data = lib_env.get_cib_xml()
- if not lib_env.is_corosync_conf_live:
- cli_env.corosync_conf_data = lib_env.get_corosync_conf_data()
+ lib_env_to_cli_env(lib_env, cli_env)
return lib_call_result
return partial(run_with_middleware, run, cli_env)
@@ -110,7 +116,10 @@ def load_module(env, middleware_factory, name):
"add_device": quorum.add_device,
"get_config": quorum.get_config,
"remove_device": quorum.remove_device,
+ "set_expected_votes_live": quorum.set_expected_votes_live,
"set_options": quorum.set_options,
+ "status": quorum.status_text,
+ "status_device": quorum.status_device_text,
"update_device": quorum.update_device,
}
)
@@ -119,6 +128,7 @@ def load_module(env, middleware_factory, name):
env,
middleware.build(),
{
+ "status": qdevice.qdevice_status_text,
"setup": qdevice.qdevice_setup,
"destroy": qdevice.qdevice_destroy,
"start": qdevice.qdevice_start,
@@ -126,6 +136,13 @@ def load_module(env, middleware_factory, name):
"kill": qdevice.qdevice_kill,
"enable": qdevice.qdevice_enable,
"disable": qdevice.qdevice_disable,
+ # following commands are internal use only, called from pcsd
+ "client_net_setup": qdevice.client_net_setup,
+ "client_net_import_certificate":
+ qdevice.client_net_import_certificate,
+ "client_net_destroy": qdevice.client_net_destroy,
+ "sign_net_cert_request":
+ qdevice.qdevice_net_sign_certificate_request,
}
)
if name == "sbd":
@@ -140,6 +157,20 @@ def load_module(env, middleware_factory, name):
"get_local_sbd_config": sbd.get_local_sbd_config,
}
)
+ if name == "alert":
+ return bind_all(
+ env,
+ middleware.build(middleware_factory.cib),
+ {
+ "create_alert": alert.create_alert,
+ "update_alert": alert.update_alert,
+ "remove_alert": alert.remove_alert,
+ "add_recipient": alert.add_recipient,
+ "update_recipient": alert.update_recipient,
+ "remove_recipient": alert.remove_recipient,
+ "get_all_alerts": alert.get_all_alerts,
+ }
+ )
raise Exception("No library part '{0}'".format(name))
diff --git a/pcs/cli/common/middleware.py b/pcs/cli/common/middleware.py
index 16618e1..e53e138 100644
--- a/pcs/cli/common/middleware.py
+++ b/pcs/cli/common/middleware.py
@@ -34,7 +34,7 @@ def cib(use_local_cib, load_cib_content, write_cib):
result_of_next = next_in_line(env, *args, **kwargs)
if use_local_cib:
- write_cib(env.cib_data)
+ write_cib(env.cib_data, env.cib_upgraded)
return result_of_next
return apply
diff --git a/pcs/cluster.py b/pcs/cluster.py
index 002b5c5..9d4798c 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -36,23 +36,29 @@ from pcs import (
)
from pcs.utils import parallel_for_nodes
from pcs.common import report_codes
+from pcs.cli.common.reports import process_library_reports
from pcs.lib import (
pacemaker as lib_pacemaker,
sbd as lib_sbd,
reports as lib_reports,
)
-from pcs.lib.tools import environment_file_to_dict
+from pcs.lib.commands.quorum import _add_device_model_net
+from pcs.lib.corosync import (
+ config_parser as corosync_conf_utils,
+ qdevice_net,
+)
+from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade
+from pcs.lib.errors import (
+ LibraryError,
+ ReportItemSeverity,
+)
from pcs.lib.external import (
disable_service,
NodeCommunicationException,
node_communicator_exception_to_report_item,
)
from pcs.lib.node import NodeAddresses
-from pcs.lib.errors import (
- LibraryError,
- ReportItemSeverity,
-)
-from pcs.lib.corosync import config_parser as corosync_conf_utils
+from pcs.lib.tools import environment_file_to_dict
def cluster_cmd(argv):
if len(argv) == 0:
@@ -288,7 +294,7 @@ def cluster_setup(argv):
)
if udpu_rrp and "rrp_mode" not in options["transport_options"]:
options["transport_options"]["rrp_mode"] = "passive"
- utils.process_library_reports(messages)
+ process_library_reports(messages)
# prepare config file
if is_rhel6:
@@ -306,7 +312,7 @@ def cluster_setup(argv):
options["totem_options"],
options["quorum_options"]
)
- utils.process_library_reports(messages)
+ process_library_reports(messages)
# setup on the local node
if "--local" in utils.pcs_options:
@@ -870,6 +876,7 @@ def start_cluster(argv):
return
print("Starting Cluster...")
+ service_list = []
if utils.is_rhel6():
# Verify that CMAN_QUORUM_TIMEOUT is set, if not, then we set it to 0
retval, output = getstatusoutput('source /etc/sysconfig/cman ; [ -z "$CMAN_QUORUM_TIMEOUT" ]')
@@ -882,14 +889,15 @@ def start_cluster(argv):
print(output)
utils.err("unable to start cman")
else:
- output, retval = utils.run(["service", "corosync","start"])
+ service_list.append("corosync")
+ if utils.need_to_handle_qdevice_service():
+ service_list.append("corosync-qdevice")
+ service_list.append("pacemaker")
+ for service in service_list:
+ output, retval = utils.run(["service", service, "start"])
if retval != 0:
print(output)
- utils.err("unable to start corosync")
- output, retval = utils.run(["service", "pacemaker", "start"])
- if retval != 0:
- print(output)
- utils.err("unable to start pacemaker")
+ utils.err("unable to start {0}".format(service))
if wait:
wait_for_nodes_started([], wait_timeout)
@@ -1035,14 +1043,20 @@ def enable_cluster(argv):
enable_cluster_nodes(argv)
return
- utils.enableServices()
+ try:
+ utils.enableServices()
+ except LibraryError as e:
+ process_library_reports(e.args)
def disable_cluster(argv):
if len(argv) > 0:
disable_cluster_nodes(argv)
return
- utils.disableServices()
+ try:
+ utils.disableServices()
+ except LibraryError as e:
+ process_library_reports(e.args)
def enable_cluster_all():
enable_cluster_nodes(utils.getNodesFromCorosyncConf())
@@ -1132,13 +1146,18 @@ def stop_cluster_corosync():
utils.err("unable to stop cman")
else:
print("Stopping Cluster (corosync)...")
- output, retval = utils.run(["service", "corosync","stop"])
- if retval != 0:
- print(output)
- utils.err("unable to stop corosync")
+ service_list = []
+ if utils.need_to_handle_qdevice_service():
+ service_list.append("corosync-qdevice")
+ service_list.append("corosync")
+ for service in service_list:
+ output, retval = utils.run(["service", service, "stop"])
+ if retval != 0:
+ print(output)
+ utils.err("unable to stop {0}".format(service))
def kill_cluster(argv):
- daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync"]
+ daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync-qdevice", "corosync"]
dummy_output, dummy_retval = utils.run(["killall", "-9"] + daemons)
# if dummy_retval != 0:
# print "Error: unable to execute killall -9"
@@ -1152,6 +1171,9 @@ def cluster_push(argv):
filename = None
scope = None
+ timeout = None
+ if "--wait" in utils.pcs_options:
+ timeout = utils.validate_wait_get_timeout()
for arg in argv:
if "=" not in arg:
filename = arg
@@ -1187,8 +1209,20 @@ def cluster_push(argv):
output, retval = utils.run(command)
if retval != 0:
utils.err("unable to push cib\n" + output)
- else:
- print("CIB updated")
+ print("CIB updated")
+ if "--wait" not in utils.pcs_options:
+ return
+ cmd = ["crm_resource", "--wait"]
+ if timeout:
+ cmd.extend(["--timeout", timeout])
+ output, retval = utils.run(cmd)
+ if retval != 0:
+ msg = []
+ if retval == settings.pacemaker_wait_timeout_status:
+ msg.append("waiting timeout")
+ if output:
+ msg.append("\n" + output)
+ utils.err("\n".join(msg).strip())
def cluster_edit(argv):
if 'EDITOR' in os.environ:
@@ -1321,19 +1355,16 @@ def cluster_node(argv):
"cluster is not configured for RRP, "
"you must not specify ring 1 address for the node"
)
- utils.check_qdevice_algorithm_and_running_cluster(
- utils.getCorosyncConf(), add=True
- )
corosync_conf = None
(canAdd, error) = utils.canAddNodeToCluster(node0)
if not canAdd:
utils.err("Unable to add '%s' to cluster: %s" % (node0, error))
+ lib_env = utils.get_lib_env()
+ report_processor = lib_env.report_processor
+ node_communicator = lib_env.node_communicator()
+ node_addr = NodeAddresses(node0, node1)
try:
- node_addr = NodeAddresses(node0, node1)
- lib_env = utils.get_lib_env()
- report_processor = lib_env.report_processor
- node_communicator = lib_env.node_communicator()
if lib_sbd.is_sbd_enabled(utils.cmd_runner()):
if "--watchdog" not in utils.pcs_options:
watchdog = settings.sbd_watchdog_default
@@ -1367,9 +1398,9 @@ def cluster_node(argv):
report_processor, node_communicator, node_addr
)
except LibraryError as e:
- utils.process_library_reports(e.args)
+ process_library_reports(e.args)
except NodeCommunicationException as e:
- utils.process_library_reports(
+ process_library_reports(
[node_communicator_exception_to_report_item(e)]
)
@@ -1383,6 +1414,8 @@ def cluster_node(argv):
else:
print("%s: Corosync updated" % my_node)
corosync_conf = output
+ # corosync.conf must be reloaded before the new node is started
+ output, retval = utils.reloadCorosync()
if corosync_conf != None:
# send local cluster pcsd configs to the new node
# may be used for sending corosync config as well in future
@@ -1406,6 +1439,25 @@ def cluster_node(argv):
except:
utils.err('Unable to communicate with pcsd')
+ # set qdevice-net certificates if needed
+ if not utils.is_rhel6():
+ try:
+ conf_facade = corosync_conf_facade.from_string(
+ corosync_conf
+ )
+ qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings()
+ if qdevice_model == "net":
+ _add_device_model_net(
+ lib_env,
+ qdevice_model_options["host"],
+ conf_facade.get_cluster_name(),
+ [node_addr],
+ skip_offline_nodes=False
+ )
+ except LibraryError as e:
+ process_library_reports(e.args)
+
+ print("Setting up corosync...")
utils.setCorosyncConfig(node0, corosync_conf)
if "--enable" in utils.pcs_options:
retval, err = utils.enableCluster(node0)
@@ -1421,7 +1473,6 @@ def cluster_node(argv):
pcsd.pcsd_sync_certs([node0], exit_after_error=False)
else:
utils.err("Unable to update any nodes")
- output, retval = utils.reloadCorosync()
if utils.is_cman_with_udpu_transport():
print("Warning: Using udpu transport on a CMAN cluster, "
+ "cluster restart is required to apply node addition")
@@ -1433,9 +1484,6 @@ def cluster_node(argv):
utils.err(
"node '%s' does not appear to exist in configuration" % node0
)
- utils.check_qdevice_algorithm_and_running_cluster(
- utils.getCorosyncConf(), add=False
- )
if "--force" not in utils.pcs_options:
retval, data = utils.get_remote_quorumtool_output(node0)
if retval != 0:
@@ -1697,10 +1745,18 @@ def cluster_destroy(argv):
else:
print("Shutting down pacemaker/corosync services...")
os.system("service pacemaker stop")
+ # returns error if qdevice is not running, it is safe to ignore it
+ # since we want it not to be running
+ os.system("service corosync-qdevice stop")
os.system("service corosync stop")
print("Killing any remaining services...")
- os.system("killall -q -9 corosync aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld")
- utils.disableServices()
+ os.system("killall -q -9 corosync corosync-qdevice aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld")
+ try:
+ utils.disableServices()
+ except:
+ # previously errors were suppressed in here, let's keep it that way
+ # for now
+ pass
try:
disable_service(utils.cmd_runner(), "sbd")
except:
@@ -1716,6 +1772,12 @@ def cluster_destroy(argv):
"pe*.bz2","cib.*"]
for name in state_files:
os.system("find /var/lib -name '"+name+"' -exec rm -f \{\} \;")
+ try:
+ qdevice_net.client_destroy()
+ except:
+ # errors from deleting other files are suppressed as well
+ # we do not want to fail if qdevice was not set up
+ pass
def cluster_verify(argv):
nofilename = True
diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
index 927df35..2b39938 100644
--- a/pcs/common/report_codes.py
+++ b/pcs/common/report_codes.py
@@ -20,11 +20,17 @@ SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES"
AGENT_GENERAL_ERROR = "AGENT_GENERAL_ERROR"
AGENT_NOT_FOUND = "AGENT_NOT_FOUND"
BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT'
+CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND"
+CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS"
+CIB_ALERT_RECIPIENT_NOT_FOUND = "CIB_ALERT_RECIPIENT_NOT_FOUND"
CIB_CANNOT_FIND_MANDATORY_SECTION = "CIB_CANNOT_FIND_MANDATORY_SECTION"
CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT"
CIB_LOAD_ERROR = "CIB_LOAD_ERROR"
CIB_LOAD_ERROR_SCOPE_MISSING = "CIB_LOAD_ERROR_SCOPE_MISSING"
CIB_PUSH_ERROR = "CIB_PUSH_ERROR"
+CIB_UPGRADE_FAILED = "CIB_UPGRADE_FAILED"
+CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION = "CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION"
+CIB_UPGRADE_SUCCESSFUL = "CIB_UPGRADE_SUCCESSFUL"
CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES = "CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES"
CMAN_BROADCAST_ALL_RINGS = 'CMAN_BROADCAST_ALL_RINGS'
CMAN_UDPU_RESTART_REQUIRED = 'CMAN_UDPU_RESTART_REQUIRED'
@@ -39,6 +45,9 @@ COROSYNC_CONFIG_RELOAD_ERROR = "COROSYNC_CONFIG_RELOAD_ERROR"
COROSYNC_NOT_RUNNING_CHECK_STARTED = "COROSYNC_NOT_RUNNING_CHECK_STARTED"
COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR"
COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE"
+COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE"
+COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR"
+COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR = "COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR"
COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE"
CRM_MON_ERROR = "CRM_MON_ERROR"
DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST"
@@ -56,11 +65,11 @@ INVALID_SCORE = "INVALID_SCORE"
INVALID_TIMEOUT_VALUE = "INVALID_TIMEOUT_VALUE"
MULTIPLE_SCORE_OPTIONS = "MULTIPLE_SCORE_OPTIONS"
NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL = "NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL"
-NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR",
-NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED",
-NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED",
-NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT",
-NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND",
+NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR"
+NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED"
+NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED"
+NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT"
+NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND"
NODE_COMMUNICATION_FINISHED = "NODE_COMMUNICATION_FINISHED"
NODE_COMMUNICATION_NOT_CONNECTED = "NODE_COMMUNICATION_NOT_CONNECTED"
NODE_COMMUNICATION_STARTED = "NODE_COMMUNICATION_STARTED"
@@ -68,16 +77,25 @@ NODE_NOT_FOUND = "NODE_NOT_FOUND"
NON_UDP_TRANSPORT_ADDR_MISMATCH = 'NON_UDP_TRANSPORT_ADDR_MISMATCH'
OMITTING_NODE = "OMITTING_NODE"
PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND"
-PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE",
-PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF",
-PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE",
+PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE"
+PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF"
+PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE"
QDEVICE_ALREADY_DEFINED = "QDEVICE_ALREADY_DEFINED"
QDEVICE_ALREADY_INITIALIZED = "QDEVICE_ALREADY_INITIALIZED"
+QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE = "QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE"
+QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED = "QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED"
+QDEVICE_CERTIFICATE_REMOVAL_STARTED = "QDEVICE_CERTIFICATE_REMOVAL_STARTED"
+QDEVICE_CERTIFICATE_REMOVED_FROM_NODE = "QDEVICE_CERTIFICATE_REMOVED_FROM_NODE"
+QDEVICE_CERTIFICATE_IMPORT_ERROR = "QDEVICE_CERTIFICATE_IMPORT_ERROR"
+QDEVICE_CERTIFICATE_SIGN_ERROR = "QDEVICE_CERTIFICATE_SIGN_ERROR"
QDEVICE_DESTROY_ERROR = "QDEVICE_DESTROY_ERROR"
QDEVICE_DESTROY_SUCCESS = "QDEVICE_DESTROY_SUCCESS"
+QDEVICE_GET_STATUS_ERROR = "QDEVICE_GET_STATUS_ERROR"
QDEVICE_INITIALIZATION_ERROR = "QDEVICE_INITIALIZATION_ERROR"
QDEVICE_INITIALIZATION_SUCCESS = "QDEVICE_INITIALIZATION_SUCCESS"
QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED"
+QDEVICE_NOT_INITIALIZED = "QDEVICE_NOT_INITIALIZED"
+QDEVICE_CLIENT_RELOAD_STARTED = "QDEVICE_CLIENT_RELOAD_STARTED"
QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED"
REQUIRED_OPTION_IS_MISSING = "REQUIRED_OPTION_IS_MISSING"
RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR"
@@ -100,12 +118,16 @@ SBD_ENABLING_STARTED = "SBD_ENABLING_STARTED"
SBD_NOT_INSTALLED = "SBD_NOT_INSTALLED"
SBD_NOT_ENABLED = "SBD_NOT_ENABLED"
SERVICE_DISABLE_ERROR = "SERVICE_DISABLE_ERROR"
+SERVICE_DISABLE_STARTED = "SERVICE_DISABLE_STARTED"
SERVICE_DISABLE_SUCCESS = "SERVICE_DISABLE_SUCCESS"
SERVICE_ENABLE_ERROR = "SERVICE_ENABLE_ERROR"
+SERVICE_ENABLE_STARTED = "SERVICE_ENABLE_STARTED"
+SERVICE_ENABLE_SKIPPED = "SERVICE_ENABLE_SKIPPED"
SERVICE_ENABLE_SUCCESS = "SERVICE_ENABLE_SUCCESS"
SERVICE_KILL_ERROR = "SERVICE_KILL_ERROR"
SERVICE_KILL_SUCCESS = "SERVICE_KILL_SUCCESS"
SERVICE_START_ERROR = "SERVICE_START_ERROR"
+SERVICE_START_SKIPPED = "SERVICE_START_SKIPPED"
SERVICE_START_STARTED = "SERVICE_START_STARTED"
SERVICE_START_SUCCESS = "SERVICE_START_SUCCESS"
SERVICE_STOP_ERROR = "SERVICE_STOP_ERROR"
diff --git a/pcs/config.py b/pcs/config.py
index 51de822..4659c5b 100644
--- a/pcs/config.py
+++ b/pcs/config.py
@@ -38,6 +38,7 @@ from pcs import (
stonith,
usage,
utils,
+ alert,
)
from pcs.lib.errors import LibraryError
from pcs.lib.commands import quorum as lib_quorum
@@ -123,6 +124,9 @@ def config_show_cib():
ticket_command.show(lib, [], modificators)
print()
+ alert.print_alert_config(lib, [], modificators)
+
+ print()
del utils.pcs_options["--all"]
print("Resources Defaults:")
resource.show_defaults("rsc_defaults", indent=" ")
diff --git a/pcs/lib/cib/alert.py b/pcs/lib/cib/alert.py
new file mode 100644
index 0000000..6b72996
--- /dev/null
+++ b/pcs/lib/cib/alert.py
@@ -0,0 +1,281 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+from pcs.lib.cib.nvpair import update_nvset, get_nvset
+from pcs.lib.cib.tools import (
+ check_new_id_applicable,
+ get_sub_element,
+ find_unique_id,
+ get_alerts,
+)
+
+
+def update_instance_attributes(tree, element, attribute_dict):
+ """
+ Updates instance attributes of element. Returns updated instance
+ attributes element.
+
+ tree -- cib etree node
+ element -- parent element of instance attributes
+ attribute_dict -- dictionary of nvpairs
+ """
+ return update_nvset("instance_attributes", tree, element, attribute_dict)
+
+
+def update_meta_attributes(tree, element, attribute_dict):
+ """
+ Updates meta attributes of element. Returns updated meta attributes element.
+
+ tree -- cib etree node
+ element -- parent element of meta attributes
+ attribute_dict -- dictionary of nvpairs
+ """
+ return update_nvset("meta_attributes", tree, element, attribute_dict)
+
+
+def _update_optional_attribute(element, attribute, value):
+ """
+ Update optional attribute of element. Remove existing element if value
+ is empty.
+
+ element -- parent element of specified attribute
+ attribute -- attribute to be updated
+ value -- new value
+ """
+ if value is None:
+ return
+ if value:
+ element.set(attribute, value)
+ elif attribute in element.attrib:
+ del element.attrib[attribute]
+
+
+def get_alert_by_id(tree, alert_id):
+ """
+ Returns alert element with specified id.
+ Raises AlertNotFound if alert with specified id doesn't exist.
+
+ tree -- cib etree node
+ alert_id -- id of alert
+ """
+ alert = get_alerts(tree).find("./alert[@id='{0}']".format(alert_id))
+ if alert is None:
+ raise LibraryError(reports.cib_alert_not_found(alert_id))
+ return alert
+
+
+def get_recipient(alert, recipient_value):
+ """
+    Returns recipient element with value recipient_value which belongs to
+ specified alert.
+ Raises RecipientNotFound if recipient doesn't exist.
+
+ alert -- parent element of required recipient
+ recipient_value -- value of recipient
+ """
+ recipient = alert.find(
+ "./recipient[@value='{0}']".format(recipient_value)
+ )
+ if recipient is None:
+ raise LibraryError(reports.cib_alert_recipient_not_found(
+ alert.get("id"), recipient_value
+ ))
+ return recipient
+
+
+def create_alert(tree, alert_id, path, description=""):
+ """
+ Create new alert element. Returns newly created element.
+ Raises LibraryError if element with specified id already exists.
+
+ tree -- cib etree node
+ alert_id -- id of new alert, it will be generated if it is None
+ path -- path to script
+ description -- description
+ """
+ if alert_id:
+ check_new_id_applicable(tree, "alert-id", alert_id)
+ else:
+ alert_id = find_unique_id(tree, "alert")
+
+ alert = etree.SubElement(get_alerts(tree), "alert", id=alert_id, path=path)
+ if description:
+ alert.set("description", description)
+
+ return alert
+
+
+def update_alert(tree, alert_id, path, description=None):
+ """
+ Update existing alert. Return updated alert element.
+ Raises AlertNotFound if alert with specified id doesn't exist.
+
+ tree -- cib etree node
+ alert_id -- id of alert to be updated
+    path -- new value of path, stays unchanged if None
+    description -- new value of description, stays unchanged if None, removed
+        if empty
+ """
+ alert = get_alert_by_id(tree, alert_id)
+ if path:
+ alert.set("path", path)
+ _update_optional_attribute(alert, "description", description)
+ return alert
+
+
+def remove_alert(tree, alert_id):
+ """
+ Remove alert with specified id.
+ Raises AlertNotFound if alert with specified id doesn't exist.
+
+ tree -- cib etree node
+ alert_id -- id of alert which should be removed
+ """
+ alert = get_alert_by_id(tree, alert_id)
+ alert.getparent().remove(alert)
+
+
+def add_recipient(
+ tree,
+ alert_id,
+ recipient_value,
+ description=""
+):
+ """
+ Add recipient to alert with specified id. Returns added recipient element.
+ Raises AlertNotFound if alert with specified id doesn't exist.
+ Raises LibraryError if recipient already exists.
+
+ tree -- cib etree node
+ alert_id -- id of alert which should be parent of new recipient
+ recipient_value -- value of recipient
+ description -- description of recipient
+ """
+ alert = get_alert_by_id(tree, alert_id)
+
+ recipient = alert.find(
+ "./recipient[@value='{0}']".format(recipient_value)
+ )
+ if recipient is not None:
+ raise LibraryError(reports.cib_alert_recipient_already_exists(
+ alert_id, recipient_value
+ ))
+
+ recipient = etree.SubElement(
+ alert,
+ "recipient",
+ id=find_unique_id(tree, "{0}-recipient".format(alert_id)),
+ value=recipient_value
+ )
+
+ if description:
+ recipient.set("description", description)
+
+ return recipient
+
+
+def update_recipient(tree, alert_id, recipient_value, description):
+ """
+ Update specified recipient. Returns updated recipient element.
+ Raises AlertNotFound if alert with specified id doesn't exist.
+ Raises RecipientNotFound if recipient doesn't exist.
+
+ tree -- cib etree node
+ alert_id -- id of alert, parent element of recipient
+ recipient_value -- recipient value
+    description -- description, if empty it will be removed, stays unchanged
+        if None
+ """
+ recipient = get_recipient(
+ get_alert_by_id(tree, alert_id), recipient_value
+ )
+ _update_optional_attribute(recipient, "description", description)
+ return recipient
+
+
+def remove_recipient(tree, alert_id, recipient_value):
+ """
+ Remove specified recipient.
+ Raises AlertNotFound if alert with specified id doesn't exist.
+ Raises RecipientNotFound if recipient doesn't exist.
+
+ tree -- cib etree node
+ alert_id -- id of alert, parent element of recipient
+ recipient_value -- recipient value
+ """
+ recipient = get_recipient(
+ get_alert_by_id(tree, alert_id), recipient_value
+ )
+ recipient.getparent().remove(recipient)
+
+
+def get_all_recipients(alert):
+ """
+    Returns list of all recipients of the specified alert. Format:
+ [
+ {
+ "id": <id of recipient>,
+ "value": <value of recipient>,
+ "description": <recipient description>,
+ "instance_attributes": <list of nvpairs>,
+ "meta_attributes": <list of nvpairs>
+ }
+ ]
+
+ alert -- parent element of recipients to return
+ """
+ recipient_list = []
+ for recipient in alert.findall("./recipient"):
+ recipient_list.append({
+ "id": recipient.get("id"),
+ "value": recipient.get("value"),
+ "description": recipient.get("description", ""),
+ "instance_attributes": get_nvset(
+ get_sub_element(recipient, "instance_attributes")
+ ),
+ "meta_attributes": get_nvset(
+ get_sub_element(recipient, "meta_attributes")
+ )
+ })
+ return recipient_list
+
+
+def get_all_alerts(tree):
+ """
+ Returns list of all alerts specified in tree. Format:
+ [
+ {
+ "id": <id of alert>,
+ "path": <path to script>,
+ "description": <alert description>,
+ "instance_attributes": <list of nvpairs>,
+ "meta_attributes": <list of nvpairs>,
+ "recipients_list": <list of alert's recipients>
+ }
+ ]
+
+ tree -- cib etree node
+ """
+ alert_list = []
+ for alert in get_alerts(tree).findall("./alert"):
+ alert_list.append({
+ "id": alert.get("id"),
+ "path": alert.get("path"),
+ "description": alert.get("description", ""),
+ "instance_attributes": get_nvset(
+ get_sub_element(alert, "instance_attributes")
+ ),
+ "meta_attributes": get_nvset(
+ get_sub_element(alert, "meta_attributes")
+ ),
+ "recipient_list": get_all_recipients(alert)
+ })
+ return alert_list
diff --git a/pcs/lib/cib/nvpair.py b/pcs/lib/cib/nvpair.py
new file mode 100644
index 0000000..d1a0cae
--- /dev/null
+++ b/pcs/lib/cib/nvpair.py
@@ -0,0 +1,90 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib.cib.tools import (
+ get_sub_element,
+ find_unique_id,
+)
+
+
+def update_nvpair(tree, element, name, value):
+ """
+ Update nvpair, create new if it doesn't yet exist or remove existing
+ nvpair if value is empty. Returns created/updated/removed nvpair element.
+
+ tree -- cib etree node
+ element -- element in which nvpair should be added/updated/removed
+ name -- name of nvpair
+ value -- value of nvpair
+ """
+ nvpair = element.find("./nvpair[@name='{0}']".format(name))
+ if nvpair is None:
+ if not value:
+ return None
+ nvpair_id = find_unique_id(
+ tree, "{0}-{1}".format(element.get("id"), name)
+ )
+ nvpair = etree.SubElement(
+ element, "nvpair", id=nvpair_id, name=name, value=value
+ )
+ else:
+ if value:
+ nvpair.set("value", value)
+ else:
+ # remove nvpair if value is empty
+ element.remove(nvpair)
+ return nvpair
+
+
+def update_nvset(tag_name, tree, element, attribute_dict):
+ """
+ This method updates nvset specified by tag_name. If specified nvset
+ doesn't exist it will be created. Returns updated nvset element or None if
+ attribute_dict is empty.
+
+ tag_name -- tag name of nvset element
+ tree -- cib etree node
+ element -- parent element of nvset
+ attribute_dict -- dictionary of nvpairs
+ """
+ if not attribute_dict:
+ return None
+
+ attributes = get_sub_element(element, tag_name, find_unique_id(
+ tree, "{0}-{1}".format(element.get("id"), tag_name)
+ ), 0)
+
+ for name, value in sorted(attribute_dict.items()):
+ update_nvpair(tree, attributes, name, value)
+
+ return attributes
+
+
+def get_nvset(nvset):
+ """
+ Returns nvset element as list of nvpairs with format:
+ [
+ {
+ "id": <id of nvpair>,
+ "name": <name of nvpair>,
+ "value": <value of nvpair>
+ },
+ ...
+ ]
+
+ nvset -- nvset element
+ """
+ nvpair_list = []
+ for nvpair in nvset.findall("./nvpair"):
+ nvpair_list.append({
+ "id": nvpair.get("id"),
+ "name": nvpair.get("name"),
+ "value": nvpair.get("value", "")
+ })
+ return nvpair_list
diff --git a/pcs/lib/cib/test/test_alert.py b/pcs/lib/cib/test/test_alert.py
new file mode 100644
index 0000000..c387aaf
--- /dev/null
+++ b/pcs/lib/cib/test/test_alert.py
@@ -0,0 +1,931 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from unittest import TestCase
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.cib import alert
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import(
+ assert_raise_library_error,
+ assert_xml_equal,
+)
+from pcs.test.tools.pcs_mock import mock
+
+
+ at mock.patch("pcs.lib.cib.alert.update_nvset")
+class UpdateInstanceAttributesTest(TestCase):
+ def test_success(self, mock_update_nvset):
+ ret_val = etree.Element("nvset")
+ tree = etree.Element("tree")
+ element = etree.Element("element")
+ attributes = {"a": 1}
+ mock_update_nvset.return_value = ret_val
+ self.assertEqual(
+ alert.update_instance_attributes(tree, element, attributes),
+ ret_val
+ )
+ mock_update_nvset.assert_called_once_with(
+ "instance_attributes", tree, element, attributes
+ )
+
+
+ at mock.patch("pcs.lib.cib.alert.update_nvset")
+class UpdateMetaAttributesTest(TestCase):
+ def test_success(self, mock_update_nvset):
+ ret_val = etree.Element("nvset")
+ tree = etree.Element("tree")
+ element = etree.Element("element")
+ attributes = {"a": 1}
+ mock_update_nvset.return_value = ret_val
+ self.assertEqual(
+ alert.update_meta_attributes(tree, element, attributes),
+ ret_val
+ )
+ mock_update_nvset.assert_called_once_with(
+ "meta_attributes", tree, element, attributes
+ )
+
+
+class UpdateOptionalAttributeTest(TestCase):
+ def test_add(self):
+ element = etree.Element("element")
+ alert._update_optional_attribute(element, "attr", "value1")
+ self.assertEqual(element.get("attr"), "value1")
+
+ def test_update(self):
+ element = etree.Element("element", attr="value")
+ alert._update_optional_attribute(element, "attr", "value1")
+ self.assertEqual(element.get("attr"), "value1")
+
+ def test_remove(self):
+ element = etree.Element("element", attr="value")
+ alert._update_optional_attribute(element, "attr", "")
+ self.assertTrue(element.get("attr") is None)
+
+
+class GetAlertByIdTest(TestCase):
+ def test_found(self):
+ xml = """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert-1"/>
+ <alert id="alert-2"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """
+ assert_xml_equal(
+ '<alert id="alert-2"/>',
+ etree.tostring(
+ alert.get_alert_by_id(etree.XML(xml), "alert-2")
+ ).decode()
+ )
+
+ def test_different_place(self):
+ xml = """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert-1"/>
+ </alerts>
+ <alert id="alert-2"/>
+ </configuration>
+ </cib>
+ """
+ assert_raise_library_error(
+ lambda: alert.get_alert_by_id(etree.XML(xml), "alert-2"),
+ (
+ severities.ERROR,
+ report_codes.CIB_ALERT_NOT_FOUND,
+ {"alert": "alert-2"}
+ )
+ )
+
+ def test_not_exist(self):
+ xml = """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert-1"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """
+ assert_raise_library_error(
+ lambda: alert.get_alert_by_id(etree.XML(xml), "alert-2"),
+ (
+ severities.ERROR,
+ report_codes.CIB_ALERT_NOT_FOUND,
+ {"alert": "alert-2"}
+ )
+ )
+
+
+class GetRecipientTest(TestCase):
+ def setUp(self):
+ self.xml = etree.XML(
+ """
+ <alert id="alert-1">
+ <recipient id="rec-1" value="value1"/>
+ <recipient id="rec-2" value="value2"/>
+ <not_recipient value="value3"/>
+ <recipients>
+ <recipient id="rec-4" value="value4"/>
+ </recipients>
+ </alert>
+ """
+ )
+
+ def test_exist(self):
+ assert_xml_equal(
+ '<recipient id="rec-2" value="value2"/>',
+ etree.tostring(alert.get_recipient(self.xml, "value2")).decode()
+ )
+
+ def test_different_place(self):
+ assert_raise_library_error(
+ lambda: alert.get_recipient(self.xml, "value4"),
+ (
+ severities.ERROR,
+ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
+ {
+ "alert": "alert-1",
+ "recipient": "value4"
+ }
+ )
+ )
+
+ def test_not_recipient(self):
+ assert_raise_library_error(
+ lambda: alert.get_recipient(self.xml, "value3"),
+ (
+ severities.ERROR,
+ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
+ {
+ "alert": "alert-1",
+ "recipient": "value3"
+ }
+ )
+ )
+
+
+class CreateAlertTest(TestCase):
+ def setUp(self):
+ self.tree = etree.XML(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """
+ )
+
+ def test_no_alerts(self):
+ tree = etree.XML(
+ """
+ <cib>
+ <configuration/>
+ </cib>
+ """
+ )
+ assert_xml_equal(
+ '<alert id="my-alert" path="/test/path"/>',
+ etree.tostring(
+ alert.create_alert(tree, "my-alert", "/test/path")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="my-alert" path="/test/path"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(tree).decode()
+ )
+
+ def test_alerts_exists(self):
+ assert_xml_equal(
+ '<alert id="my-alert" path="/test/path"/>',
+ etree.tostring(
+ alert.create_alert(self.tree, "my-alert", "/test/path")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert"/>
+ <alert id="my-alert" path="/test/path"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+ def test_alerts_exists_with_description(self):
+ assert_xml_equal(
+ '<alert id="my-alert" path="/test/path" description="nothing"/>',
+ etree.tostring(alert.create_alert(
+ self.tree, "my-alert", "/test/path", "nothing"
+ )).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert"/>
+ <alert
+ id="my-alert"
+ path="/test/path"
+ description="nothing"
+ />
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+ def test_invalid_id(self):
+ assert_raise_library_error(
+ lambda: alert.create_alert(self.tree, "1alert", "/path"),
+ (
+ severities.ERROR,
+ report_codes.INVALID_ID,
+ {
+ "id": "1alert",
+ "id_description": "alert-id",
+ "invalid_character": "1",
+ "reason": "invalid first character"
+ }
+ )
+ )
+
+ def test_id_exists(self):
+ assert_raise_library_error(
+ lambda: alert.create_alert(self.tree, "alert", "/path"),
+ (
+ severities.ERROR,
+ report_codes.ID_ALREADY_EXISTS,
+ {"id": "alert"}
+ )
+ )
+
+ def test_no_id(self):
+ assert_xml_equal(
+ '<alert id="alert-1" path="/test/path"/>',
+ etree.tostring(
+ alert.create_alert(self.tree, None, "/test/path")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert"/>
+ <alert id="alert-1" path="/test/path"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+
+class UpdateAlertTest(TestCase):
+ def setUp(self):
+ self.tree = etree.XML(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path"/>
+ <alert id="alert1" path="/path1" description="nothing"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """
+ )
+
+ def test_update_path(self):
+ assert_xml_equal(
+ '<alert id="alert" path="/test/path"/>',
+ etree.tostring(
+ alert.update_alert(self.tree, "alert", "/test/path")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/test/path"/>
+ <alert id="alert1" path="/path1" description="nothing"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+ def test_remove_path(self):
+ assert_xml_equal(
+ '<alert id="alert" path="/path"/>',
+ etree.tostring(alert.update_alert(self.tree, "alert", "")).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path"/>
+ <alert id="alert1" path="/path1" description="nothing"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+ def test_update_description(self):
+ assert_xml_equal(
+ '<alert id="alert" path="/path" description="desc"/>',
+ etree.tostring(
+ alert.update_alert(self.tree, "alert", None, "desc")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path" description="desc"/>
+ <alert id="alert1" path="/path1" description="nothing"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+ def test_remove_description(self):
+ assert_xml_equal(
+ '<alert id="alert1" path="/path1"/>',
+ etree.tostring(
+ alert.update_alert(self.tree, "alert1", None, "")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path"/>
+ <alert id="alert1" path="/path1"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+ def test_id_not_exists(self):
+ assert_raise_library_error(
+ lambda: alert.update_alert(self.tree, "alert0", "/test"),
+ (
+ severities.ERROR,
+ report_codes.CIB_ALERT_NOT_FOUND,
+ {"alert": "alert0"}
+ )
+ )
+
+
+class RemoveAlertTest(TestCase):
+ def setUp(self):
+ self.tree = etree.XML(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path"/>
+ <alert id="alert-1" path="/next"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """
+ )
+
+ def test_success(self):
+ alert.remove_alert(self.tree, "alert")
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert-1" path="/next"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+ def test_not_existing_id(self):
+ assert_raise_library_error(
+ lambda: alert.remove_alert(self.tree, "not-existing-id"),
+ (
+ severities.ERROR,
+ report_codes.CIB_ALERT_NOT_FOUND,
+ {"alert": "not-existing-id"}
+ )
+ )
+
+
+class AddRecipientTest(TestCase):
+ def setUp(self):
+ self.tree = etree.XML(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path">
+ <recipient id="alert-recipient" value="test_val"/>
+ </alert>
+ </alerts>
+ </configuration>
+ </cib>
+ """
+ )
+
+ def test_success(self):
+ assert_xml_equal(
+ '<recipient id="alert-recipient-1" value="value1"/>',
+ etree.tostring(
+ alert.add_recipient(self.tree, "alert", "value1")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path">
+ <recipient id="alert-recipient" value="test_val"/>
+ <recipient id="alert-recipient-1" value="value1"/>
+ </alert>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+ def test_recipient_exist(self):
+ assert_raise_library_error(
+ lambda: alert.add_recipient(self.tree, "alert", "test_val"),
+ (
+ severities.ERROR,
+ report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
+ {
+ "recipient": "test_val",
+ "alert": "alert"
+ }
+ )
+ )
+
+ def test_alert_not_exist(self):
+ assert_raise_library_error(
+ lambda: alert.add_recipient(self.tree, "alert1", "test_val"),
+ (
+ severities.ERROR,
+ report_codes.CIB_ALERT_NOT_FOUND,
+ {"alert": "alert1"}
+ )
+ )
+
+ def test_with_description(self):
+ assert_xml_equal(
+ """
+ <recipient
+ id="alert-recipient-1"
+ value="value1"
+ description="desc"
+ />
+ """,
+ etree.tostring(alert.add_recipient(
+ self.tree, "alert", "value1", "desc"
+ )).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path">
+ <recipient id="alert-recipient" value="test_val"/>
+ <recipient
+ id="alert-recipient-1"
+ value="value1"
+ description="desc"
+ />
+ </alert>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+
+class UpdateRecipientTest(TestCase):
+ def setUp(self):
+ self.tree = etree.XML(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path">
+ <recipient id="alert-recipient" value="test_val"/>
+ <recipient
+ id="alert-recipient-1"
+ value="value1"
+ description="desc"
+ />
+ </alert>
+ </alerts>
+ </configuration>
+ </cib>
+ """
+ )
+
+ def test_add_description(self):
+ assert_xml_equal(
+ """
+ <recipient
+ id="alert-recipient" value="test_val" description="description"
+ />
+ """,
+ etree.tostring(alert.update_recipient(
+ self.tree, "alert", "test_val", "description"
+ )).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path">
+ <recipient
+ id="alert-recipient"
+ value="test_val"
+ description="description"
+ />
+ <recipient
+ id="alert-recipient-1"
+ value="value1"
+ description="desc"
+ />
+ </alert>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+ def test_update_description(self):
+ assert_xml_equal(
+ """
+ <recipient
+ id="alert-recipient-1" value="value1" description="description"
+ />
+ """,
+ etree.tostring(alert.update_recipient(
+ self.tree, "alert", "value1", "description"
+ )).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path">
+ <recipient id="alert-recipient" value="test_val"/>
+ <recipient
+ id="alert-recipient-1"
+ value="value1"
+ description="description"
+ />
+ </alert>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+ def test_remove_description(self):
+ assert_xml_equal(
+ """
+ <recipient id="alert-recipient-1" value="value1"/>
+ """,
+ etree.tostring(
+ alert.update_recipient(self.tree, "alert", "value1", "")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path">
+ <recipient id="alert-recipient" value="test_val"/>
+ <recipient id="alert-recipient-1" value="value1"/>
+ </alert>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+ def test_alert_not_exists(self):
+ assert_raise_library_error(
+ lambda: alert.update_recipient(self.tree, "alert1", "test_val", ""),
+ (
+ severities.ERROR,
+ report_codes.CIB_ALERT_NOT_FOUND,
+ {"alert": "alert1"}
+ )
+ )
+
+ def test_recipient_not_exists(self):
+ assert_raise_library_error(
+ lambda: alert.update_recipient(self.tree, "alert", "unknown", ""),
+ (
+ severities.ERROR,
+ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
+ {
+ "alert": "alert",
+ "recipient": "unknown"
+ }
+ )
+ )
+
+
+class RemoveRecipientTest(TestCase):
+ def setUp(self):
+ self.tree = etree.XML(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path">
+ <recipient id="alert-recipient" value="test_val"/>
+ <recipient id="alert-recipient-2" value="val"/>
+ </alert>
+ </alerts>
+ </configuration>
+ </cib>
+ """
+ )
+
+ def test_success(self):
+ alert.remove_recipient(self.tree, "alert", "val")
+ assert_xml_equal(
+ """
+ <cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path">
+ <recipient id="alert-recipient" value="test_val"/>
+ </alert>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ etree.tostring(self.tree).decode()
+ )
+
+ def test_alert_not_exists(self):
+ assert_raise_library_error(
+ lambda: alert.remove_recipient(self.tree, "alert1", "test_val"),
+ (
+ severities.ERROR,
+ report_codes.CIB_ALERT_NOT_FOUND,
+ {"alert": "alert1"}
+ )
+ )
+
+ def test_recipient_not_exists(self):
+ assert_raise_library_error(
+ lambda: alert.remove_recipient(self.tree, "alert", "unknown"),
+ (
+ severities.ERROR,
+ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
+ {
+ "alert": "alert",
+ "recipient": "unknown"
+ }
+ )
+ )
+
+
+class GetAllRecipientsTest(TestCase):
+ def test_success(self):
+ alert_obj = etree.XML(
+ """
+ <alert id="alert" path="/path">
+ <recipient id="alert-recipient" value="test_val">
+ <instance_attributes>
+ <nvpair
+ id="nvset-name1-value1" name="name1" value="value1"
+ />
+ <nvpair
+ id="nvset-name2-value2" name="name2" value="value2"
+ />
+ </instance_attributes>
+ <meta_attributes>
+ <nvpair id="nvset-name3" name="name3"/>
+ </meta_attributes>
+ </recipient>
+ <recipient
+ id="alert-recipient-1" value="value1" description="desc"
+ />
+ </alert>
+ """
+ )
+ self.assertEqual(
+ [
+ {
+ "id": "alert-recipient",
+ "value": "test_val",
+ "description": "",
+ "instance_attributes": [
+ {
+ "id": "nvset-name1-value1",
+ "name": "name1",
+ "value": "value1"
+ },
+ {
+ "id": "nvset-name2-value2",
+ "name": "name2",
+ "value": "value2"
+ }
+ ],
+ "meta_attributes": [
+ {
+ "id": "nvset-name3",
+ "name": "name3",
+ "value": ""
+ }
+ ]
+ },
+ {
+ "id": "alert-recipient-1",
+ "value": "value1",
+ "description": "desc",
+ "instance_attributes": [],
+ "meta_attributes": []
+ }
+ ],
+ alert.get_all_recipients(alert_obj)
+ )
+
+
+class GetAllAlertsTest(TestCase):
+ def test_success(self):
+ alerts = etree.XML(
+ """
+<cib>
+ <configuration>
+ <alerts>
+ <alert id="alert" path="/path">
+ <recipient id="alert-recipient" value="test_val">
+ <instance_attributes>
+ <nvpair
+ id="instance_attributes-name1-value1"
+ name="name1"
+ value="value1"
+ />
+ <nvpair
+ id="instance_attributes-name2-value2"
+ name="name2"
+ value="value2"
+ />
+ </instance_attributes>
+ <meta_attributes>
+ <nvpair id="meta_attributes-name3" name="name3"/>
+ </meta_attributes>
+ </recipient>
+ <recipient
+ id="alert-recipient-1" value="value1" description="desc"
+ />
+ </alert>
+ <alert id="alert1" path="/test/path" description="desc">
+ <instance_attributes>
+ <nvpair
+ id="alert1-name1-value1" name="name1" value="value1"
+ />
+ <nvpair
+ id="alert1-name2-value2" name="name2" value="value2"
+ />
+ </instance_attributes>
+ <meta_attributes>
+ <nvpair id="alert1-name3" name="name3"/>
+ </meta_attributes>
+ </alert>
+ </alerts>
+ </configuration>
+</cib>
+ """
+ )
+ self.assertEqual(
+ [
+ {
+ "id": "alert",
+ "path": "/path",
+ "description": "",
+ "instance_attributes": [],
+ "meta_attributes": [],
+ "recipient_list": [
+ {
+ "id": "alert-recipient",
+ "value": "test_val",
+ "description": "",
+ "instance_attributes": [
+ {
+ "id": "instance_attributes-name1-value1",
+ "name": "name1",
+ "value": "value1"
+ },
+ {
+ "id": "instance_attributes-name2-value2",
+ "name": "name2",
+ "value": "value2"
+ }
+ ],
+ "meta_attributes": [
+ {
+ "id": "meta_attributes-name3",
+ "name": "name3",
+ "value": ""
+ }
+ ]
+ },
+ {
+ "id": "alert-recipient-1",
+ "value": "value1",
+ "description": "desc",
+ "instance_attributes": [],
+ "meta_attributes": []
+ }
+ ]
+ },
+ {
+ "id": "alert1",
+ "path": "/test/path",
+ "description": "desc",
+ "instance_attributes": [
+ {
+ "id": "alert1-name1-value1",
+ "name": "name1",
+ "value": "value1"
+ },
+ {
+ "id": "alert1-name2-value2",
+ "name": "name2",
+ "value": "value2"
+ }
+ ],
+ "meta_attributes": [
+ {
+ "id": "alert1-name3",
+ "name": "name3",
+ "value": ""
+ }
+ ],
+ "recipient_list": []
+ }
+ ],
+ alert.get_all_alerts(alerts)
+ )
diff --git a/pcs/lib/cib/test/test_nvpair.py b/pcs/lib/cib/test/test_nvpair.py
new file mode 100644
index 0000000..6907f25
--- /dev/null
+++ b/pcs/lib/cib/test/test_nvpair.py
@@ -0,0 +1,206 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from unittest import TestCase
+
+from lxml import etree
+
+from pcs.lib.cib import nvpair
+from pcs.test.tools.assertions import assert_xml_equal
+
+
+class UpdateNvpairTest(TestCase):
+ def setUp(self):
+ self.nvset = etree.Element("nvset", id="nvset")
+ etree.SubElement(
+ self.nvset, "nvpair", id="nvset-attr", name="attr", value="1"
+ )
+ etree.SubElement(
+ self.nvset, "nvpair", id="nvset-attr2", name="attr2", value="2"
+ )
+ etree.SubElement(
+ self.nvset, "notnvpair", id="nvset-test", name="test", value="0"
+ )
+
+ def test_update(self):
+ assert_xml_equal(
+ "<nvpair id='nvset-attr' name='attr' value='10'/>",
+ etree.tostring(
+ nvpair.update_nvpair(self.nvset, self.nvset, "attr", "10")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <nvset id="nvset">
+ <nvpair id="nvset-attr" name="attr" value="10"/>
+ <nvpair id="nvset-attr2" name="attr2" value="2"/>
+ <notnvpair id="nvset-test" name="test" value="0"/>
+ </nvset>
+ """,
+ etree.tostring(self.nvset).decode()
+ )
+
+ def test_add(self):
+ assert_xml_equal(
+ "<nvpair id='nvset-test-1' name='test' value='0'/>",
+ etree.tostring(
+ nvpair.update_nvpair(self.nvset, self.nvset, "test", "0")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <nvset id="nvset">
+ <nvpair id="nvset-attr" name="attr" value="1"/>
+ <nvpair id="nvset-attr2" name="attr2" value="2"/>
+ <notnvpair id="nvset-test" name="test" value="0"/>
+ <nvpair id="nvset-test-1" name="test" value="0"/>
+ </nvset>
+ """,
+ etree.tostring(self.nvset).decode()
+ )
+
+ def test_remove(self):
+ assert_xml_equal(
+ "<nvpair id='nvset-attr2' name='attr2' value='2'/>",
+ etree.tostring(
+ nvpair.update_nvpair(self.nvset, self.nvset, "attr2", "")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <nvset id="nvset">
+ <nvpair id="nvset-attr" name="attr" value="1"/>
+ <notnvpair id="nvset-test" name="test" value="0"/>
+ </nvset>
+ """,
+ etree.tostring(self.nvset).decode()
+ )
+
+ def test_remove_not_existing(self):
+ self.assertTrue(
+ nvpair.update_nvpair(self.nvset, self.nvset, "attr3", "") is None
+ )
+ assert_xml_equal(
+ """
+ <nvset id="nvset">
+ <nvpair id="nvset-attr" name="attr" value="1"/>
+ <nvpair id="nvset-attr2" name="attr2" value="2"/>
+ <notnvpair id="nvset-test" name="test" value="0"/>
+ </nvset>
+ """,
+ etree.tostring(self.nvset).decode()
+ )
+
+
+class UpdateNvsetTest(TestCase):
+ def setUp(self):
+ self.root = etree.Element("root", id="root")
+ self.nvset = etree.SubElement(self.root, "nvset", id="nvset")
+ etree.SubElement(
+ self.nvset, "nvpair", id="nvset-attr", name="attr", value="1"
+ )
+ etree.SubElement(
+ self.nvset, "nvpair", id="nvset-attr2", name="attr2", value="2"
+ )
+ etree.SubElement(
+ self.nvset, "notnvpair", id="nvset-test", name="test", value="0"
+ )
+
+ def test_None(self):
+ self.assertTrue(
+ nvpair.update_nvset("nvset", self.root, self.root, None) is None
+ )
+
+ def test_empty(self):
+ self.assertTrue(
+ nvpair.update_nvset("nvset", self.root, self.root, {}) is None
+ )
+
+ def test_existing(self):
+ self.assertEqual(
+ self.nvset,
+ nvpair.update_nvset("nvset", self.root, self.root, {
+ "attr": "10",
+ "new_one": "20",
+ "test": "0",
+ "attr2": ""
+ })
+ )
+ assert_xml_equal(
+ """
+ <nvset id="nvset">
+ <nvpair id="nvset-attr" name="attr" value="10"/>
+ <notnvpair id="nvset-test" name="test" value="0"/>
+ <nvpair id="nvset-new_one" name="new_one" value="20"/>
+ <nvpair id="nvset-test-1" name="test" value="0"/>
+ </nvset>
+ """,
+ etree.tostring(self.nvset).decode()
+ )
+
+ def test_new(self):
+ root = etree.Element("root", id="root")
+ assert_xml_equal(
+ """
+ <nvset id="root-nvset">
+ <nvpair id="root-nvset-attr" name="attr" value="10"/>
+ <nvpair id="root-nvset-new_one" name="new_one" value="20"/>
+ <nvpair id="root-nvset-test" name="test" value="0"/>
+ </nvset>
+ """,
+ etree.tostring(nvpair.update_nvset("nvset", root, root, {
+ "attr": "10",
+ "new_one": "20",
+ "test": "0",
+ "attr2": ""
+ })).decode()
+ )
+ assert_xml_equal(
+ """
+ <root id="root">
+ <nvset id="root-nvset">
+ <nvpair id="root-nvset-attr" name="attr" value="10"/>
+ <nvpair id="root-nvset-new_one" name="new_one" value="20"/>
+ <nvpair id="root-nvset-test" name="test" value="0"/>
+ </nvset>
+ </root>
+ """,
+ etree.tostring(root).decode()
+ )
+
+
+class GetNvsetTest(TestCase):
+ def test_success(self):
+ nvset = etree.XML(
+ """
+ <nvset>
+ <nvpair id="nvset-name1" name="name1" value="value1"/>
+ <nvpair id="nvset-name2" name="name2" value="value2"/>
+ <nvpair id="nvset-name3" name="name3"/>
+ </nvset>
+ """
+ )
+ self.assertEqual(
+ [
+ {
+ "id": "nvset-name1",
+ "name": "name1",
+ "value": "value1"
+ },
+ {
+ "id": "nvset-name2",
+ "name": "name2",
+ "value": "value2"
+ },
+ {
+ "id": "nvset-name3",
+ "name": "name3",
+ "value": ""
+ }
+ ],
+ nvpair.get_nvset(nvset)
+ )
diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
index dfe31fc..b59d50d 100644
--- a/pcs/lib/cib/tools.py
+++ b/pcs/lib/cib/tools.py
@@ -5,8 +5,12 @@ from __future__ import (
unicode_literals,
)
+import os
+import re
+import tempfile
from lxml import etree
+from pcs import settings
from pcs.lib import reports
from pcs.lib.errors import LibraryError
from pcs.lib.pacemaker_values import validate_id
@@ -71,6 +75,15 @@ def get_acls(tree):
acls = etree.SubElement(get_configuration(tree), "acls")
return acls
+
+def get_alerts(tree):
+ """
+ Return 'alerts' element from tree, create a new one if missing
+ tree -- cib etree node
+ """
+ return get_sub_element(get_configuration(tree), "alerts")
+
+
def get_constraints(tree):
"""
Return 'constraint' element from tree
@@ -87,3 +100,117 @@ def find_parent(element, tag_names):
def export_attributes(element):
return dict((key, value) for key, value in element.attrib.items())
+
+
+def get_sub_element(element, sub_element_tag, new_id=None, new_index=None):
+ """
+ Returns sub-element sub_element_tag of element. It will create a new
+ element if one doesn't exist yet. Id of the new element will be new_id if
+ it's not None. new_index specifies where the new element will be added;
+ if None it will be appended.
+
+ element -- parent element
+ sub_element_tag -- tag of wanted element
+ new_id -- id of new element
+ new_index -- index for new element
+ """
+ sub_element = element.find("./{0}".format(sub_element_tag))
+ if sub_element is None:
+ sub_element = etree.Element(sub_element_tag)
+ if new_id:
+ sub_element.set("id", new_id)
+ if new_index is None:
+ element.append(sub_element)
+ else:
+ element.insert(new_index, sub_element)
+ return sub_element
+
+
+def get_pacemaker_version_by_which_cib_was_validated(cib):
+ """
+ Return version of pacemaker which validated specified cib as tree.
+ Version is returned as tuple of integers: (<major>, <minor>, <revision>).
+ Raises LibraryError on any failure.
+
+ cib -- cib etree
+ """
+ version = cib.get("validate-with")
+ if version is None:
+ raise LibraryError(reports.cib_load_error_invalid_format())
+
+ regexp = re.compile(
+ r"pacemaker-(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<rev>\d+))?"
+ )
+ match = regexp.match(version)
+ if not match:
+ raise LibraryError(reports.cib_load_error_invalid_format())
+ return (
+ int(match.group("major")),
+ int(match.group("minor")),
+ int(match.group("rev") or 0)
+ )
+
+
+def upgrade_cib(cib, runner):
+ """
+ Upgrade CIB to the latest schema of installed pacemaker. Returns upgraded
+ CIB as string.
+ Raises LibraryError on any failure.
+
+ cib -- cib etree
+ runner -- CommandRunner
+ """
+ temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs")
+ temp_file.write(etree.tostring(cib).decode())
+ temp_file.flush()
+ output, retval = runner.run(
+ [
+ os.path.join(settings.pacemaker_binaries, "cibadmin"),
+ "--upgrade",
+ "--force"
+ ],
+ env_extend={"CIB_file": temp_file.name}
+ )
+
+ if retval != 0:
+ temp_file.close()
+ raise LibraryError(reports.cib_upgrade_failed(output))
+
+ try:
+ temp_file.seek(0)
+ return etree.fromstring(temp_file.read())
+ except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e:
+ raise LibraryError(reports.cib_upgrade_failed(str(e)))
+ finally:
+ temp_file.close()
+
+
+def ensure_cib_version(runner, cib, version):
+ """
+ This method ensures that specified cib is verified by pacemaker with
+ version 'version' or newer. If cib doesn't correspond to this version,
+ method will try to upgrade cib.
+ Returns cib which was verified by pacemaker version 'version' or later.
+ Raises LibraryError on any failure.
+
+ runner -- CommandRunner
+ cib -- cib tree
+ version -- tuple of integers (<major>, <minor>, <revision>)
+ """
+ current_version = get_pacemaker_version_by_which_cib_was_validated(
+ cib
+ )
+ if current_version >= version:
+ return None
+
+ upgraded_cib = upgrade_cib(cib, runner)
+ current_version = get_pacemaker_version_by_which_cib_was_validated(
+ upgraded_cib
+ )
+
+ if current_version >= version:
+ return upgraded_cib
+
+ raise LibraryError(reports.unable_to_upgrade_cib_to_required_version(
+ current_version, version
+ ))
diff --git a/pcs/lib/commands/alert.py b/pcs/lib/commands/alert.py
new file mode 100644
index 0000000..7371fbc
--- /dev/null
+++ b/pcs/lib/commands/alert.py
@@ -0,0 +1,169 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.lib import reports
+from pcs.lib.cib import alert
+from pcs.lib.errors import LibraryError
+
+
+REQUIRED_CIB_VERSION = (2, 5, 0)
+
+
+def create_alert(
+ lib_env,
+ alert_id,
+ path,
+ instance_attribute_dict,
+ meta_attribute_dict,
+ description=None
+):
+ """
+ Create new alert.
+ Raises LibraryError if path is not specified, or any other failure.
+
+ lib_env -- LibraryEnvironment
+ alert_id -- id of alert to be created, if None it will be generated
+ path -- path to script for alert
+ instance_attribute_dict -- dictionary of instance attributes
+ meta_attribute_dict -- dictionary of meta attributes
+ description -- alert description
+ """
+ if not path:
+ raise LibraryError(reports.required_option_is_missing("path"))
+
+ cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+
+ alert_el = alert.create_alert(cib, alert_id, path, description)
+ alert.update_instance_attributes(cib, alert_el, instance_attribute_dict)
+ alert.update_meta_attributes(cib, alert_el, meta_attribute_dict)
+
+ lib_env.push_cib(cib)
+
+
+def update_alert(
+ lib_env,
+ alert_id,
+ path,
+ instance_attribute_dict,
+ meta_attribute_dict,
+ description=None
+):
+ """
+ Update existing alert with specified id.
+
+ lib_env -- LibraryEnvironment
+ alert_id -- id of alert to be updated
+ path -- new path, if None old value will stay unchanged
+ instance_attribute_dict -- dictionary of instance attributes to update
+ meta_attribute_dict -- dictionary of meta attributes to update
+ description -- new description, if empty string, old description will be
+ deleted, if None old value will stay unchanged
+ """
+ cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+
+ alert_el = alert.update_alert(cib, alert_id, path, description)
+ alert.update_instance_attributes(cib, alert_el, instance_attribute_dict)
+ alert.update_meta_attributes(cib, alert_el, meta_attribute_dict)
+
+ lib_env.push_cib(cib)
+
+
+def remove_alert(lib_env, alert_id):
+ """
+ Remove alert with specified id.
+
+ lib_env -- LibraryEnvironment
+ alert_id -- id of alert which should be removed
+ """
+ cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+ alert.remove_alert(cib, alert_id)
+ lib_env.push_cib(cib)
+
+
+def add_recipient(
+ lib_env,
+ alert_id,
+ recipient_value,
+ instance_attribute_dict,
+ meta_attribute_dict,
+ description=None
+):
+ """
+ Add new recipient to alert with id alert_id.
+
+ lib_env -- LibraryEnvironment
+ alert_id -- id of alert to which new recipient should be added
+ recipient_value -- value of new recipient
+ instance_attribute_dict -- dictionary of instance attributes to update
+ meta_attribute_dict -- dictionary of meta attributes to update
+ description -- recipient description
+ """
+ if not recipient_value:
+ raise LibraryError(
+ reports.required_option_is_missing("value")
+ )
+
+ cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+ recipient = alert.add_recipient(
+ cib, alert_id, recipient_value, description
+ )
+ alert.update_instance_attributes(cib, recipient, instance_attribute_dict)
+ alert.update_meta_attributes(cib, recipient, meta_attribute_dict)
+
+ lib_env.push_cib(cib)
+
+
+def update_recipient(
+ lib_env,
+ alert_id,
+ recipient_value,
+ instance_attribute_dict,
+ meta_attribute_dict,
+ description=None
+):
+ """
+ Update existing recipient.
+
+ lib_env -- LibraryEnvironment
+ alert_id -- id of alert to which recipient belongs
+ recipient_value -- recipient to be updated
+ instance_attribute_dict -- dictionary of instance attributes to update
+ meta_attribute_dict -- dictionary of meta attributes to update
+ description -- new description, if empty string, old description will be
+ deleted, if None old value will stay unchanged
+ """
+ cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+ recipient = alert.update_recipient(
+ cib, alert_id, recipient_value, description
+ )
+ alert.update_instance_attributes(cib, recipient, instance_attribute_dict)
+ alert.update_meta_attributes(cib, recipient, meta_attribute_dict)
+
+ lib_env.push_cib(cib)
+
+
+def remove_recipient(lib_env, alert_id, recipient_value):
+ """
+ Remove existing recipient.
+
+ lib_env -- LibraryEnvironment
+ alert_id -- id of alert to which recipient belongs
+ recipient_value -- recipient to be removed
+ """
+ cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+ alert.remove_recipient(cib, alert_id, recipient_value)
+ lib_env.push_cib(cib)
+
+
+def get_all_alerts(lib_env):
+ """
+ Returns list of all alerts. See docs of pcs.lib.cib.alert.get_all_alerts for
+ description of data format.
+
+ lib_env -- LibraryEnvironment
+ """
+ return alert.get_all_alerts(lib_env.get_cib())
diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py
index c300a4c..1d1d85f 100644
--- a/pcs/lib/commands/qdevice.py
+++ b/pcs/lib/commands/qdevice.py
@@ -5,6 +5,9 @@ from __future__ import (
unicode_literals,
)
+import base64
+import binascii
+
from pcs.lib import external, reports
from pcs.lib.corosync import qdevice_net
from pcs.lib.errors import LibraryError
@@ -31,7 +34,7 @@ def qdevice_setup(lib_env, model, enable, start):
def qdevice_destroy(lib_env, model):
"""
Stop and disable qdevice on local host and remove its configuration
- string model qdevice model to initialize
+ string model qdevice model to destroy
"""
_ensure_not_cman(lib_env)
_check_model(model)
@@ -40,6 +43,22 @@ def qdevice_destroy(lib_env, model):
qdevice_net.qdevice_destroy()
lib_env.report_processor.process(reports.qdevice_destroy_success(model))
+def qdevice_status_text(lib_env, model, verbose=False, cluster=None):
+ """
+ Get runtime status of a quorum device in plain text
+ string model qdevice model to query
+ bool verbose get more detailed output
+ string cluster show information only about specified cluster
+ """
+ _ensure_not_cman(lib_env)
+ _check_model(model)
+ runner = lib_env.cmd_runner()
+ return (
+ qdevice_net.qdevice_status_generic_text(runner, verbose)
+ +
+ qdevice_net.qdevice_status_cluster_text(runner, cluster, verbose)
+ )
+
def qdevice_enable(lib_env, model):
"""
make qdevice start automatically on boot on local host
@@ -80,6 +99,73 @@ def qdevice_kill(lib_env, model):
_check_model(model)
_service_kill(lib_env, qdevice_net.qdevice_kill)
+def qdevice_net_sign_certificate_request(
+ lib_env, certificate_request, cluster_name
+):
+ """
+ Sign node certificate request by qnetd CA
+ string certificate_request base64 encoded certificate request
+ string cluster_name name of the cluster to which qdevice is being added
+ """
+ _ensure_not_cman(lib_env)
+ try:
+ certificate_request_data = base64.b64decode(certificate_request)
+ except (TypeError, binascii.Error):
+ raise LibraryError(reports.invalid_option_value(
+ "qnetd certificate request",
+ certificate_request,
+ ["base64 encoded certificate"]
+ ))
+ return base64.b64encode(
+ qdevice_net.qdevice_sign_certificate_request(
+ lib_env.cmd_runner(),
+ certificate_request_data,
+ cluster_name
+ )
+ )
+
+def client_net_setup(lib_env, ca_certificate):
+ """
+ Initialize qdevice net client on local host
+ ca_certificate base64 encoded qnetd CA certificate
+ """
+ _ensure_not_cman(lib_env)
+ try:
+ ca_certificate_data = base64.b64decode(ca_certificate)
+ except (TypeError, binascii.Error):
+ raise LibraryError(reports.invalid_option_value(
+ "qnetd CA certificate",
+ ca_certificate,
+ ["base64 encoded certificate"]
+ ))
+ qdevice_net.client_setup(lib_env.cmd_runner(), ca_certificate_data)
+
+def client_net_import_certificate(lib_env, certificate):
+ """
+ Import qnetd client certificate to local node certificate storage
+ certificate base64 encoded qnetd client certificate
+ """
+ _ensure_not_cman(lib_env)
+ try:
+ certificate_data = base64.b64decode(certificate)
+ except (TypeError, binascii.Error):
+ raise LibraryError(reports.invalid_option_value(
+ "qnetd client certificate",
+ certificate,
+ ["base64 encoded certificate"]
+ ))
+ qdevice_net.client_import_certificate_and_key(
+ lib_env.cmd_runner(),
+ certificate_data
+ )
+
+def client_net_destroy(lib_env):
+ """
+ delete qdevice client config files on local host
+ """
+ _ensure_not_cman(lib_env)
+ qdevice_net.client_destroy()
+
def _ensure_not_cman(lib_env):
if lib_env.is_cman_cluster:
raise LibraryError(reports.cman_unsupported_command())
diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py
index 1ee5411..7425e78 100644
--- a/pcs/lib/commands/quorum.py
+++ b/pcs/lib/commands/quorum.py
@@ -5,9 +5,18 @@ from __future__ import (
unicode_literals,
)
-
from pcs.lib import reports
from pcs.lib.errors import LibraryError
+from pcs.lib.corosync import (
+ live as corosync_live,
+ qdevice_net,
+ qdevice_client
+)
+from pcs.lib.external import (
+ NodeCommunicationException,
+ node_communicator_exception_to_report_item,
+ parallel_nodes_communication_helper,
+)
def get_config(lib_env):
@@ -42,6 +51,21 @@ def set_options(lib_env, options, skip_offline_nodes=False):
cfg.set_quorum_options(lib_env.report_processor, options)
lib_env.push_corosync_conf(cfg, skip_offline_nodes)
+def status_text(lib_env):
+ """
+ Get quorum runtime status in plain text
+ """
+ __ensure_not_cman(lib_env)
+ return corosync_live.get_quorum_status_text(lib_env.cmd_runner())
+
+def status_device_text(lib_env, verbose=False):
+ """
+ Get quorum device client runtime status in plain text
+ bool verbose get more detailed output
+ """
+ __ensure_not_cman(lib_env)
+ return qdevice_client.get_status_text(lib_env.cmd_runner(), verbose)
+
def add_device(
lib_env, model, model_options, generic_options, force_model=False,
force_options=False, skip_offline_nodes=False
@@ -58,6 +82,8 @@ def add_device(
__ensure_not_cman(lib_env)
cfg = lib_env.get_corosync_conf()
+ # Try adding qdevice to corosync.conf. This validates all the options and
+ # makes sure qdevice is not defined in corosync.conf yet.
cfg.add_quorum_device(
lib_env.report_processor,
model,
@@ -66,9 +92,131 @@ def add_device(
force_model,
force_options
)
- # TODO validation, verification, certificates, etc.
+
+ # First setup certificates for qdevice, then send corosync.conf to nodes.
+ # If anything fails, nodes will not have corosync.conf with qdevice in it,
+ # so there is no effect on the cluster.
+ if lib_env.is_corosync_conf_live:
+ # do model specific configuration
+ # if model is not known to pcs and was forced, do not configure anything
+ # else but corosync.conf, as we do not know what to do anyways
+ if model == "net":
+ _add_device_model_net(
+ lib_env,
+ # we are sure it's there, it was validated in add_quorum_device
+ model_options["host"],
+ cfg.get_cluster_name(),
+ cfg.get_nodes(),
+ skip_offline_nodes
+ )
+
+ lib_env.report_processor.process(
+ reports.service_enable_started("corosync-qdevice")
+ )
+ communicator = lib_env.node_communicator()
+ parallel_nodes_communication_helper(
+ qdevice_client.remote_client_enable,
+ [
+ [(lib_env.report_processor, communicator, node), {}]
+ for node in cfg.get_nodes()
+ ],
+ lib_env.report_processor,
+ skip_offline_nodes
+ )
+
+ # everything set up, it's safe to tell the nodes to use qdevice
lib_env.push_corosync_conf(cfg, skip_offline_nodes)
+ # Now, when corosync.conf has been reloaded, we can start qdevice service.
+ if lib_env.is_corosync_conf_live:
+ lib_env.report_processor.process(
+ reports.service_start_started("corosync-qdevice")
+ )
+ communicator = lib_env.node_communicator()
+ parallel_nodes_communication_helper(
+ qdevice_client.remote_client_start,
+ [
+ [(lib_env.report_processor, communicator, node), {}]
+ for node in cfg.get_nodes()
+ ],
+ lib_env.report_processor,
+ skip_offline_nodes
+ )
+
+def _add_device_model_net(
+ lib_env, qnetd_host, cluster_name, cluster_nodes, skip_offline_nodes
+):
+ """
+ setup cluster nodes for using qdevice model net
+ string qnetd_host address of qdevice provider (qnetd host)
+ string cluster_name name of the cluster to which qdevice is being added
+ NodeAddressesList cluster_nodes list of cluster nodes addresses
+ bool skip_offline_nodes continue even if not all nodes are accessible
+ """
+ communicator = lib_env.node_communicator()
+ runner = lib_env.cmd_runner()
+ reporter = lib_env.report_processor
+
+ reporter.process(
+ reports.qdevice_certificate_distribution_started()
+ )
+ # get qnetd CA certificate
+ try:
+ qnetd_ca_cert = qdevice_net.remote_qdevice_get_ca_certificate(
+ communicator,
+ qnetd_host
+ )
+ except NodeCommunicationException as e:
+ raise LibraryError(
+ node_communicator_exception_to_report_item(e)
+ )
+ # init certificate storage on all nodes
+ parallel_nodes_communication_helper(
+ qdevice_net.remote_client_setup,
+ [
+ ((communicator, node, qnetd_ca_cert), {})
+ for node in cluster_nodes
+ ],
+ reporter,
+ skip_offline_nodes
+ )
+ # create client certificate request
+ cert_request = qdevice_net.client_generate_certificate_request(
+ runner,
+ cluster_name
+ )
+ # sign the request on qnetd host
+ try:
+ signed_certificate = qdevice_net.remote_sign_certificate_request(
+ communicator,
+ qnetd_host,
+ cert_request,
+ cluster_name
+ )
+ except NodeCommunicationException as e:
+ raise LibraryError(
+ node_communicator_exception_to_report_item(e)
+ )
+ # transform the signed certificate to pk12 format which can be sent to nodes
+ pk12 = qdevice_net.client_cert_request_to_pk12(runner, signed_certificate)
+ # distribute final certificate to nodes
+ def do_and_report(reporter, communicator, node, pk12):
+ qdevice_net.remote_client_import_certificate_and_key(
+ communicator, node, pk12
+ )
+ reporter.process(
+ reports.qdevice_certificate_accepted_by_node(node.label)
+ )
+ parallel_nodes_communication_helper(
+ do_and_report,
+ [
+ ((reporter, communicator, node, pk12), {})
+ for node in cluster_nodes
+ ],
+ reporter,
+ skip_offline_nodes
+ )
+
def update_device(
lib_env, model_options, generic_options, force_options=False,
skip_offline_nodes=False
@@ -98,9 +246,95 @@ def remove_device(lib_env, skip_offline_nodes=False):
__ensure_not_cman(lib_env)
cfg = lib_env.get_corosync_conf()
+ model, dummy_options, dummy_options = cfg.get_quorum_device_settings()
cfg.remove_quorum_device()
lib_env.push_corosync_conf(cfg, skip_offline_nodes)
+ if lib_env.is_corosync_conf_live:
+ # disable qdevice
+ lib_env.report_processor.process(
+ reports.service_disable_started("corosync-qdevice")
+ )
+ communicator = lib_env.node_communicator()
+ parallel_nodes_communication_helper(
+ qdevice_client.remote_client_disable,
+ [
+ [(lib_env.report_processor, communicator, node), {}]
+ for node in cfg.get_nodes()
+ ],
+ lib_env.report_processor,
+ skip_offline_nodes
+ )
+ # stop qdevice
+ lib_env.report_processor.process(
+ reports.service_stop_started("corosync-qdevice")
+ )
+ communicator = lib_env.node_communicator()
+ parallel_nodes_communication_helper(
+ qdevice_client.remote_client_stop,
+ [
+ [(lib_env.report_processor, communicator, node), {}]
+ for node in cfg.get_nodes()
+ ],
+ lib_env.report_processor,
+ skip_offline_nodes
+ )
+ # handle model specific configuration
+ if model == "net":
+ _remove_device_model_net(
+ lib_env,
+ cfg.get_nodes(),
+ skip_offline_nodes
+ )
+
+def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes):
+ """
+ remove configuration used by qdevice model net
+ NodeAddressesList cluster_nodes list of cluster nodes addresses
+ bool skip_offline_nodes continue even if not all nodes are accessible
+ """
+ reporter = lib_env.report_processor
+ communicator = lib_env.node_communicator()
+
+ reporter.process(
+ reports.qdevice_certificate_removal_started()
+ )
+ def do_and_report(reporter, communicator, node):
+ qdevice_net.remote_client_destroy(communicator, node)
+ reporter.process(
+ reports.qdevice_certificate_removed_from_node(node.label)
+ )
+ parallel_nodes_communication_helper(
+ do_and_report,
+ [
+ [(reporter, communicator, node), {}]
+ for node in cluster_nodes
+ ],
+ lib_env.report_processor,
+ skip_offline_nodes
+ )
+
+def set_expected_votes_live(lib_env, expected_votes):
+ """
+ set expected votes in live cluster to specified value
+ numeric expected_votes desired value of expected votes
+ """
+ if lib_env.is_cman_cluster:
+ raise LibraryError(reports.cman_unsupported_command())
+
+ try:
+ votes_int = int(expected_votes)
+ if votes_int < 1:
+ raise ValueError()
+ except ValueError:
+ raise LibraryError(reports.invalid_option_value(
+ "expected votes",
+ expected_votes,
+ "positive integer"
+ ))
+
+ corosync_live.set_expected_votes(lib_env.cmd_runner(), votes_int)
+
def __ensure_not_cman(lib_env):
if lib_env.is_corosync_conf_live and lib_env.is_cman_cluster:
raise LibraryError(reports.cman_unsupported_command())
diff --git a/pcs/lib/commands/test/test_alert.py b/pcs/lib/commands/test/test_alert.py
new file mode 100644
index 0000000..34813df
--- /dev/null
+++ b/pcs/lib/commands/test/test_alert.py
@@ -0,0 +1,639 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+import logging
+from lxml import etree
+
+from unittest import TestCase
+
+from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.assertions import (
+ assert_raise_library_error,
+ assert_xml_equal,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as Severities
+from pcs.lib.env import LibraryEnvironment
+from pcs.lib.external import CommandRunner
+
+import pcs.lib.commands.alert as cmd_alert
+
+
+ at mock.patch("pcs.lib.cib.tools.upgrade_cib")
+class CreateAlertTest(TestCase):
+ def setUp(self):
+ self.mock_log = mock.MagicMock(spec_set=logging.Logger)
+ self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+ self.mock_rep = MockLibraryReportProcessor()
+ self.mock_env = LibraryEnvironment(
+ self.mock_log, self.mock_rep, cib_data="<cib/>"
+ )
+
+ def test_no_path(self, mock_upgrade_cib):
+ assert_raise_library_error(
+ lambda: cmd_alert.create_alert(
+ self.mock_env, None, None, None, None
+ ),
+ (
+ Severities.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {"option_name": "path"}
+ )
+ )
+ self.assertEqual(0, mock_upgrade_cib.call_count)
+
+ def test_upgrade_needed(self, mock_upgrade_cib):
+ self.mock_env._push_cib_xml(
+ """
+ <cib validate-with="pacemaker-2.4.1">
+ <configuration>
+ </configuration>
+ </cib>
+ """
+ )
+ mock_upgrade_cib.return_value = etree.XML(
+ """
+ <cib validate-with="pacemaker-2.5.0">
+ <configuration>
+ </configuration>
+ </cib>
+ """
+ )
+ cmd_alert.create_alert(
+ self.mock_env,
+ "my-alert",
+ "/my/path",
+ {
+ "instance": "value",
+ "another": "val"
+ },
+ {"meta1": "val1"},
+ "my description"
+ )
+ assert_xml_equal(
+ """
+<cib validate-with="pacemaker-2.5.0">
+ <configuration>
+ <alerts>
+ <alert id="my-alert" path="/my/path" description="my description">
+ <meta_attributes id="my-alert-meta_attributes">
+ <nvpair
+ id="my-alert-meta_attributes-meta1"
+ name="meta1"
+ value="val1"
+ />
+ </meta_attributes>
+ <instance_attributes id="my-alert-instance_attributes">
+ <nvpair
+ id="my-alert-instance_attributes-another"
+ name="another"
+ value="val"
+ />
+ <nvpair
+ id="my-alert-instance_attributes-instance"
+ name="instance"
+ value="value"
+ />
+ </instance_attributes>
+ </alert>
+ </alerts>
+ </configuration>
+</cib>
+ """,
+ self.mock_env._get_cib_xml()
+ )
+ self.assertEqual(1, mock_upgrade_cib.call_count)
+
+
+class UpdateAlertTest(TestCase):
+ def setUp(self):
+ self.mock_log = mock.MagicMock(spec_set=logging.Logger)
+ self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+ self.mock_rep = MockLibraryReportProcessor()
+ self.mock_env = LibraryEnvironment(
+ self.mock_log, self.mock_rep, cib_data="<cib/>"
+ )
+
+ def test_update_all(self):
+ self.mock_env._push_cib_xml(
+ """
+<cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="my-alert" path="/my/path" description="my description">
+ <instance_attributes id="my-alert-instance_attributes">
+ <nvpair
+ id="my-alert-instance_attributes-instance"
+ name="instance"
+ value="value"
+ />
+ <nvpair
+ id="my-alert-instance_attributes-another"
+ name="another"
+ value="val"
+ />
+ </instance_attributes>
+ <meta_attributes id="my-alert-meta_attributes">
+ <nvpair
+ id="my-alert-meta_attributes-meta1"
+ name="meta1"
+ value="val1"
+ />
+ </meta_attributes>
+ </alert>
+ </alerts>
+ </configuration>
+</cib>
+ """
+ )
+ cmd_alert.update_alert(
+ self.mock_env,
+ "my-alert",
+ "/another/one",
+ {
+ "instance": "",
+ "my-attr": "its_val"
+ },
+ {"meta1": "val2"},
+ ""
+ )
+ assert_xml_equal(
+ """
+<cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="my-alert" path="/another/one">
+ <instance_attributes id="my-alert-instance_attributes">
+ <nvpair
+ id="my-alert-instance_attributes-another"
+ name="another"
+ value="val"
+ />
+ <nvpair
+ id="my-alert-instance_attributes-my-attr"
+ name="my-attr"
+ value="its_val"
+ />
+ </instance_attributes>
+ <meta_attributes id="my-alert-meta_attributes">
+ <nvpair
+ id="my-alert-meta_attributes-meta1"
+ name="meta1"
+ value="val2"
+ />
+ </meta_attributes>
+ </alert>
+ </alerts>
+ </configuration>
+</cib>
+ """,
+ self.mock_env._get_cib_xml()
+ )
+
+ def test_update_instance_attribute(self):
+ self.mock_env._push_cib_xml(
+ """
+<cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="my-alert" path="/my/path" description="my description">
+ <instance_attributes id="my-alert-instance_attributes">
+ <nvpair
+ id="my-alert-instance_attributes-instance"
+ name="instance"
+ value="value"
+ />
+ </instance_attributes>
+ </alert>
+ </alerts>
+ </configuration>
+</cib>
+ """
+ )
+ cmd_alert.update_alert(
+ self.mock_env,
+ "my-alert",
+ None,
+ {"instance": "new_val"},
+ {},
+ None
+ )
+ assert_xml_equal(
+ """
+<cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="my-alert" path="/my/path" description="my description">
+ <instance_attributes id="my-alert-instance_attributes">
+ <nvpair
+ id="my-alert-instance_attributes-instance"
+ name="instance"
+ value="new_val"
+ />
+ </instance_attributes>
+ </alert>
+ </alerts>
+ </configuration>
+</cib>
+ """,
+ self.mock_env._get_cib_xml()
+ )
+
+ def test_alert_doesnt_exist(self):
+ self.mock_env._push_cib_xml(
+ """
+ <cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="alert" path="path"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """
+ )
+ assert_raise_library_error(
+ lambda: cmd_alert.update_alert(
+ self.mock_env, "unknown", "test", {}, {}, None
+ ),
+ (
+ Severities.ERROR,
+ report_codes.CIB_ALERT_NOT_FOUND,
+ {"alert": "unknown"}
+ )
+ )
+
+
+class RemoveAlertTest(TestCase):
+ def setUp(self):
+ self.mock_log = mock.MagicMock(spec_set=logging.Logger)
+ self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+ self.mock_rep = MockLibraryReportProcessor()
+ cib = """
+ <cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="alert" path="path"/>
+ <alert id="alert-1" path="/path"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """
+ self.mock_env = LibraryEnvironment(
+ self.mock_log, self.mock_rep, cib_data=cib
+ )
+
+ def test_success(self):
+ cmd_alert.remove_alert(self.mock_env, "alert")
+ assert_xml_equal(
+ """
+ <cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="alert-1" path="/path"/>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ self.mock_env._get_cib_xml()
+ )
+
+ def test_not_existing_alert(self):
+ assert_raise_library_error(
+ lambda: cmd_alert.remove_alert(self.mock_env, "unknown"),
+ (
+ Severities.ERROR,
+ report_codes.CIB_ALERT_NOT_FOUND,
+ {"alert": "unknown"}
+ )
+ )
+
+
+class AddRecipientTest(TestCase):
+ def setUp(self):
+ self.mock_log = mock.MagicMock(spec_set=logging.Logger)
+ self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+ self.mock_rep = MockLibraryReportProcessor()
+ cib = """
+ <cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="alert" path="path">
+ <recipient id="alert-recipient" value="value1"/>
+ </alert>
+ </alerts>
+ </configuration>
+ </cib>
+ """
+ self.mock_env = LibraryEnvironment(
+ self.mock_log, self.mock_rep, cib_data=cib
+ )
+
+ def test_alert_not_found(self):
+ assert_raise_library_error(
+ lambda: cmd_alert.add_recipient(
+ self.mock_env, "unknown", "recipient", {}, {}
+ ),
+ (
+ Severities.ERROR,
+ report_codes.CIB_ALERT_NOT_FOUND,
+ {"alert": "unknown"}
+ )
+ )
+
+ def test_value_not_defined(self):
+ assert_raise_library_error(
+ lambda: cmd_alert.add_recipient(
+ self.mock_env, "unknown", "", {}, {}
+ ),
+ (
+ Severities.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {"option_name": "value"}
+ )
+ )
+
+ def test_recipient_already_exists(self):
+ assert_raise_library_error(
+ lambda: cmd_alert.add_recipient(
+ self.mock_env, "alert", "value1", {}, {}
+ ),
+ (
+ Severities.ERROR,
+ report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
+ {
+ "recipient": "value1",
+ "alert": "alert"
+ }
+ )
+ )
+
+ def test_success(self):
+ cmd_alert.add_recipient(
+ self.mock_env,
+ "alert",
+ "value",
+ {"attr1": "val1"},
+ {
+ "attr2": "val2",
+ "attr1": "val1"
+ }
+ )
+ assert_xml_equal(
+ """
+<cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="alert" path="path">
+ <recipient id="alert-recipient" value="value1"/>
+ <recipient id="alert-recipient-1" value="value">
+ <meta_attributes
+ id="alert-recipient-1-meta_attributes"
+ >
+ <nvpair
+ id="alert-recipient-1-meta_attributes-attr1"
+ name="attr1"
+ value="val1"
+ />
+ <nvpair
+ id="alert-recipient-1-meta_attributes-attr2"
+ name="attr2"
+ value="val2"
+ />
+ </meta_attributes>
+ <instance_attributes
+ id="alert-recipient-1-instance_attributes"
+ >
+ <nvpair
+ id="alert-recipient-1-instance_attributes-attr1"
+ name="attr1"
+ value="val1"
+ />
+ </instance_attributes>
+ </recipient>
+ </alert>
+ </alerts>
+ </configuration>
+</cib>
+ """,
+ self.mock_env._get_cib_xml()
+ )
+
+
+class UpdateRecipientTest(TestCase):
+ def setUp(self):
+ self.mock_log = mock.MagicMock(spec_set=logging.Logger)
+ self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+ self.mock_rep = MockLibraryReportProcessor()
+ cib = """
+<cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="alert" path="path">
+ <recipient id="alert-recipient" value="value1"/>
+ <recipient id="alert-recipient-1" value="value" description="d">
+ <meta_attributes
+ id="alert-recipient-1-meta_attributes"
+ >
+ <nvpair
+ id="alert-recipient-1-meta_attributes-attr1"
+ name="attr1"
+ value="val1"
+ />
+ <nvpair
+ id="alert-recipient-1-meta_attributes-attr2"
+ name="attr2"
+ value="val2"
+ />
+ </meta_attributes>
+ <instance_attributes
+ id="alert-recipient-1-instance_attributes"
+ >
+ <nvpair
+ id="alert-recipient-1-instance_attributes-attr1"
+ name="attr1"
+ value="val1"
+ />
+ </instance_attributes>
+ </recipient>
+ </alert>
+ </alerts>
+ </configuration>
+</cib>
+ """
+ self.mock_env = LibraryEnvironment(
+ self.mock_log, self.mock_rep, cib_data=cib
+ )
+
+ def test_alert_not_found(self):
+ assert_raise_library_error(
+ lambda: cmd_alert.update_recipient(
+ self.mock_env, "unknown", "recipient", {}, {}
+ ),
+ (
+ Severities.ERROR,
+ report_codes.CIB_ALERT_NOT_FOUND,
+ {"alert": "unknown"}
+ )
+ )
+
+ def test_recipient_not_found(self):
+ assert_raise_library_error(
+ lambda: cmd_alert.update_recipient(
+ self.mock_env, "alert", "recipient", {}, {}
+ ),
+ (
+ Severities.ERROR,
+ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
+ {
+ "recipient": "recipient",
+ "alert": "alert"
+ }
+ )
+ )
+
+ def test_update_all(self):
+ cmd_alert.update_recipient(
+ self.mock_env,
+ "alert",
+ "value",
+ {"attr1": "value"},
+ {
+ "attr1": "",
+ "attr3": "new_val"
+ },
+ "desc"
+ )
+ assert_xml_equal(
+ """
+<cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="alert" path="path">
+ <recipient id="alert-recipient" value="value1"/>
+ <recipient
+ id="alert-recipient-1"
+ value="value"
+ description="desc"
+ >
+ <meta_attributes
+ id="alert-recipient-1-meta_attributes"
+ >
+ <nvpair
+ id="alert-recipient-1-meta_attributes-attr2"
+ name="attr2"
+ value="val2"
+ />
+ <nvpair
+ id="alert-recipient-1-meta_attributes-attr3"
+ name="attr3"
+ value="new_val"
+ />
+ </meta_attributes>
+ <instance_attributes
+ id="alert-recipient-1-instance_attributes"
+ >
+ <nvpair
+ id="alert-recipient-1-instance_attributes-attr1"
+ name="attr1"
+ value="value"
+ />
+ </instance_attributes>
+ </recipient>
+ </alert>
+ </alerts>
+ </configuration>
+</cib>
+ """,
+ self.mock_env._get_cib_xml()
+ )
+
+
+class RemoveRecipientTest(TestCase):
+ def setUp(self):
+ self.mock_log = mock.MagicMock(spec_set=logging.Logger)
+ self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+ self.mock_rep = MockLibraryReportProcessor()
+ cib = """
+ <cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="alert" path="path">
+ <recipient id="alert-recipient" value="value1"/>
+ <recipient id="alert-recipient-1" value="value"/>
+ </alert>
+ </alerts>
+ </configuration>
+ </cib>
+ """
+ self.mock_env = LibraryEnvironment(
+ self.mock_log, self.mock_rep, cib_data=cib
+ )
+
+ def test_alert_not_found(self):
+ assert_raise_library_error(
+ lambda: cmd_alert.remove_recipient(
+ self.mock_env, "unknown", "recipient"
+ ),
+ (
+ Severities.ERROR,
+ report_codes.CIB_ALERT_NOT_FOUND,
+ {"alert": "unknown"}
+ )
+ )
+
+ def test_recipient_not_found(self):
+ assert_raise_library_error(
+ lambda: cmd_alert.remove_recipient(
+ self.mock_env, "alert", "recipient"
+ ),
+ (
+ Severities.ERROR,
+ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
+ {
+ "recipient": "recipient",
+ "alert": "alert"
+ }
+ )
+ )
+
+ def test_success(self):
+ cmd_alert.remove_recipient(self.mock_env, "alert", "value1")
+ assert_xml_equal(
+ """
+ <cib validate-with="pacemaker-2.5">
+ <configuration>
+ <alerts>
+ <alert id="alert" path="path">
+ <recipient id="alert-recipient-1" value="value"/>
+ </alert>
+ </alerts>
+ </configuration>
+ </cib>
+ """,
+ self.mock_env._get_cib_xml()
+ )
+
+
+ at mock.patch("pcs.lib.cib.alert.get_all_alerts")
+class GetAllAlertsTest(TestCase):
+ def setUp(self):
+ self.mock_log = mock.MagicMock(spec_set=logging.Logger)
+ self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+ self.mock_rep = MockLibraryReportProcessor()
+ self.mock_env = LibraryEnvironment(
+ self.mock_log, self.mock_rep, cib_data='<cib/>'
+ )
+
+ def test_success(self, mock_alerts):
+ mock_alerts.return_value = [{"id": "alert"}]
+ self.assertEqual(
+ [{"id": "alert"}],
+ cmd_alert.get_all_alerts(self.mock_env)
+ )
+ self.assertEqual(1, mock_alerts.call_count)
diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py
index a22a014..751001b 100644
--- a/pcs/lib/commands/test/test_ticket.py
+++ b/pcs/lib/commands/test/test_ticket.py
@@ -44,7 +44,7 @@ class CreateTest(TestCase):
})
assert_xml_equal(
- env.get_cib_xml(),
+ env._get_cib_xml(),
str(cib.append_to_first_tag_name(
'constraints', """
<rsc_ticket
diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py
index 5a486ca..600a89b 100644
--- a/pcs/lib/corosync/config_facade.py
+++ b/pcs/lib/corosync/config_facade.py
@@ -22,6 +22,12 @@ class ConfigFacade(object):
"last_man_standing_window",
"wait_for_all",
)
+ QUORUM_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = (
+ "auto_tie_breaker",
+ "last_man_standing",
+ "last_man_standing_window",
+ )
+
@classmethod
def from_string(cls, config_string):
@@ -52,6 +58,8 @@ class ConfigFacade(object):
self._config = parsed_config
# set to True if changes cannot be applied on running cluster
self._need_stopped_cluster = False
+ # set to True if qdevice reload is required to apply changes
+ self._need_qdevice_reload = False
@property
def config(self):
@@ -61,6 +69,17 @@ class ConfigFacade(object):
def need_stopped_cluster(self):
return self._need_stopped_cluster
+ @property
+ def need_qdevice_reload(self):
+ return self._need_qdevice_reload
+
+ def get_cluster_name(self):
+ cluster_name = ""
+ for totem in self.config.get_sections("totem"):
+ for attrs in totem.get_attributes("cluster_name"):
+ cluster_name = attrs[1]
+ return cluster_name
+
def get_nodes(self):
"""
Get all defined nodes
@@ -112,8 +131,9 @@ class ConfigFacade(object):
def __validate_quorum_options(self, options):
report_items = []
+ has_qdevice = self.has_quorum_device()
+ qdevice_incompatible_options = []
for name, value in sorted(options.items()):
-
allowed_names = self.__class__.QUORUM_OPTIONS
if name not in allowed_names:
report_items.append(
@@ -124,6 +144,13 @@ class ConfigFacade(object):
if value == "":
continue
+ if (
+ has_qdevice
+ and
+ name in self.__class__.QUORUM_OPTIONS_INCOMPATIBLE_WITH_QDEVICE
+ ):
+ qdevice_incompatible_options.append(name)
+
if name == "last_man_standing_window":
if not value.isdigit():
report_items.append(reports.invalid_option_value(
@@ -137,6 +164,13 @@ class ConfigFacade(object):
name, value, allowed_values
))
+ if qdevice_incompatible_options:
+ report_items.append(
+ reports.corosync_options_incompatible_with_qdevice(
+ qdevice_incompatible_options
+ )
+ )
+
return report_items
def has_quorum_device(self):
@@ -201,13 +235,13 @@ class ConfigFacade(object):
force=force_options
)
)
+
# configuration cleanup
- remove_need_stopped_cluster = {
- "auto_tie_breaker": "",
- "last_man_standing": "",
- "last_man_standing_window": "",
- }
- need_stopped_cluster = False
+ remove_need_stopped_cluster = dict([
+ (name, "")
+ for name in self.__class__.QUORUM_OPTIONS_INCOMPATIBLE_WITH_QDEVICE
+ ])
+ # remove old device settings
quorum_section_list = self.__ensure_section(self.config, "quorum")
for quorum in quorum_section_list:
for device in quorum.get_sections("device"):
@@ -218,13 +252,19 @@ class ConfigFacade(object):
and
value not in ["", "0"]
):
- need_stopped_cluster = True
+ self._need_stopped_cluster = True
+ # remove conflicting quorum options
attrs_to_remove = {
"allow_downscale": "",
"two_node": "",
}
attrs_to_remove.update(remove_need_stopped_cluster)
self.__set_section_options(quorum_section_list, attrs_to_remove)
+ # remove nodes' votes
+ for nodelist in self.config.get_sections("nodelist"):
+ for node in nodelist.get_sections("node"):
+ node.del_attributes_by_name("quorum_votes")
+
# add new configuration
quorum = quorum_section_list[-1]
new_device = config_parser.Section("device")
@@ -234,12 +274,9 @@ class ConfigFacade(object):
new_model = config_parser.Section(model)
self.__set_section_options([new_model], model_options)
new_device.add_section(new_model)
+ self.__update_qdevice_votes()
self.__update_two_node()
self.__remove_empty_sections(self.config)
- # update_two_node sets self._need_stopped_cluster when changing an
- # algorithm lms <-> 2nodelms. We don't care about that, it's not really
- # a change, as there was no qdevice before. So we override it.
- self._need_stopped_cluster = need_stopped_cluster
def update_quorum_device(
self, report_processor, model_options, generic_options,
@@ -281,9 +318,10 @@ class ConfigFacade(object):
model_sections.extend(device.get_sections(model))
self.__set_section_options(device_sections, generic_options)
self.__set_section_options(model_sections, model_options)
+ self.__update_qdevice_votes()
self.__update_two_node()
self.__remove_empty_sections(self.config)
- self._need_stopped_cluster = True
+ self._need_qdevice_reload = True
def remove_quorum_device(self):
"""
@@ -369,7 +407,7 @@ class ConfigFacade(object):
continue
if name == "algorithm":
- allowed_values = ("2nodelms", "ffsplit", "lms")
+ allowed_values = ("ffsplit", "lms")
if value not in allowed_values:
report_items.append(reports.invalid_option_value(
name, value, allowed_values, severity, forceable
@@ -461,19 +499,29 @@ class ConfigFacade(object):
else:
for quorum in self.config.get_sections("quorum"):
quorum.del_attributes_by_name("two_node")
- # update qdevice algorithm "lms" vs "2nodelms"
+
+ def __update_qdevice_votes(self):
+ # ffsplit won't start if votes is missing or not set to 1
+ # for other algorithms it's required not to put votes at all
+ model = None
+ algorithm = None
+ device_sections = []
for quorum in self.config.get_sections("quorum"):
for device in quorum.get_sections("device"):
- for net in device.get_sections("net"):
- algorithm = None
- for dummy_name, value in net.get_attributes("algorithm"):
- algorithm = value
- if algorithm == "lms" and has_two_nodes:
- net.set_attribute("algorithm", "2nodelms")
- self._need_stopped_cluster = True
- elif algorithm == "2nodelms" and not has_two_nodes:
- net.set_attribute("algorithm", "lms")
- self._need_stopped_cluster = True
+ device_sections.append(device)
+ for dummy_name, value in device.get_attributes("model"):
+ model = value
+ for device in device_sections:
+ for model_section in device.get_sections(model):
+ for dummy_name, value in model_section.get_attributes(
+ "algorithm"
+ ):
+ algorithm = value
+ if model == "net":
+ if algorithm == "ffsplit":
+ self.__set_section_options(device_sections, {"votes": "1"})
+ else:
+ self.__set_section_options(device_sections, {"votes": ""})
def __set_section_options(self, section_list, options):
for section in section_list[:-1]:
diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py
index 2446a46..b49b9f6 100644
--- a/pcs/lib/corosync/live.py
+++ b/pcs/lib/corosync/live.py
@@ -47,3 +47,33 @@ def reload_config(runner):
reports.corosync_config_reload_error(output.rstrip())
)
+def get_quorum_status_text(runner):
+ """
+ Get runtime quorum status from the local node
+ """
+ output, retval = runner.run([
+ os.path.join(settings.corosync_binaries, "corosync-quorumtool"),
+ "-p"
+ ])
+ # retval is 0 on success if node is not in partition with quorum
+ # retval is 1 on error OR on success if node has quorum
+ if retval not in [0, 1]:
+ raise LibraryError(
+ reports.corosync_quorum_get_status_error(output)
+ )
+ return output
+
+def set_expected_votes(runner, votes):
+ """
+ set expected votes in live cluster to specified value
+ """
+ output, retval = runner.run([
+ os.path.join(settings.corosync_binaries, "corosync-quorumtool"),
+ # format votes to handle the case where they are int
+ "-e", "{0}".format(votes)
+ ])
+ if retval != 0:
+ raise LibraryError(
+ reports.corosync_quorum_set_expected_votes_error(output)
+ )
+ return output
diff --git a/pcs/lib/corosync/qdevice_client.py b/pcs/lib/corosync/qdevice_client.py
new file mode 100644
index 0000000..98fbb0e
--- /dev/null
+++ b/pcs/lib/corosync/qdevice_client.py
@@ -0,0 +1,93 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+import os.path
+
+from pcs import settings
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+
+
+def get_status_text(runner, verbose=False):
+ """
+ Get quorum device client runtime status in plain text
+ bool verbose get more detailed output
+ """
+ cmd = [
+ os.path.join(settings.corosync_binaries, "corosync-qdevice-tool"),
+ "-s"
+ ]
+ if verbose:
+ cmd.append("-v")
+ output, retval = runner.run(cmd)
+ if retval != 0:
+ raise LibraryError(
+ reports.corosync_quorum_get_status_error(output)
+ )
+ return output
+
+def remote_client_enable(reporter, node_communicator, node):
+ """
+ enable qdevice client service (corosync-qdevice) on a remote node
+ """
+ response = node_communicator.call_node(
+ node,
+ "remote/qdevice_client_enable",
+ None
+ )
+ if response == "corosync is not enabled, skipping":
+ reporter.process(
+ reports.service_enable_skipped(
+ "corosync-qdevice",
+ "corosync is not enabled",
+ node.label
+ )
+ )
+ else:
+ reporter.process(
+ reports.service_enable_success("corosync-qdevice", node.label)
+ )
+
+def remote_client_disable(reporter, node_communicator, node):
+ """
+ disable qdevice client service (corosync-qdevice) on a remote node
+ """
+ node_communicator.call_node(node, "remote/qdevice_client_disable", None)
+ reporter.process(
+ reports.service_disable_success("corosync-qdevice", node.label)
+ )
+
+def remote_client_start(reporter, node_communicator, node):
+ """
+ start qdevice client service (corosync-qdevice) on a remote node
+ """
+ response = node_communicator.call_node(
+ node,
+ "remote/qdevice_client_start",
+ None
+ )
+ if response == "corosync is not running, skipping":
+ reporter.process(
+ reports.service_start_skipped(
+ "corosync-qdevice",
+ "corosync is not running",
+ node.label
+ )
+ )
+ else:
+ reporter.process(
+ reports.service_start_success("corosync-qdevice", node.label)
+ )
+
+def remote_client_stop(reporter, node_communicator, node):
+ """
+ stop qdevice client service (corosync-qdevice) on a remote node
+ """
+ node_communicator.call_node(node, "remote/qdevice_client_stop", None)
+ reporter.process(
+ reports.service_stop_success("corosync-qdevice", node.label)
+ )
diff --git a/pcs/lib/corosync/qdevice_net.py b/pcs/lib/corosync/qdevice_net.py
index 7479257..4054592 100644
--- a/pcs/lib/corosync/qdevice_net.py
+++ b/pcs/lib/corosync/qdevice_net.py
@@ -5,8 +5,14 @@ from __future__ import (
unicode_literals,
)
+import base64
+import binascii
+import functools
+import os
import os.path
+import re
import shutil
+import tempfile
from pcs import settings
from pcs.lib import external, reports
@@ -15,6 +21,18 @@ from pcs.lib.errors import LibraryError
__model = "net"
__service_name = "corosync-qnetd"
+__qnetd_certutil = os.path.join(
+ settings.corosync_qnet_binaries,
+ "corosync-qnetd-certutil"
+)
+__qnetd_tool = os.path.join(
+ settings.corosync_qnet_binaries,
+ "corosync-qnetd-tool"
+)
+__qdevice_certutil = os.path.join(
+ settings.corosync_binaries,
+ "corosync-qdevice-net-certutil"
+)
def qdevice_setup(runner):
"""
@@ -24,25 +42,63 @@ def qdevice_setup(runner):
raise LibraryError(reports.qdevice_already_initialized(__model))
output, retval = runner.run([
- os.path.join(settings.corosync_binaries, "corosync-qnetd-certutil"),
- "-i"
+ __qnetd_certutil, "-i"
])
if retval != 0:
raise LibraryError(
reports.qdevice_initialization_error(__model, output.rstrip())
)
+def qdevice_initialized():
+ """
+ check if qdevice server certificate database has been initialized
+ """
+ return os.path.exists(os.path.join(
+ settings.corosync_qdevice_net_server_certs_dir,
+ "cert8.db"
+ ))
+
def qdevice_destroy():
"""
delete qdevice configuration on local host
"""
try:
- shutil.rmtree(settings.corosync_qdevice_net_server_certs_dir)
+ if qdevice_initialized():
+ shutil.rmtree(settings.corosync_qdevice_net_server_certs_dir)
except EnvironmentError as e:
raise LibraryError(
reports.qdevice_destroy_error(__model, e.strerror)
)
+def qdevice_status_generic_text(runner, verbose=False):
+ """
+ get qdevice runtime status in plain text
+ bool verbose get more detailed output
+ """
+ cmd = [__qnetd_tool, "-s"]
+ if verbose:
+ cmd.append("-v")
+ output, retval = runner.run(cmd)
+ if retval != 0:
+ raise LibraryError(reports.qdevice_get_status_error(__model, output))
+ return output
+
+def qdevice_status_cluster_text(runner, cluster=None, verbose=False):
+ """
+ get qdevice runtime status in plain text
+ bool verbose get more detailed output
+ string cluster show information only about specified cluster
+ """
+ cmd = [__qnetd_tool, "-l"]
+ if verbose:
+ cmd.append("-v")
+ if cluster:
+ cmd.extend(["-c", cluster])
+ output, retval = runner.run(cmd)
+ if retval != 0:
+ raise LibraryError(reports.qdevice_get_status_error(__model, output))
+ return output
+
def qdevice_enable(runner):
"""
make qdevice start automatically on boot on local host
@@ -72,3 +128,255 @@ def qdevice_kill(runner):
kill qdevice now on local host
"""
external.kill_services(runner, [__service_name])
+
+def qdevice_sign_certificate_request(runner, cert_request, cluster_name):
+ """
+ sign client certificate request
+ cert_request certificate request data
+ string cluster_name name of the cluster to which qdevice is being added
+ """
+ if not qdevice_initialized():
+ raise LibraryError(reports.qdevice_not_initialized(__model))
+ # save the certificate request, corosync tool only works with files
+ tmpfile = _store_to_tmpfile(
+ cert_request,
+ reports.qdevice_certificate_sign_error
+ )
+ # sign the request
+ output, retval = runner.run([
+ __qnetd_certutil, "-s", "-c", tmpfile.name, "-n", cluster_name
+ ])
+ tmpfile.close() # temp file is deleted on close
+ if retval != 0:
+ raise LibraryError(
+ reports.qdevice_certificate_sign_error(output.strip())
+ )
+ # get signed certificate, corosync tool only works with files
+ return _get_output_certificate(
+ output,
+ reports.qdevice_certificate_sign_error
+ )
+
+def client_setup(runner, ca_certificate):
+ """
+ initialize qdevice client on local host
+ ca_certificate qnetd CA certificate
+ """
+ client_destroy()
+ # save CA certificate, corosync tool only works with files
+ ca_file_path = os.path.join(
+ settings.corosync_qdevice_net_client_certs_dir,
+ settings.corosync_qdevice_net_client_ca_file_name
+ )
+ try:
+ if not os.path.exists(ca_file_path):
+ os.makedirs(
+ settings.corosync_qdevice_net_client_certs_dir,
+ mode=0o700
+ )
+ with open(ca_file_path, "wb") as ca_file:
+ ca_file.write(ca_certificate)
+ except EnvironmentError as e:
+ raise LibraryError(
+ reports.qdevice_initialization_error(__model, e.strerror)
+ )
+ # initialize client's certificate storage
+ output, retval = runner.run([
+ __qdevice_certutil, "-i", "-c", ca_file_path
+ ])
+ if retval != 0:
+ raise LibraryError(
+ reports.qdevice_initialization_error(__model, output.rstrip())
+ )
+
+def client_initialized():
+ """
+ check if qdevice net client certificate database has been initialized
+ """
+ return os.path.exists(os.path.join(
+ settings.corosync_qdevice_net_client_certs_dir,
+ "cert8.db"
+ ))
+
+def client_destroy():
+ """
+ delete qdevice client config files on local host
+ """
+ try:
+ if client_initialized():
+ shutil.rmtree(settings.corosync_qdevice_net_client_certs_dir)
+ except EnvironmentError as e:
+ raise LibraryError(
+ reports.qdevice_destroy_error(__model, e.strerror)
+ )
+
+def client_generate_certificate_request(runner, cluster_name):
+ """
+ create a certificate request which can be signed by qnetd server
+ string cluster_name name of the cluster to which qdevice is being added
+ """
+ if not client_initialized():
+ raise LibraryError(reports.qdevice_not_initialized(__model))
+ output, retval = runner.run([
+ __qdevice_certutil, "-r", "-n", cluster_name
+ ])
+ if retval != 0:
+ raise LibraryError(
+ reports.qdevice_initialization_error(__model, output.rstrip())
+ )
+ return _get_output_certificate(
+ output,
+ functools.partial(reports.qdevice_initialization_error, __model)
+ )
+
+def client_cert_request_to_pk12(runner, cert_request):
+ """
+ transform signed certificate request to pk12 certificate which can be
+ imported to nodes
+ cert_request signed certificate request
+ """
+ if not client_initialized():
+ raise LibraryError(reports.qdevice_not_initialized(__model))
+ # save the signed certificate request, corosync tool only works with files
+ tmpfile = _store_to_tmpfile(
+ cert_request,
+ reports.qdevice_certificate_import_error
+ )
+ # transform it
+ output, retval = runner.run([
+ __qdevice_certutil, "-M", "-c", tmpfile.name
+ ])
+ tmpfile.close() # temp file is deleted on close
+ if retval != 0:
+ raise LibraryError(
+ reports.qdevice_certificate_import_error(output)
+ )
+ # get resulting pk12, corosync tool only works with files
+ return _get_output_certificate(
+ output,
+ reports.qdevice_certificate_import_error
+ )
+
+def client_import_certificate_and_key(runner, pk12_certificate):
+ """
+ import qdevice client certificate to the local node certificate storage
+ """
+ if not client_initialized():
+ raise LibraryError(reports.qdevice_not_initialized(__model))
+ # save the certificate, corosync tool only works with files
+ tmpfile = _store_to_tmpfile(
+ pk12_certificate,
+ reports.qdevice_certificate_import_error
+ )
+ output, retval = runner.run([
+ __qdevice_certutil, "-m", "-c", tmpfile.name
+ ])
+ tmpfile.close() # temp file is deleted on close
+ if retval != 0:
+ raise LibraryError(
+ reports.qdevice_certificate_import_error(output)
+ )
+
+def remote_qdevice_get_ca_certificate(node_communicator, host):
+ """
+ connect to a qnetd host and get qnetd CA certificate
+ string host address of the qnetd host
+ """
+ try:
+ return base64.b64decode(
+ node_communicator.call_host(
+ host,
+ "remote/qdevice_net_get_ca_certificate",
+ None
+ )
+ )
+ except (TypeError, binascii.Error):
+ raise LibraryError(reports.invalid_response_format(host))
+
+def remote_client_setup(node_communicator, node, qnetd_ca_certificate):
+ """
+ connect to a remote node and initialize qdevice there
+ NodeAddresses node target node
+ qnetd_ca_certificate qnetd CA certificate
+ """
+ return node_communicator.call_node(
+ node,
+ "remote/qdevice_net_client_init_certificate_storage",
+ external.NodeCommunicator.format_data_dict([
+ ("ca_certificate", base64.b64encode(qnetd_ca_certificate)),
+ ])
+ )
+
+def remote_sign_certificate_request(
+ node_communicator, host, cert_request, cluster_name
+):
+ """
+ connect to a qdevice host and sign node certificate there
+ string host address of the qnetd host
+ cert_request certificate request to be signed
+ string cluster_name name of the cluster to which qdevice is being added
+ """
+ try:
+ return base64.b64decode(
+ node_communicator.call_host(
+ host,
+ "remote/qdevice_net_sign_node_certificate",
+ external.NodeCommunicator.format_data_dict([
+ ("certificate_request", base64.b64encode(cert_request)),
+ ("cluster_name", cluster_name),
+ ])
+ )
+ )
+ except (TypeError, binascii.Error):
+ raise LibraryError(reports.invalid_response_format(host))
+
+def remote_client_import_certificate_and_key(node_communicator, node, pk12):
+ """
+ import pk12 certificate on a remote node
+ NodeAddresses node target node
+ pk12 certificate
+ """
+ return node_communicator.call_node(
+ node,
+ "remote/qdevice_net_client_import_certificate",
+ external.NodeCommunicator.format_data_dict([
+ ("certificate", base64.b64encode(pk12)),
+ ])
+ )
+
+def remote_client_destroy(node_communicator, node):
+ """
+ delete qdevice client config files on a remote node
+ NodeAddresses node target node
+ """
+ return node_communicator.call_node(
+ node,
+ "remote/qdevice_net_client_destroy",
+ None
+ )
+
+def _store_to_tmpfile(data, report_func):
+ try:
+ tmpfile = tempfile.NamedTemporaryFile(mode="wb", suffix=".pcs")
+ tmpfile.write(data)
+ tmpfile.flush()
+ return tmpfile
+ except EnvironmentError as e:
+ raise LibraryError(report_func(e.strerror))
+
+def _get_output_certificate(cert_tool_output, report_func):
+ regexp = re.compile(r"^Certificate( request)? stored in (?P<path>.+)$")
+ filename = None
+ for line in cert_tool_output.splitlines():
+ match = regexp.search(line)
+ if match:
+ filename = match.group("path")
+ if not filename:
+ raise LibraryError(report_func(cert_tool_output))
+ try:
+ with open(filename, "rb") as cert_file:
+ return cert_file.read()
+ except EnvironmentError as e:
+ raise LibraryError(report_func(
+ "{path}: {error}".format(path=filename, error=e.strerror)
+ ))
diff --git a/pcs/lib/env.py b/pcs/lib/env.py
index 99e3397..24e4252 100644
--- a/pcs/lib/env.py
+++ b/pcs/lib/env.py
@@ -10,6 +10,7 @@ from lxml import etree
from pcs.lib import reports
from pcs.lib.external import (
is_cman_cluster,
+ is_service_running,
CommandRunner,
NodeCommunicator,
)
@@ -21,12 +22,14 @@ from pcs.lib.corosync.live import (
from pcs.lib.nodes_task import (
distribute_corosync_conf,
check_corosync_offline_on_nodes,
+ qdevice_reload_on_nodes,
)
from pcs.lib.pacemaker import (
get_cib,
get_cib_xml,
replace_cib_configuration_xml,
)
+from pcs.lib.cib.tools import ensure_cib_version
class LibraryEnvironment(object):
@@ -54,6 +57,7 @@ class LibraryEnvironment(object):
# related code currently - it's in pcsd
self._auth_tokens_getter = auth_tokens_getter
self._auth_tokens = None
+ self._cib_upgraded = False
@property
def logger(self):
@@ -77,27 +81,45 @@ class LibraryEnvironment(object):
self._is_cman_cluster = is_cman_cluster(self.cmd_runner())
return self._is_cman_cluster
- def get_cib_xml(self):
+ @property
+ def cib_upgraded(self):
+ return self._cib_upgraded
+
+ def _get_cib_xml(self):
if self.is_cib_live:
return get_cib_xml(self.cmd_runner())
else:
return self._cib_data
- def get_cib(self):
- return get_cib(self.get_cib_xml())
+ def get_cib(self, minimal_version=None):
+ cib = get_cib(self._get_cib_xml())
+ if minimal_version is not None:
+ upgraded_cib = ensure_cib_version(
+ self.cmd_runner(), cib, minimal_version
+ )
+ if upgraded_cib is not None:
+ cib = upgraded_cib
+ self._cib_upgraded = True
+ return cib
- def push_cib_xml(self, cib_data):
+ def _push_cib_xml(self, cib_data):
if self.is_cib_live:
- replace_cib_configuration_xml(self.cmd_runner(), cib_data)
+ replace_cib_configuration_xml(
+ self.cmd_runner(), cib_data, self._cib_upgraded
+ )
+ if self._cib_upgraded:
+ self._cib_upgraded = False
+ self.report_processor.process(reports.cib_upgrade_successful())
else:
self._cib_data = cib_data
+
def push_cib(self, cib):
#etree returns bytes: b'xml'
#python 3 removed .encode() from bytes
#run(...) calls subprocess.Popen.communicate which calls encode...
#so here is bytes to str conversion
- self.push_cib_xml(etree.tostring(cib).decode())
+ self._push_cib_xml(etree.tostring(cib).decode())
@property
def is_cib_live(self):
@@ -132,11 +154,18 @@ class LibraryEnvironment(object):
corosync_conf_data,
skip_offline_nodes
)
- if not corosync_conf_facade.need_stopped_cluster:
+ if is_service_running(self.cmd_runner(), "corosync"):
reload_corosync_config(self.cmd_runner())
self.report_processor.process(
reports.corosync_config_reloaded()
)
+ if corosync_conf_facade.need_qdevice_reload:
+ qdevice_reload_on_nodes(
+ self.node_communicator(),
+ self.report_processor,
+ node_list,
+ skip_offline_nodes
+ )
else:
self._corosync_conf_data = corosync_conf_data
diff --git a/pcs/lib/errors.py b/pcs/lib/errors.py
index c0bd3d1..9cab5e9 100644
--- a/pcs/lib/errors.py
+++ b/pcs/lib/errors.py
@@ -42,4 +42,8 @@ class ReportItem(object):
self.message = self.message_pattern.format(**self.info)
def __repr__(self):
- return self.code+": "+str(self.info)
+ return "{severity} {code}: {info}".format(
+ severity=self.severity,
+ code=self.code,
+ info=self.info
+ )
diff --git a/pcs/lib/external.py b/pcs/lib/external.py
index 34426f9..c773e5a 100644
--- a/pcs/lib/external.py
+++ b/pcs/lib/external.py
@@ -49,7 +49,11 @@ except ImportError:
from pcs.lib import reports
from pcs.lib.errors import LibraryError, ReportItemSeverity
-from pcs.common.tools import simple_cache
+from pcs.common import report_codes
+from pcs.common.tools import (
+ simple_cache,
+ run_parallel as tools_run_parallel,
+)
from pcs import settings
@@ -521,7 +525,7 @@ class NodeCommunicator(object):
# text in response body with HTTP code 400
# we need to be backward compatible with that
raise NodeCommandUnsuccessfulException(
- host, request, response_data
+ host, request, response_data.rstrip()
)
elif e.code == 401:
raise NodeAuthenticationException(
@@ -581,3 +585,39 @@ class NodeCommunicator(object):
base64.b64encode(" ".join(self._groups).encode("utf-8"))
))
return cookies
+
+
+def parallel_nodes_communication_helper(
+ func, func_args_kwargs, reporter, skip_offline_nodes=False
+):
+ """
+ Help running node calls in parallel and handle communication exceptions.
+ Raise LibraryError on any failure.
+
+ function func function to be run, should be a function calling a node
+ iterable func_args_kwargs list of tuples: (*args, **kwargs)
+ bool skip_offline_nodes do not raise LibraryError if a node is unreachable
+ """
+ failure_severity = ReportItemSeverity.ERROR
+ failure_forceable = report_codes.SKIP_OFFLINE_NODES
+ if skip_offline_nodes:
+ failure_severity = ReportItemSeverity.WARNING
+ failure_forceable = None
+ report_items = []
+
+ def _parallel(*args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except NodeCommunicationException as e:
+ report_items.append(
+ node_communicator_exception_to_report_item(
+ e,
+ failure_severity,
+ failure_forceable
+ )
+ )
+ except LibraryError as e:
+ report_items.extend(e.args)
+
+ tools_run_parallel(_parallel, func_args_kwargs)
+ reporter.process_list(report_items)
diff --git a/pcs/lib/nodes_task.py b/pcs/lib/nodes_task.py
index b9a61f6..e94d327 100644
--- a/pcs/lib/nodes_task.py
+++ b/pcs/lib/nodes_task.py
@@ -8,14 +8,19 @@ from __future__ import (
import json
from pcs.common import report_codes
+from pcs.common.tools import run_parallel as tools_run_parallel
from pcs.lib import reports
-from pcs.lib.errors import ReportItemSeverity
+from pcs.lib.errors import LibraryError, ReportItemSeverity
from pcs.lib.external import (
NodeCommunicator,
NodeCommunicationException,
node_communicator_exception_to_report_item,
+ parallel_nodes_communication_helper,
+)
+from pcs.lib.corosync import (
+ live as corosync_live,
+ qdevice_client,
)
-from pcs.lib.corosync import live as corosync_live
def distribute_corosync_conf(
@@ -33,11 +38,9 @@ def distribute_corosync_conf(
if skip_offline_nodes:
failure_severity = ReportItemSeverity.WARNING
failure_forceable = None
-
- reporter.process(reports.corosync_config_distribution_started())
report_items = []
- # TODO use parallel communication
- for node in node_addr_list:
+
+ def _parallel(node):
try:
corosync_live.set_remote_corosync_conf(
node_communicator,
@@ -62,6 +65,12 @@ def distribute_corosync_conf(
failure_forceable
)
)
+
+ reporter.process(reports.corosync_config_distribution_started())
+ tools_run_parallel(
+ _parallel,
+ [((node, ), {}) for node in node_addr_list]
+ )
reporter.process_list(report_items)
def check_corosync_offline_on_nodes(
@@ -77,13 +86,11 @@ def check_corosync_offline_on_nodes(
if skip_offline_nodes:
failure_severity = ReportItemSeverity.WARNING
failure_forceable = None
-
- reporter.process(reports.corosync_not_running_check_started())
report_items = []
- # TODO use parallel communication
- for node in node_addr_list:
+
+ def _parallel(node):
try:
- status = node_communicator.call_node(node, "remote/status", "")
+ status = node_communicator.call_node(node, "remote/status", None)
if not json.loads(status)["corosync"]:
reporter.process(
reports.corosync_not_running_on_node_ok(node.label)
@@ -115,8 +122,48 @@ def check_corosync_offline_on_nodes(
failure_forceable
)
)
+
+ reporter.process(reports.corosync_not_running_check_started())
+ tools_run_parallel(
+ _parallel,
+ [((node, ), {}) for node in node_addr_list]
+ )
reporter.process_list(report_items)
+def qdevice_reload_on_nodes(
+ node_communicator, reporter, node_addr_list, skip_offline_nodes=False
+):
+ """
+ Reload corosync-qdevice configuration on cluster nodes
+ NodeAddressesList node_addr_list nodes to reload config on
+ bool skip_offline_nodes don't raise an error on node communication errors
+ """
+ reporter.process(reports.qdevice_client_reload_started())
+ parallel_params = [
+ [(reporter, node_communicator, node), {}]
+ for node in node_addr_list
+ ]
+ # catch an exception so we try to start qdevice on nodes where we stopped it
+ report_items = []
+ try:
+ parallel_nodes_communication_helper(
+ qdevice_client.remote_client_stop,
+ parallel_params,
+ reporter,
+ skip_offline_nodes
+ )
+ except LibraryError as e:
+ report_items.extend(e.args)
+ try:
+ parallel_nodes_communication_helper(
+ qdevice_client.remote_client_start,
+ parallel_params,
+ reporter,
+ skip_offline_nodes
+ )
+ except LibraryError as e:
+ report_items.extend(e.args)
+ reporter.process_list(report_items)
def node_check_auth(communicator, node):
"""
diff --git a/pcs/lib/pacemaker.py b/pcs/lib/pacemaker.py
index 14745c5..fd6f97b 100644
--- a/pcs/lib/pacemaker.py
+++ b/pcs/lib/pacemaker.py
@@ -55,24 +55,21 @@ def get_cib(xml):
except (etree.XMLSyntaxError, etree.DocumentInvalid):
raise LibraryError(reports.cib_load_error_invalid_format())
-def replace_cib_configuration_xml(runner, xml):
- output, retval = runner.run(
- [
- __exec("cibadmin"),
- "--replace", "--scope", "configuration", "--verbose", "--xml-pipe"
- ],
- stdin_string=xml
- )
+def replace_cib_configuration_xml(runner, xml, cib_upgraded=False):
+ cmd = [__exec("cibadmin"), "--replace", "--verbose", "--xml-pipe"]
+ if not cib_upgraded:
+ cmd += ["--scope", "configuration"]
+ output, retval = runner.run(cmd, stdin_string=xml)
if retval != 0:
raise LibraryError(reports.cib_push_error(retval, output))
-def replace_cib_configuration(runner, tree):
+def replace_cib_configuration(runner, tree, cib_upgraded=False):
#etree returns bytes: b'xml'
#python 3 removed .encode() from bytes
#run(...) calls subprocess.Popen.communicate which calls encode...
#so here is bytes to str conversion
xml = etree.tostring(tree).decode()
- return replace_cib_configuration_xml(runner, xml)
+ return replace_cib_configuration_xml(runner, xml, cib_upgraded)
def get_local_node_status(runner):
try:
diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
index 4f4f580..9ececf9 100644
--- a/pcs/lib/reports.py
+++ b/pcs/lib/reports.py
@@ -552,6 +552,32 @@ def corosync_running_on_node_fail(node):
info={"node": node}
)
+def corosync_quorum_get_status_error(reason):
+ """
+ unable to get runtime status of quorum on local node
+ string reason an error message
+ """
+ return ReportItem.error(
+ report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR,
+ "Unable to get quorum status: {reason}",
+ info={
+ "reason": reason,
+ }
+ )
+
+def corosync_quorum_set_expected_votes_error(reason):
+ """
+ unable to set expected votes in a live cluster
+ string reason an error message
+ """
+ return ReportItem.error(
+ report_codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR,
+ "Unable to set expected votes: {reason}",
+ info={
+ "reason": reason,
+ }
+ )
+
def corosync_config_reloaded():
"""
corosync configuration has been reloaded
@@ -614,6 +640,21 @@ def corosync_config_parser_other_error():
"Unable to parse corosync config"
)
+def corosync_options_incompatible_with_qdevice(options):
+ """
+ cannot set specified corosync options when qdevice is in use
+ iterable options incompatible options names
+ """
+ return ReportItem.error(
+ report_codes.COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE,
+ "These options cannot be set when the cluster uses a quorum device: "
+ + "{options_names_str}",
+ info={
+ "options_names": options,
+ "options_names_str": ", ".join(sorted(options)),
+ }
+ )
+
def qdevice_already_defined():
"""
qdevice is already set up in a cluster, when it was expected not to be
@@ -641,6 +682,15 @@ def qdevice_remove_or_cluster_stop_needed():
"You need to stop the cluster or remove qdevice from cluster to continue"
)
+def qdevice_client_reload_started():
+ """
+ qdevice client configuration is about to be reloaded on nodes
+ """
+ return ReportItem.info(
+ report_codes.QDEVICE_CLIENT_RELOAD_STARTED,
+ "Reloading qdevice configuration on nodes..."
+ )
+
def qdevice_already_initialized(model):
"""
cannot create qdevice on local host, it has been already created
@@ -654,6 +704,19 @@ def qdevice_already_initialized(model):
}
)
+def qdevice_not_initialized(model):
+ """
+ cannot work with qdevice on local host, it has not been created yet
+ string model qdevice model
+ """
+ return ReportItem.error(
+ report_codes.QDEVICE_NOT_INITIALIZED,
+ "Quorum device '{model}' has not been initialized yet",
+ info={
+ "model": model,
+ }
+ )
+
def qdevice_initialization_success(model):
"""
qdevice was successfully initialized on local host
@@ -682,6 +745,72 @@ def qdevice_initialization_error(model, reason):
}
)
+def qdevice_certificate_distribution_started():
+ """
+ Qdevice certificates are about to be set up on nodes
+ """
+ return ReportItem.info(
+ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
+ "Setting up qdevice certificates on nodes..."
+ )
+
+def qdevice_certificate_accepted_by_node(node):
+ """
+ Qdevice certificates have been saved to a node
+ string node node on which certificates have been saved
+ """
+ return ReportItem.info(
+ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+ "{node}: Succeeded",
+ info={"node": node}
+ )
+
+def qdevice_certificate_removal_started():
+ """
+ Qdevice certificates are about to be removed from nodes
+ """
+ return ReportItem.info(
+ report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED,
+ "Removing qdevice certificates from nodes..."
+ )
+
+def qdevice_certificate_removed_from_node(node):
+ """
+ Qdevice certificates have been removed from a node
+ string node node on which certificates have been deleted
+ """
+ return ReportItem.info(
+ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
+ "{node}: Succeeded",
+ info={"node": node}
+ )
+
+def qdevice_certificate_import_error(reason):
+ """
+ an error occured when importing qdevice certificate to a node
+ string reason an error message
+ """
+ return ReportItem.error(
+ report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
+ "Unable to import quorum device certificate: {reason}",
+ info={
+ "reason": reason,
+ }
+ )
+
+def qdevice_certificate_sign_error(reason):
+ """
+ an error occured when signing qdevice certificate
+ string reason an error message
+ """
+ return ReportItem.error(
+ report_codes.QDEVICE_CERTIFICATE_SIGN_ERROR,
+ "Unable to sign quorum device certificate: {reason}",
+ info={
+ "reason": reason,
+ }
+ )
+
def qdevice_destroy_success(model):
"""
qdevice configuration successfully removed from local host
@@ -710,6 +839,21 @@ def qdevice_destroy_error(model, reason):
}
)
+def qdevice_get_status_error(model, reason):
+ """
+ unable to get runtime status of qdevice
+ string model qdevice model
+ string reason an error message
+ """
+ return ReportItem.error(
+ report_codes.QDEVICE_GET_STATUS_ERROR,
+ "Unable to get status of quorum device '{model}': {reason}",
+ info={
+ "model": model,
+ "reason": reason,
+ }
+ )
+
def cman_unsupported_command():
"""
requested library command is not available as local cluster is CMAN based
@@ -1022,31 +1166,55 @@ def service_start_started(service):
}
)
-def service_start_error(service, reason):
+def service_start_error(service, reason, node=None):
"""
system service start failed
string service service name or description
string reason error message
+ string node node on which service has been requested to start
"""
+ msg = "Unable to start {service}: {reason}"
return ReportItem.error(
report_codes.SERVICE_START_ERROR,
- "Unable to start {service}: {reason}",
+ msg if node is None else "{node}: " + msg,
info={
"service": service,
"reason": reason,
+ "node": node,
}
)
-def service_start_success(service):
+def service_start_success(service, node=None):
"""
system service was started successfully
string service service name or description
+ string node node on which service has been requested to start
"""
+ msg = "{service} started"
return ReportItem.info(
report_codes.SERVICE_START_SUCCESS,
- "{service} started",
+ msg if node is None else "{node}: " + msg,
info={
"service": service,
+ "node": node,
+ }
+ )
+
+def service_start_skipped(service, reason, node=None):
+ """
+ starting system service was skipped, no error occured
+ string service service name or description
+ string reason why the start has been skipped
+ string node node on which service has been requested to start
+ """
+ msg = "not starting {service} - {reason}"
+ return ReportItem.info(
+ report_codes.SERVICE_START_SKIPPED,
+ msg if node is None else "{node}: " + msg,
+ info={
+ "service": service,
+ "reason": reason,
+ "node": node,
}
)
@@ -1063,31 +1231,37 @@ def service_stop_started(service):
}
)
-def service_stop_error(service, reason):
+def service_stop_error(service, reason, node=None):
"""
system service stop failed
string service service name or description
string reason error message
+ string node node on which service has been requested to stop
"""
+ msg = "Unable to stop {service}: {reason}"
return ReportItem.error(
report_codes.SERVICE_STOP_ERROR,
- "Unable to stop {service}: {reason}",
+ msg if node is None else "{node}: " + msg,
info={
"service": service,
"reason": reason,
+ "node": node,
}
)
-def service_stop_success(service):
+def service_stop_success(service, node=None):
"""
system service was stopped successfully
string service service name or description
+ string node node on which service has been requested to stop
"""
+ msg = "{service} stopped"
return ReportItem.info(
report_codes.SERVICE_STOP_SUCCESS,
- "{service} stopped",
+ msg if node is None else "{node}: " + msg,
info={
"service": service,
+ "node": node,
}
)
@@ -1121,6 +1295,19 @@ def service_kill_success(services):
}
)
+def service_enable_started(service):
+ """
+ system service is being enabled
+ string service service name or description
+ """
+ return ReportItem.info(
+ report_codes.SERVICE_ENABLE_STARTED,
+ "Enabling {service}...",
+ info={
+ "service": service,
+ }
+ )
+
def service_enable_error(service, reason, node=None):
"""
system service enable failed
@@ -1143,7 +1330,7 @@ def service_enable_success(service, node=None):
"""
system service was enabled successfully
string service service name or description
- string node node on which service was enabled
+ string node node on which service has been enabled
"""
msg = "{service} enabled"
return ReportItem.info(
@@ -1155,6 +1342,37 @@ def service_enable_success(service, node=None):
}
)
+def service_enable_skipped(service, reason, node=None):
+ """
+ enabling system service was skipped, no error occured
+ string service service name or description
+ string reason why the enabling has been skipped
+ string node node on which service has been requested to enable
+ """
+ msg = "not enabling {service} - {reason}"
+ return ReportItem.info(
+ report_codes.SERVICE_ENABLE_SKIPPED,
+ msg if node is None else "{node}: " + msg,
+ info={
+ "service": service,
+ "reason": reason,
+ "node": node,
+ }
+ )
+
+def service_disable_started(service):
+ """
+ system service is being disabled
+ string service service name or description
+ """
+ return ReportItem.info(
+ report_codes.SERVICE_DISABLE_STARTED,
+ "Disabling {service}...",
+ info={
+ "service": service,
+ }
+ )
+
def service_disable_error(service, reason, node=None):
"""
system service disable failed
@@ -1189,7 +1407,6 @@ def service_disable_success(service, node=None):
}
)
-
def invalid_metadata_format(severity=ReportItemSeverity.ERROR, forceable=None):
"""
Invalid format of metadata
@@ -1201,7 +1418,6 @@ def invalid_metadata_format(severity=ReportItemSeverity.ERROR, forceable=None):
forceable=forceable
)
-
def unable_to_get_agent_metadata(
agent, reason, severity=ReportItemSeverity.ERROR, forceable=None
):
@@ -1436,3 +1652,94 @@ def cluster_restart_required_to_apply_changes():
report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES,
"Cluster restart is required in order to apply these changes."
)
+
+
+def cib_alert_recipient_already_exists(alert_id, recipient_value):
+ """
+ Error that recipient already exists.
+
+ alert_id -- id of alert to which recipient belongs
+ recipient_value -- value of recipient
+ """
+ return ReportItem.error(
+ report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
+ "Recipient '{recipient}' in alert '{alert}' already exists.",
+ info={
+ "recipient": recipient_value,
+ "alert": alert_id
+ }
+ )
+
+
+def cib_alert_recipient_not_found(alert_id, recipient_value):
+ """
+ Specified recipient not found.
+
+ alert_id -- id of alert to which recipient should belong
+ recipient_value -- recipient value
+ """
+ return ReportItem.error(
+ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
+ "Recipient '{recipient}' not found in alert '{alert}'.",
+ info={
+ "recipient": recipient_value,
+ "alert": alert_id
+ }
+ )
+
+
+def cib_alert_not_found(alert_id):
+ """
+ Alert with specified id doesn't exist.
+
+ alert_id -- id of alert
+ """
+ return ReportItem.error(
+ report_codes.CIB_ALERT_NOT_FOUND,
+ "Alert '{alert}' not found.",
+ info={"alert": alert_id}
+ )
+
+
+def cib_upgrade_successful():
+ """
+ Upgrade of CIB schema was successful.
+ """
+ return ReportItem.info(
+ report_codes.CIB_UPGRADE_SUCCESSFUL,
+ "CIB has been upgraded to the latest schema version."
+ )
+
+
+def cib_upgrade_failed(reason):
+ """
+ Upgrade of CIB schema failed.
+
+ reason -- reason of failure
+ """
+ return ReportItem.error(
+ report_codes.CIB_UPGRADE_FAILED,
+ "Upgrading of CIB to the latest schema failed: {reason}",
+ info={"reason": reason}
+ )
+
+
+def unable_to_upgrade_cib_to_required_version(
+ current_version, required_version
+):
+ """
+ Unable to upgrade CIB to minimal required schema version.
+
+ current_version -- current version of CIB schema
+ required_version -- required version of CIB schema
+ """
+ return ReportItem.error(
+ report_codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION,
+ "Unable to upgrade CIB to required schema version {required_version} "
+ "or higher. Current version is {current_version}. Newer version of "
+ "pacemaker is needed.",
+ info={
+ "required_version": "{0}.{1}.{2}".format(*required_version),
+ "current_version": "{0}.{1}.{2}".format(*current_version)
+ }
+ )
diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py
index 1330bfc..4488a73 100644
--- a/pcs/lib/sbd.py
+++ b/pcs/lib/sbd.py
@@ -57,7 +57,7 @@ def check_sbd(communicator, node, watchdog):
return communicator.call_node(
node,
"remote/check_sbd",
- NodeCommunicator.format_data_dict({"watchdog": watchdog})
+ NodeCommunicator.format_data_dict([("watchdog", watchdog)])
)
@@ -119,7 +119,7 @@ def set_sbd_config(communicator, node, config):
communicator.call_node(
node,
"remote/set_sbd_config",
- NodeCommunicator.format_data_dict({"config": config})
+ NodeCommunicator.format_data_dict([("config", config)])
)
@@ -171,7 +171,7 @@ def enable_sbd_service(communicator, node):
communicator -- NodeCommunicator
node -- NodeAddresses
"""
- communicator.call_node(node, "remote/sbd_enable", "")
+ communicator.call_node(node, "remote/sbd_enable", None)
def enable_sbd_service_on_node(report_processor, node_communicator, node):
@@ -215,7 +215,7 @@ def disable_sbd_service(communicator, node):
communicator -- NodeCommunicator
node -- NodeAddresses
"""
- communicator.call_node(node, "remote/sbd_disable", "")
+ communicator.call_node(node, "remote/sbd_disable", None)
def disable_sbd_service_on_node(report_processor, node_communicator, node):
@@ -259,7 +259,7 @@ def set_stonith_watchdog_timeout_to_zero(communicator, node):
node -- NodeAddresses
"""
communicator.call_node(
- node, "remote/set_stonith_watchdog_timeout_to_zero", ""
+ node, "remote/set_stonith_watchdog_timeout_to_zero", None
)
@@ -292,7 +292,7 @@ def remove_stonith_watchdog_timeout(communicator, node):
communicator -- NodeCommunicator
node -- NodeAddresses
"""
- communicator.call_node(node, "remote/remove_stonith_watchdog_timeout", "")
+ communicator.call_node(node, "remote/remove_stonith_watchdog_timeout", None)
def remove_stonith_watchdog_timeout_on_all_nodes(node_communicator, node_list):
@@ -351,7 +351,7 @@ def get_sbd_config(communicator, node):
communicator -- NodeCommunicator
node -- NodeAddresses
"""
- return communicator.call_node(node, "remote/get_sbd_config", "")
+ return communicator.call_node(node, "remote/get_sbd_config", None)
def is_sbd_enabled(runner):
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 38a4913..4426444 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "June 2016" "pcs 0.9.152" "System Administration Utilities"
+.TH PCS "8" "July 2016" "pcs 0.9.153" "System Administration Utilities"
.SH NAME
pcs \- pacemaker/corosync configuration system
.SH SYNOPSIS
@@ -56,6 +56,9 @@ Manage pcs daemon.
.TP
node
Manage cluster nodes.
+.TP
+alert
+Manage pacemaker alerts.
.SS "resource"
.TP
[show [resource id]] [\fB\-\-full\fR] [\fB\-\-groups\fR]
@@ -256,8 +259,8 @@ Sync corosync configuration to all nodes found from current corosync.conf file (
cib [filename] [scope=<scope> | \fB\-\-config\fR]
Get the raw xml from the CIB (Cluster Information Base). If a filename is provided, we save the CIB to that file, otherwise the CIB is printed. Specify scope to get a specific section of the CIB. Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults, status. \fB\-\-config\fR is the same as scope=configuration. Do not specify a scope if you want to edit the saved CIB using pcs (pcs -f <command>).
.TP
-cib-push <filename> [scope=<scope> | \fB\-\-config\fR]
-Push the raw xml from <filename> to the CIB (Cluster Information Base). You can obtain the CIB by running the 'pcs cluster cib' command, which is recommended first step when you want to perform desired modifications (pcs \fB\-f\fR <command>) for the one-off push. Specify scope to push a specific section of the CIB. Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults. \fB\-\-config\fR is the same as scope=configuration. U [...]
+cib-push <filename> [scope=<scope> | \fB\-\-config\fR] [\fB\-\-wait\fR[=<n>]]
+Push the raw xml from <filename> to the CIB (Cluster Information Base). You can obtain the CIB by running the 'pcs cluster cib' command, which is recommended first step when you want to perform desired modifications (pcs \fB\-f\fR <command>) for the one-off push. Specify scope to push a specific section of the CIB. Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults. \fB\-\-config\fR is the same as scope=configuration. U [...]
.TP
cib\-upgrade
Upgrade the CIB to conform to the latest version of the document schema.
@@ -482,7 +485,7 @@ ticket add <ticket> [<role>] <resource id> [options] [id=constraint\-id]
Create a ticket constraint for <resource id>. Available option is loss-policy=fence/stop/freeze/demote. A role can be master, slave, started or stopped.
.TP
ticket set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]]
-Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket. Optional constraint option is loss-policy=fence/stop/freeze/demote.
+Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket=<ticket>. Optional constraint options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote.
.TP
remove [constraint id]...
Remove constraint(s) or constraint rules with the specified id(s).
@@ -515,8 +518,11 @@ rule remove <rule id>
Remove a rule if a rule id is specified, if rule is last rule in its constraint, the constraint will be removed.
.SS "qdevice"
.TP
+status <device model> [\fB\-\-full\fR] [<cluster name>]
+Show runtime status of specified model of quorum device provider. Using \fB\-\-full\fR will give more detailed output. If <cluster name> is specified, only information about the specified cluster will be displayed.
+.TP
setup model <device model> [\fB\-\-enable\fR] [\fB\-\-start\fR]
-Configure specified model of quorum device provider. Quorum device then may be added to clusters by "pcs quorum device add" command. \fB\-\-start\fR will also start the provider. \fB\-\-enable\fR will configure the provider to start on boot.
+Configure specified model of quorum device provider. Quorum device then can be added to clusters by running "pcs quorum device add" command in a cluster. \fB\-\-start\fR will also start the provider. \fB\-\-enable\fR will configure the provider to start on boot.
.TP
destroy <device model>
Disable and stop specified model of quorum device provider and delete its configuration files.
@@ -528,7 +534,7 @@ stop <device model>
Stop specified model of quorum device provider.
.TP
kill <device model>
-Force specified model of quorum device provider to stop (performs kill -9).
+Force specified model of quorum device provider to stop (performs kill \-9). Note that init system (e.g. systemd) can detect that the qdevice is not running and start it again. If you want to stop the qdevice, run "pcs qdevice stop" command.
.TP
enable <device model>
Configure specified model of quorum device provider to start on boot.
@@ -540,14 +546,25 @@ Configure specified model of quorum device provider to not start on boot.
config
Show quorum configuration.
.TP
-device add [generic options] model <device model> [model options]
-Add quorum device to cluster. Quorum device needs to be created first by "pcs qdevice setup" command.
+status
+Show quorum runtime status.
+.TP
+device add [<generic options>] model <device model> [<model options>]
+Add a quorum device to the cluster. Quorum device needs to be created first by "pcs qdevice setup" command. It is not possible to use more than one quorum device in a cluster simultaneously. Generic options, model and model options are all documented in corosync's corosync\-qdevice(8) man page.
.TP
device remove
-Remove quorum device from cluster.
+Remove a quorum device from the cluster.
+.TP
+device status [\fB\-\-full\fR]
+Show quorum device runtime status. Using \fB\-\-full\fR will give more detailed output.
+.TP
+device update [<generic options>] [model <model options>]
+Add/Change quorum device options. Generic options and model options are all documented in corosync's corosync\-qdevice(8) man page. Requires the cluster to be stopped.
+
+WARNING: If you want to change "host" option of qdevice model net, use "pcs quorum device remove" and "pcs quorum device add" commands to set up configuration properly unless old and new host is the same machine.
.TP
-device update [generic options] [model <model options>]
-Add/Change quorum device options. Requires cluster to be stopped.
+expected\-votes <votes>
+Set expected votes in the live cluster to specified value. This only affects the live cluster, not changes any configuration files.
.TP
unblock [\fB\-\-force\fR]
Cancel waiting for all nodes when establishing quorum. Useful in situations where you know the cluster is inquorate, but you are confident that the cluster should proceed with resource management regardless. This command should ONLY be used when nodes which the cluster is waiting for have been confirmed to be powered off and to have no access to shared resources.
@@ -555,7 +572,7 @@ Cancel waiting for all nodes when establishing quorum. Useful in situations whe
.B WARNING: If the nodes are not actually powered off or they do have access to shared resources, data corruption/cluster failure can occur. To prevent accidental running of this command, \-\-force or interactive user response is required in order to proceed.
.TP
update [auto_tie_breaker=[0|1]] [last_man_standing=[0|1]] [last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]]
-Add/Change quorum options. At least one option must be specified. Options are documented in corosync's votequorum(5) man page. Requires cluster to be stopped.
+Add/Change quorum options. At least one option must be specified. Options are documented in corosync's votequorum(5) man page. Requires the cluster to be stopped.
.SS "status"
.TP
[status] [\fB\-\-full\fR | \fB\-\-hide-inactive\fR]
@@ -635,6 +652,28 @@ Remove node from standby mode (the node specified will now be able to host resou
.TP
utilization [<node> [<name>=<value> ...]]
Add specified utilization options to specified node. If node is not specified, shows utilization of all nodes. If utilization options are not specified, shows utilization of specified node. Utilization option should be in format name=value, value has to be integer. Options may be removed by setting an option without a value. Example: pcs node utilization node1 cpu=4 ram=
+.SS "alert"
+.TP
+[config|show]
+Show all configured alerts.
+.TP
+create path=<path> [id=<alert\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
+Create new alert with specified path. Id will be automatically generated if it is not specified.
+.TP
+update <alert\-id> [path=<path>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
+Update existing alert with specified id.
+.TP
+remove <alert\-id>
+Remove alert with specified id.
+.TP
+recipient add <alert\-id> <recipient\-value> [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
+Add new recipient to specified alert.
+.TP
+recipient update <alert\-id> <recipient\-value> [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
+Update existing recipient identified by alert and it's value.
+.TP
+recipient remove <alert\-id> <recipient\-value>
+Remove specified recipient.
.SH EXAMPLES
.TP
Show all resources
diff --git a/pcs/qdevice.py b/pcs/qdevice.py
index 1f06709..0037704 100644
--- a/pcs/qdevice.py
+++ b/pcs/qdevice.py
@@ -23,6 +23,8 @@ def qdevice_cmd(lib, argv, modifiers):
try:
if sub_cmd == "help":
usage.qdevice(argv)
+ elif sub_cmd == "status":
+ qdevice_status_cmd(lib, argv_next, modifiers)
elif sub_cmd == "setup":
qdevice_setup_cmd(lib, argv_next, modifiers)
elif sub_cmd == "destroy":
@@ -37,6 +39,11 @@ def qdevice_cmd(lib, argv, modifiers):
qdevice_enable_cmd(lib, argv_next, modifiers)
elif sub_cmd == "disable":
qdevice_disable_cmd(lib, argv_next, modifiers)
+ # following commands are internal use only, called from pcsd
+ elif sub_cmd == "sign-net-cert-request":
+ qdevice_sign_net_cert_request_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "net-client":
+ qdevice_net_client_cmd(lib, argv_next, modifiers)
else:
raise CmdLineInputError()
except LibraryError as e:
@@ -44,6 +51,35 @@ def qdevice_cmd(lib, argv, modifiers):
except CmdLineInputError as e:
utils.exit_on_cmdline_input_errror(e, "qdevice", sub_cmd)
+# this is internal use only, called from pcsd
+def qdevice_net_client_cmd(lib, argv, modifiers):
+ if len(argv) < 1:
+ utils.err("invalid command")
+
+ sub_cmd, argv_next = argv[0], argv[1:]
+ try:
+ if sub_cmd == "setup":
+ qdevice_net_client_setup_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "import-certificate":
+ qdevice_net_client_import_certificate_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "destroy":
+ qdevice_net_client_destroy(lib, argv_next, modifiers)
+ else:
+ raise CmdLineInputError("invalid command")
+ except LibraryError as e:
+ utils.process_library_reports(e.args)
+ except CmdLineInputError as e:
+ utils.err(e.message)
+
+def qdevice_status_cmd(lib, argv, modifiers):
+ if len(argv) < 1 or len(argv) > 2:
+ raise CmdLineInputError()
+ model = argv[0]
+ cluster = None if len(argv) < 2 else argv[1]
+ print(
+ lib.qdevice.status(model, modifiers["full"], cluster)
+ )
+
def qdevice_setup_cmd(lib, argv, modifiers):
if len(argv) != 2:
raise CmdLineInputError()
@@ -87,3 +123,38 @@ def qdevice_disable_cmd(lib, argv, modifiers):
raise CmdLineInputError()
model = argv[0]
lib.qdevice.disable(model)
+
+# following commands are internal use only, called from pcsd
+
+def qdevice_net_client_setup_cmd(lib, argv, modifiers):
+ ca_certificate = _read_stdin()
+ lib.qdevice.client_net_setup(ca_certificate)
+
+def qdevice_net_client_import_certificate_cmd(lib, argv, modifiers):
+ certificate = _read_stdin()
+ lib.qdevice.client_net_import_certificate(certificate)
+
+def qdevice_net_client_destroy(lib, argv, modifiers):
+ lib.qdevice.client_net_destroy()
+
+def qdevice_sign_net_cert_request_cmd(lib, argv, modifiers):
+ certificate_request = _read_stdin()
+ signed = lib.qdevice.sign_net_cert_request(
+ certificate_request,
+ modifiers["name"]
+ )
+ if sys.version_info.major > 2:
+ # In python3 base64.b64encode returns bytes.
+ # In python2 base64.b64encode returns string.
+ # Bytes is printed like this: b'bytes content'
+ # and we need to get rid of that b'', so we change bytes to string.
+ # Since it's base64encoded, it's safe to use ascii.
+ signed = signed.decode("ascii")
+ print(signed)
+
+def _read_stdin():
+ # in python3 stdin returns str so we need to use buffer
+ if hasattr(sys.stdin, "buffer"):
+ return sys.stdin.buffer.read()
+ else:
+ return sys.stdin.read()
diff --git a/pcs/quorum.py b/pcs/quorum.py
index f793a21..2d54ed7 100644
--- a/pcs/quorum.py
+++ b/pcs/quorum.py
@@ -28,6 +28,10 @@ def quorum_cmd(lib, argv, modificators):
usage.quorum(argv)
elif sub_cmd == "config":
quorum_config_cmd(lib, argv_next, modificators)
+ elif sub_cmd == "expected-votes":
+ quorum_expected_votes_cmd(lib, argv_next, modificators)
+ elif sub_cmd == "status":
+ quorum_status_cmd(lib, argv_next, modificators)
elif sub_cmd == "device":
quorum_device_cmd(lib, argv_next, modificators)
elif sub_cmd == "unblock":
@@ -51,6 +55,8 @@ def quorum_device_cmd(lib, argv, modificators):
quorum_device_add_cmd(lib, argv_next, modificators)
elif sub_cmd == "remove":
quorum_device_remove_cmd(lib, argv_next, modificators)
+ elif sub_cmd == "status":
+ quorum_device_status_cmd(lib, argv_next, modificators)
elif sub_cmd == "update":
quorum_device_update_cmd(lib, argv_next, modificators)
else:
@@ -97,6 +103,26 @@ def quorum_config_to_str(config):
return lines
+def quorum_expected_votes_cmd(lib, argv, modificators):
+ if len(argv) != 1:
+ raise CmdLineInputError()
+ lib.quorum.set_expected_votes_live(argv[0])
+
+def quorum_status_cmd(lib, argv, modificators):
+ if argv:
+ raise CmdLineInputError()
+ print(lib.quorum.status())
+
+def quorum_update_cmd(lib, argv, modificators):
+ options = parse_args.prepare_options(argv)
+ if not options:
+ raise CmdLineInputError()
+
+ lib.quorum.set_options(
+ options,
+ skip_offline_nodes=modificators["skip_offline_nodes"]
+ )
+
def quorum_device_add_cmd(lib, argv, modificators):
# we expect "model" keyword once, followed by the actual model value
options_lists = parse_args.split_list(argv, "model")
@@ -131,6 +157,11 @@ def quorum_device_remove_cmd(lib, argv, modificators):
skip_offline_nodes=modificators["skip_offline_nodes"]
)
+def quorum_device_status_cmd(lib, argv, modificators):
+ if argv:
+ raise CmdLineInputError()
+ print(lib.quorum.status_device(modificators["full"]))
+
def quorum_device_update_cmd(lib, argv, modificators):
# we expect "model" keyword once
options_lists = parse_args.split_list(argv, "model")
@@ -154,13 +185,3 @@ def quorum_device_update_cmd(lib, argv, modificators):
force_options=modificators["force"],
skip_offline_nodes=modificators["skip_offline_nodes"]
)
-
-def quorum_update_cmd(lib, argv, modificators):
- options = parse_args.prepare_options(argv)
- if not options:
- raise CmdLineInputError()
-
- lib.quorum.set_options(
- options,
- skip_offline_nodes=modificators["skip_offline_nodes"]
- )
diff --git a/pcs/resource.py b/pcs/resource.py
index 284bdb2..9384a21 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -21,6 +21,8 @@ from pcs import (
constraint,
settings,
)
+from pcs.settings import pacemaker_wait_timeout_status as \
+ PACEMAKER_WAIT_TIMEOUT_STATUS
import pcs.lib.cib.acl as lib_acl
import pcs.lib.pacemaker as lib_pacemaker
from pcs.lib.external import get_systemd_services
@@ -31,7 +33,6 @@ from pcs.lib.pacemaker_values import timeout_to_seconds
import pcs.lib.resource_agent as lib_ra
-PACEMAKER_WAIT_TIMEOUT_STATUS = 62
RESOURCE_RELOCATE_CONSTRAINT_PREFIX = "pcs-relocate-"
def resource_cmd(argv):
diff --git a/pcs/settings_default.py b/pcs/settings_default.py
index 3acd8e0..89b4d0e 100644
--- a/pcs/settings_default.py
+++ b/pcs/settings_default.py
@@ -2,25 +2,27 @@ import os.path
pacemaker_binaries = "/usr/sbin/"
corosync_binaries = "/usr/sbin/"
+corosync_qnet_binaries = "/usr/bin/"
ccs_binaries = "/usr/sbin/"
corosync_conf_dir = "/etc/corosync/"
corosync_conf_file = os.path.join(corosync_conf_dir, "corosync.conf")
corosync_uidgid_dir = os.path.join(corosync_conf_dir, "uidgid.d/")
corosync_qdevice_net_server_certs_dir = os.path.join(
corosync_conf_dir,
- "qdevice/net/qnetd/nssdb"
+ "qnetd/nssdb"
)
corosync_qdevice_net_client_certs_dir = os.path.join(
corosync_conf_dir,
- "qdevice/net/node/nssdb"
+ "qdevice/net/nssdb"
)
+corosync_qdevice_net_client_ca_file_name = "qnetd-cacert.crt"
cluster_conf_file = "/etc/cluster/cluster.conf"
fence_agent_binaries = "/usr/sbin/"
pengine_binary = "/usr/libexec/pacemaker/pengine"
crmd_binary = "/usr/libexec/pacemaker/crmd"
cib_binary = "/usr/libexec/pacemaker/cib"
stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.152"
+pcs_version = "0.9.153"
crm_report = pacemaker_binaries + "crm_report"
crm_verify = pacemaker_binaries + "crm_verify"
crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng'
@@ -38,3 +40,4 @@ ocf_resources = os.path.join(ocf_root, "resource.d/")
nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata/"
sbd_watchdog_default = "/dev/watchdog"
sbd_config = "/etc/sysconfig/sbd"
+pacemaker_wait_timeout_status = 62
diff --git a/pcs/test/resources/cib-empty-2.5.xml b/pcs/test/resources/cib-empty-2.5.xml
new file mode 100644
index 0000000..1b4fb0a
--- /dev/null
+++ b/pcs/test/resources/cib-empty-2.5.xml
@@ -0,0 +1,10 @@
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.5" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+ <configuration>
+ <crm_config/>
+ <nodes>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+ <status/>
+</cib>
diff --git a/pcs/test/resources/qdevice-certs/qnetd-cacert.crt b/pcs/test/resources/qdevice-certs/qnetd-cacert.crt
new file mode 100644
index 0000000..34dcab0
--- /dev/null
+++ b/pcs/test/resources/qdevice-certs/qnetd-cacert.crt
@@ -0,0 +1 @@
+certificate data
\ No newline at end of file
diff --git a/pcs/test/suite.py b/pcs/test/suite.py
index 85dd20c..5b29918 100755
--- a/pcs/test/suite.py
+++ b/pcs/test/suite.py
@@ -74,7 +74,7 @@ def run_tests(tests, verbose=False, color=False):
verbosity=2 if verbose else 1,
resultclass=resultclass
)
- testRunner.run(tests)
+ return testRunner.run(tests)
put_package_to_path()
explicitly_enumerated_tests = [
@@ -85,7 +85,7 @@ explicitly_enumerated_tests = [
"--all-but",
)
]
-run_tests(
+test_result = run_tests(
discover_tests(explicitly_enumerated_tests, "--all-but" in sys.argv),
verbose="-v" in sys.argv,
color=(
@@ -99,6 +99,8 @@ run_tests(
)
),
)
+if not test_result.wasSuccessful():
+ sys.exit(1)
# assume that we are in pcs root dir
#
diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py
new file mode 100644
index 0000000..905dc9f
--- /dev/null
+++ b/pcs/test/test_alert.py
@@ -0,0 +1,363 @@
+
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+import shutil
+import sys
+
+from pcs.test.tools.misc import (
+ get_test_resource as rc,
+ is_minimum_pacemaker_version,
+)
+from pcs.test.tools.assertions import AssertPcsMixin
+from pcs.test.tools.pcs_runner import PcsRunner
+
+major, minor = sys.version_info[:2]
+if major == 2 and minor == 6:
+ import unittest2 as unittest
+else:
+ import unittest
+
+
+old_cib = rc("cib-empty.xml")
+empty_cib = rc("cib-empty-2.5.xml")
+temp_cib = rc("temp-cib.xml")
+
+
+ALERTS_SUPPORTED = is_minimum_pacemaker_version(1, 1, 15)
+ALERTS_NOT_SUPPORTED_MSG = "Pacemaker version is too old (must be >= 1.1.15)" +\
+ " to test alerts"
+
+
+class PcsAlertTest(unittest.TestCase, AssertPcsMixin):
+ def setUp(self):
+ shutil.copy(empty_cib, temp_cib)
+ self.pcs_runner = PcsRunner(temp_cib)
+
+
+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+class AlertCibUpgradeTest(unittest.TestCase, AssertPcsMixin):
+ def setUp(self):
+ shutil.copy(old_cib, temp_cib)
+ self.pcs_runner = PcsRunner(temp_cib)
+
+ def test_cib_upgrade(self):
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ No alerts defined
+"""
+ )
+
+ self.assert_pcs_success(
+ "alert create path=test",
+ "CIB has been upgraded to the latest schema version.\n"
+ )
+
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert (path=test)
+"""
+ )
+
+
+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+class CreateAlertTest(PcsAlertTest):
+ def test_create_multiple_without_id(self):
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ No alerts defined
+"""
+ )
+
+ self.assert_pcs_success("alert create path=test")
+ self.assert_pcs_success("alert create path=test")
+ self.assert_pcs_success("alert create path=test2")
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert (path=test)
+ Alert: alert-1 (path=test)
+ Alert: alert-2 (path=test2)
+"""
+ )
+
+ def test_create_multiple_with_id(self):
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ No alerts defined
+"""
+ )
+ self.assert_pcs_success("alert create id=alert1 path=test")
+ self.assert_pcs_success(
+ "alert create id=alert2 description=desc path=test"
+ )
+ self.assert_pcs_success(
+ "alert create description=desc2 path=test2 id=alert3"
+ )
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert1 (path=test)
+ Alert: alert2 (path=test)
+ Description: desc
+ Alert: alert3 (path=test2)
+ Description: desc2
+"""
+ )
+
+ def test_create_with_options(self):
+ self.assert_pcs_success(
+ "alert create id=alert1 description=desc path=test "
+ "options opt1=val1 opt2=val2 meta m1=v1 m2=v2"
+ )
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert1 (path=test)
+ Description: desc
+ Options: opt1=val1 opt2=val2
+ Meta options: m1=v1 m2=v2
+"""
+ )
+
+ def test_already_exists(self):
+ self.assert_pcs_success("alert create id=alert1 path=test")
+ self.assert_pcs_fail(
+ "alert create id=alert1 path=test",
+ "Error: 'alert1' already exists\n"
+ )
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert1 (path=test)
+"""
+ )
+
+ def test_path_is_required(self):
+ self.assert_pcs_fail(
+ "alert create id=alert1",
+ "Error: required option 'path' is missing\n"
+ )
+
+
+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+class UpdateAlertTest(PcsAlertTest):
+ def test_update_everything(self):
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ No alerts defined
+"""
+ )
+ self.assert_pcs_success(
+ "alert create id=alert1 description=desc path=test "
+ "options opt1=val1 opt2=val2 meta m1=v1 m2=v2"
+ )
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert1 (path=test)
+ Description: desc
+ Options: opt1=val1 opt2=val2
+ Meta options: m1=v1 m2=v2
+"""
+ )
+ self.assert_pcs_success(
+ "alert update alert1 description=new_desc path=/new/path "
+ "options opt1= opt2=test opt3=1 meta m1= m2=v m3=3"
+ )
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert1 (path=/new/path)
+ Description: new_desc
+ Options: opt2=test opt3=1
+ Meta options: m2=v m3=3
+"""
+ )
+
+ def test_not_existing_alert(self):
+ self.assert_pcs_fail(
+ "alert update alert1", "Error: Alert 'alert1' not found.\n"
+ )
+
+
+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+class RemoveAlertTest(PcsAlertTest):
+ def test_not_existing_alert(self):
+ self.assert_pcs_fail(
+ "alert remove alert1", "Error: Alert 'alert1' not found.\n"
+ )
+
+ def test_success(self):
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ No alerts defined
+"""
+ )
+
+ self.assert_pcs_success("alert create path=test")
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert (path=test)
+"""
+ )
+ self.assert_pcs_success("alert remove alert")
+
+
+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+class AddRecipientTest(PcsAlertTest):
+ def test_success(self):
+ self.assert_pcs_success("alert create path=test")
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert (path=test)
+"""
+ )
+ self.assert_pcs_success("alert recipient add alert rec_value")
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert (path=test)
+ Recipients:
+ Recipient: rec_value
+"""
+ )
+ self.assert_pcs_success(
+ "alert recipient add alert rec_value2 description=description "
+ "options o1=1 o2=2 meta m1=v1 m2=v2"
+ )
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert (path=test)
+ Recipients:
+ Recipient: rec_value
+ Recipient: rec_value2
+ Description: description
+ Options: o1=1 o2=2
+ Meta options: m1=v1 m2=v2
+"""
+ )
+
+ def test_no_alert(self):
+ self.assert_pcs_fail(
+ "alert recipient add alert rec_value",
+ "Error: Alert 'alert' not found.\n"
+ )
+
+ def test_already_exists(self):
+ self.assert_pcs_success("alert create path=test")
+ self.assert_pcs_success("alert recipient add alert rec_value")
+ self.assert_pcs_fail(
+ "alert recipient add alert rec_value",
+ "Error: Recipient 'rec_value' in alert 'alert' already exists.\n"
+ )
+
+
+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+class UpdateRecipientAlert(PcsAlertTest):
+ def test_success(self):
+ self.assert_pcs_success("alert create path=test")
+ self.assert_pcs_success(
+ "alert recipient add alert rec_value description=description "
+ "options o1=1 o2=2 meta m1=v1 m2=v2"
+ )
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert (path=test)
+ Recipients:
+ Recipient: rec_value
+ Description: description
+ Options: o1=1 o2=2
+ Meta options: m1=v1 m2=v2
+"""
+ )
+ self.assert_pcs_success(
+ "alert recipient update alert rec_value description=desc "
+ "options o1= o2=v2 o3=3 meta m1= m2=2 m3=3"
+ )
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert (path=test)
+ Recipients:
+ Recipient: rec_value
+ Description: desc
+ Options: o2=v2 o3=3
+ Meta options: m2=2 m3=3
+"""
+ )
+
+ def test_no_alert(self):
+ self.assert_pcs_fail(
+ "alert recipient update alert rec_value description=desc",
+ "Error: Alert 'alert' not found.\n"
+ )
+
+ def test_no_recipient(self):
+ self.assert_pcs_success("alert create path=test")
+ self.assert_pcs_fail(
+ "alert recipient update alert rec_value description=desc",
+ "Error: Recipient 'rec_value' not found in alert 'alert'.\n"
+ )
+
+
+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+class RemoveRecipientTest(PcsAlertTest):
+ def test_success(self):
+ self.assert_pcs_success("alert create path=test")
+ self.assert_pcs_success("alert recipient add alert rec_value")
+ self.assert_pcs_success(
+ "alert config",
+ """\
+Alerts:
+ Alert: alert (path=test)
+ Recipients:
+ Recipient: rec_value
+"""
+ )
+ self.assert_pcs_success("alert recipient remove alert rec_value")
+
+ def test_no_alert(self):
+ self.assert_pcs_fail(
+ "alert recipient remove alert rec_value",
+ "Error: Alert 'alert' not found.\n"
+ )
+
+ def test_no_recipient(self):
+ self.assert_pcs_success("alert create path=test")
+ self.assert_pcs_fail(
+ "alert recipient remove alert rec_value",
+ "Error: Recipient 'rec_value' not found in alert 'alert'.\n"
+ )
diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py
index 405a270..1149a3f 100644
--- a/pcs/test/test_lib_cib_tools.py
+++ b/pcs/test/test_lib_cib_tools.py
@@ -7,12 +7,18 @@ from __future__ import (
from unittest import TestCase
-from pcs.test.tools.assertions import assert_raise_library_error
+from lxml import etree
+
+from pcs.test.tools.assertions import (
+ assert_raise_library_error,
+ assert_xml_equal,
+)
from pcs.test.tools.misc import get_test_resource as rc
from pcs.test.tools.pcs_mock import mock
from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
from pcs.common import report_codes
+from pcs.lib.external import CommandRunner
from pcs.lib.errors import ReportItemSeverity as severities
from pcs.lib.cib import tools as lib
@@ -145,3 +151,176 @@ class ValidateIdDoesNotExistsTest(TestCase):
),
)
does_id_exists.assert_called_once_with("tree", "some-id")
+
+
+class GetSubElementTest(TestCase):
+ def setUp(self):
+ self.root = etree.Element("root")
+ self.sub = etree.SubElement(self.root, "sub_element")
+
+ def test_sub_element_exists(self):
+ self.assertEqual(
+ self.sub, lib.get_sub_element(self.root, "sub_element")
+ )
+
+ def test_new_no_id(self):
+ assert_xml_equal(
+ '<new_element/>',
+ etree.tostring(
+ lib.get_sub_element(self.root, "new_element")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <root>
+ <sub_element/>
+ <new_element/>
+ </root>
+ """,
+ etree.tostring(self.root).decode()
+ )
+
+ def test_new_with_id(self):
+ assert_xml_equal(
+ '<new_element id="new_id"/>',
+ etree.tostring(
+ lib.get_sub_element(self.root, "new_element", "new_id")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <root>
+ <sub_element/>
+ <new_element id="new_id"/>
+ </root>
+ """,
+ etree.tostring(self.root).decode()
+ )
+
+ def test_new_first(self):
+ lib.get_sub_element(self.root, "new_element", "new_id", 0)
+ assert_xml_equal(
+ """
+ <root>
+ <new_element id="new_id"/>
+ <sub_element/>
+ </root>
+ """,
+ etree.tostring(self.root).decode()
+ )
+
+ def test_new_last(self):
+ lib.get_sub_element(self.root, "new_element", "new_id", None)
+ assert_xml_equal(
+ """
+ <root>
+ <sub_element/>
+ <new_element id="new_id"/>
+ </root>
+ """,
+ etree.tostring(self.root).decode()
+ )
+
+
+class GetPacemakerVersionByWhichCibWasValidatedTest(TestCase):
+ def test_missing_attribute(self):
+ assert_raise_library_error(
+ lambda: lib.get_pacemaker_version_by_which_cib_was_validated(
+ etree.XML("<cib/>")
+ ),
+ (
+ severities.ERROR,
+ report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
+ {}
+ )
+ )
+
+ def test_invalid_version(self):
+ assert_raise_library_error(
+ lambda: lib.get_pacemaker_version_by_which_cib_was_validated(
+ etree.XML('<cib validate-with="something-1.2.3"/>')
+ ),
+ (
+ severities.ERROR,
+ report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
+ {}
+ )
+ )
+
+ def test_no_revision(self):
+ self.assertEqual(
+ (1, 2, 0),
+ lib.get_pacemaker_version_by_which_cib_was_validated(
+ etree.XML('<cib validate-with="pacemaker-1.2"/>')
+ )
+ )
+
+ def test_with_revision(self):
+ self.assertEqual(
+ (1, 2, 3),
+ lib.get_pacemaker_version_by_which_cib_was_validated(
+ etree.XML('<cib validate-with="pacemaker-1.2.3"/>')
+ )
+ )
+
+
+ at mock.patch("pcs.lib.cib.tools.upgrade_cib")
+class EnsureCibVersionTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+ self.cib = etree.XML('<cib validate-with="pacemaker-2.3.4"/>')
+
+ def test_same_version(self, mock_upgrade_cib):
+ self.assertTrue(
+ lib.ensure_cib_version(
+ self.mock_runner, self.cib, (2, 3, 4)
+ ) is None
+ )
+ self.assertEqual(0, mock_upgrade_cib.run.call_count)
+
+ def test_higher_version(self, mock_upgrade_cib):
+ self.assertTrue(
+ lib.ensure_cib_version(
+ self.mock_runner, self.cib, (2, 3, 3)
+ ) is None
+ )
+ self.assertEqual(0, mock_upgrade_cib.call_count)
+
+ def test_upgraded_same_version(self, mock_upgrade_cib):
+ upgraded_cib = etree.XML('<cib validate-with="pacemaker-2.3.5"/>')
+ mock_upgrade_cib.return_value = upgraded_cib
+ self.assertEqual(
+ upgraded_cib,
+ lib.ensure_cib_version(
+ self.mock_runner, self.cib, (2, 3, 5)
+ )
+ )
+ mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner)
+
+ def test_upgraded_higher_version(self, mock_upgrade_cib):
+ upgraded_cib = etree.XML('<cib validate-with="pacemaker-2.3.6"/>')
+ mock_upgrade_cib.return_value = upgraded_cib
+ self.assertEqual(
+ upgraded_cib,
+ lib.ensure_cib_version(
+ self.mock_runner, self.cib, (2, 3, 5)
+ )
+ )
+ mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner)
+
+ def test_upgraded_lower_version(self, mock_upgrade_cib):
+ mock_upgrade_cib.return_value = self.cib
+ assert_raise_library_error(
+ lambda: lib.ensure_cib_version(
+ self.mock_runner, self.cib, (2, 3, 5)
+ ),
+ (
+ severities.ERROR,
+ report_codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION,
+ {
+ "required_version": "2.3.5",
+ "current_version": "2.3.4"
+ }
+ )
+ )
+ mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner)
diff --git a/pcs/test/test_lib_commands_qdevice.py b/pcs/test/test_lib_commands_qdevice.py
index 3900c1d..ff588d5 100644
--- a/pcs/test/test_lib_commands_qdevice.py
+++ b/pcs/test/test_lib_commands_qdevice.py
@@ -6,6 +6,7 @@ from __future__ import (
)
from unittest import TestCase
+import base64
import logging
from pcs.test.tools.pcs_mock import mock
@@ -58,6 +59,11 @@ class QdeviceDisabledOnCmanTest(QdeviceTestCase):
lambda: lib.qdevice_destroy(self.lib_env, "bad model")
)
+ def test_status_text(self):
+ self.base_test(
+ lambda: lib.qdevice_status_text(self.lib_env, "bad model")
+ )
+
def test_enable(self):
self.base_test(
lambda: lib.qdevice_enable(self.lib_env, "bad model")
@@ -83,6 +89,30 @@ class QdeviceDisabledOnCmanTest(QdeviceTestCase):
lambda: lib.qdevice_kill(self.lib_env, "bad model")
)
+ def test_qdevice_net_sign_certificate_request(self):
+ self.base_test(
+ lambda: lib.qdevice_net_sign_certificate_request(
+ self.lib_env,
+ "certificate request",
+ "cluster name"
+ )
+ )
+
+ def test_client_net_setup(self):
+ self.base_test(
+ lambda: lib.client_net_setup(self.lib_env, "ca certificate")
+ )
+
+ def test_client_net_import_certificate(self):
+ self.base_test(
+ lambda: lib.client_net_import_certificate(self.lib_env, "cert")
+ )
+
+ def test_client_net_destroy(self):
+ self.base_test(
+ lambda: lib.client_net_destroy(self.lib_env)
+ )
+
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
class QdeviceBadModelTest(QdeviceTestCase):
@@ -110,6 +140,11 @@ class QdeviceBadModelTest(QdeviceTestCase):
lambda: lib.qdevice_destroy(self.lib_env, "bad model")
)
+ def test_status_text(self):
+ self.base_test(
+ lambda: lib.qdevice_status_text(self.lib_env, "bad model")
+ )
+
def test_enable(self):
self.base_test(
lambda: lib.qdevice_enable(self.lib_env, "bad model")
@@ -489,6 +524,80 @@ class QdeviceNetDestroyTest(QdeviceTestCase):
)
+@mock.patch("pcs.lib.commands.qdevice.qdevice_net.qdevice_status_cluster_text")
+@mock.patch("pcs.lib.commands.qdevice.qdevice_net.qdevice_status_generic_text")
+@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+@mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: "mock_runner"
+)
+class TestQdeviceNetStatusTextTest(QdeviceTestCase):
+ def test_success(self, mock_status_generic, mock_status_cluster):
+ mock_status_generic.return_value = "generic status info\n"
+ mock_status_cluster.return_value = "cluster status info\n"
+
+ self.assertEquals(
+ lib.qdevice_status_text(self.lib_env, "net"),
+ "generic status info\ncluster status info\n"
+ )
+
+ mock_status_generic.assert_called_once_with("mock_runner", False)
+ mock_status_cluster.assert_called_once_with("mock_runner", None, False)
+
+ def test_success_verbose(self, mock_status_generic, mock_status_cluster):
+ mock_status_generic.return_value = "generic status info\n"
+ mock_status_cluster.return_value = "cluster status info\n"
+
+ self.assertEquals(
+ lib.qdevice_status_text(self.lib_env, "net", verbose=True),
+ "generic status info\ncluster status info\n"
+ )
+
+ mock_status_generic.assert_called_once_with("mock_runner", True)
+ mock_status_cluster.assert_called_once_with("mock_runner", None, True)
+
+ def test_success_cluster(self, mock_status_generic, mock_status_cluster):
+ mock_status_generic.return_value = "generic status info\n"
+ mock_status_cluster.return_value = "cluster status info\n"
+
+ self.assertEquals(
+ lib.qdevice_status_text(self.lib_env, "net", cluster="name"),
+ "generic status info\ncluster status info\n"
+ )
+
+ mock_status_generic.assert_called_once_with("mock_runner", False)
+ mock_status_cluster.assert_called_once_with("mock_runner", "name", False)
+
+ def test_error_generic_status(
+ self, mock_status_generic, mock_status_cluster
+ ):
+ mock_status_generic.side_effect = LibraryError("mock_report_item")
+ mock_status_cluster.return_value = "cluster status info\n"
+
+ self.assertRaises(
+ LibraryError,
+ lambda: lib.qdevice_status_text(self.lib_env, "net")
+ )
+
+ mock_status_generic.assert_called_once_with("mock_runner", False)
+ mock_status_cluster.assert_not_called()
+
+ def test_error_cluster_status(
+ self, mock_status_generic, mock_status_cluster
+ ):
+ mock_status_generic.return_value = "generic status info\n"
+ mock_status_cluster.side_effect = LibraryError("mock_report_item")
+
+ self.assertRaises(
+ LibraryError,
+ lambda: lib.qdevice_status_text(self.lib_env, "net")
+ )
+
+ mock_status_generic.assert_called_once_with("mock_runner", False)
+ mock_status_cluster.assert_called_once_with("mock_runner", None, False)
+
+
@mock.patch("pcs.lib.external.enable_service")
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
@mock.patch.object(
@@ -757,3 +866,149 @@ class QdeviceNetKillTest(QdeviceTestCase):
"mock_runner",
["corosync-qnetd"]
)
+
+
+@mock.patch(
+ "pcs.lib.commands.qdevice.qdevice_net.qdevice_sign_certificate_request"
+)
+@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+@mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: "mock_runner"
+)
+class QdeviceNetSignCertificateRequestTest(QdeviceTestCase):
+ def test_success(self, mock_qdevice_func):
+ qdevice_func_input = "certificate request".encode("utf-8")
+ qdevice_func_output = "signed certificate".encode("utf-8")
+ mock_qdevice_func.return_value = qdevice_func_output
+ cluster_name = "clusterName"
+
+ self.assertEqual(
+ base64.b64encode(qdevice_func_output),
+ lib.qdevice_net_sign_certificate_request(
+ self.lib_env,
+ base64.b64encode(qdevice_func_input),
+ cluster_name
+ )
+ )
+
+ mock_qdevice_func.assert_called_once_with(
+ "mock_runner",
+ qdevice_func_input,
+ cluster_name
+ )
+
+ def test_bad_input(self, mock_qdevice_func):
+ qdevice_func_input = "certificate request".encode("utf-8")
+ cluster_name = "clusterName"
+
+ assert_raise_library_error(
+ lambda: lib.qdevice_net_sign_certificate_request(
+ self.lib_env,
+ qdevice_func_input,
+ cluster_name
+ ),
+ (
+ severity.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "qnetd certificate request",
+ "option_value": qdevice_func_input,
+ "allowed_values": ["base64 encoded certificate"],
+ }
+ )
+ )
+
+ mock_qdevice_func.assert_not_called()
+
+
+@mock.patch("pcs.lib.commands.qdevice.qdevice_net.client_setup")
+@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+@mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: "mock_runner"
+)
+class ClientNetSetupTest(QdeviceTestCase):
+ def test_success(self, mock_qdevice_func):
+ qdevice_func_input = "CA certificate".encode("utf-8")
+
+ lib.client_net_setup(self.lib_env, base64.b64encode(qdevice_func_input))
+
+ mock_qdevice_func.assert_called_once_with(
+ "mock_runner",
+ qdevice_func_input
+ )
+
+ def test_bad_input(self, mock_qdevice_func):
+ qdevice_func_input = "CA certificate".encode("utf-8")
+
+ assert_raise_library_error(
+ lambda: lib.client_net_setup(self.lib_env, qdevice_func_input),
+ (
+ severity.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "qnetd CA certificate",
+ "option_value": qdevice_func_input,
+ "allowed_values": ["base64 encoded certificate"],
+ }
+ )
+ )
+
+ mock_qdevice_func.assert_not_called()
+
+
+@mock.patch(
+ "pcs.lib.commands.qdevice.qdevice_net.client_import_certificate_and_key"
+)
+@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+@mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: "mock_runner"
+)
+class ClientNetImportCertificateTest(QdeviceTestCase):
+ def test_success(self, mock_qdevice_func):
+ qdevice_func_input = "client certificate".encode("utf-8")
+
+ lib.client_net_import_certificate(
+ self.lib_env,
+ base64.b64encode(qdevice_func_input)
+ )
+
+ mock_qdevice_func.assert_called_once_with(
+ "mock_runner",
+ qdevice_func_input
+ )
+
+ def test_bad_input(self, mock_qdevice_func):
+ qdevice_func_input = "client certificate".encode("utf-8")
+
+ assert_raise_library_error(
+ lambda: lib.client_net_import_certificate(
+ self.lib_env,
+ qdevice_func_input
+ ),
+ (
+ severity.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "qnetd client certificate",
+ "option_value": qdevice_func_input,
+ "allowed_values": ["base64 encoded certificate"],
+ }
+ )
+ )
+
+ mock_qdevice_func.assert_not_called()
+
+
+@mock.patch("pcs.lib.commands.qdevice.qdevice_net.client_destroy")
+@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+class ClientNetDestroyTest(QdeviceTestCase):
+ def test_success(self, mock_qdevice_func):
+ lib.client_net_destroy(self.lib_env)
+ mock_qdevice_func.assert_called_once_with()
+
diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
index 5725381..826251a 100644
--- a/pcs/test/test_lib_commands_quorum.py
+++ b/pcs/test/test_lib_commands_quorum.py
@@ -21,7 +21,12 @@ from pcs.test.tools.pcs_mock import mock
from pcs.common import report_codes
from pcs.lib.env import LibraryEnvironment
-from pcs.lib.errors import ReportItemSeverity as severity
+from pcs.lib.errors import (
+ LibraryError,
+ ReportItemSeverity as severity,
+)
+from pcs.lib.external import NodeCommunicationException
+from pcs.lib.node import NodeAddresses, NodeAddressesList
from pcs.lib.commands import quorum as lib
@@ -243,25 +248,102 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
mock_push_corosync.assert_not_called()
+@mock.patch("pcs.lib.commands.quorum.corosync_live.get_quorum_status_text")
+@mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: "mock_runner"
+)
+class StatusTextTest(TestCase, CmanMixin):
+ def setUp(self):
+ self.mock_logger = mock.MagicMock(logging.Logger)
+ self.mock_reporter = MockLibraryReportProcessor()
+ self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+ def test_disabled_on_cman(self, mock_status):
+ self.assert_disabled_on_cman(
+ lambda: lib.status_text(self.lib_env)
+ )
+ mock_status.assert_not_called()
+
+ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ def test_success(self, mock_status):
+ mock_status.return_value = "status text"
+ self.assertEqual(
+ lib.status_text(self.lib_env),
+ "status text"
+ )
+ mock_status.assert_called_once_with("mock_runner")
+
+
+@mock.patch("pcs.lib.commands.quorum.qdevice_client.get_status_text")
+@mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: "mock_runner"
+)
+class StatusDeviceTextTest(TestCase, CmanMixin):
+ def setUp(self):
+ self.mock_logger = mock.MagicMock(logging.Logger)
+ self.mock_reporter = MockLibraryReportProcessor()
+ self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+ def test_disabled_on_cman(self, mock_status):
+ self.assert_disabled_on_cman(
+ lambda: lib.status_device_text(self.lib_env)
+ )
+ mock_status.assert_not_called()
+
+ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ def test_success(self, mock_status):
+ mock_status.return_value = "status text"
+ self.assertEqual(
+ lib.status_device_text(self.lib_env),
+ "status text"
+ )
+ mock_status.assert_called_once_with("mock_runner", False)
+
+ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ def test_success_verbose(self, mock_status):
+ mock_status.return_value = "status text"
+ self.assertEqual(
+ lib.status_device_text(self.lib_env, True),
+ "status text"
+ )
+ mock_status.assert_called_once_with("mock_runner", True)
+
+
@mock.patch.object(LibraryEnvironment, "push_corosync_conf")
@mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
+@mock.patch("pcs.lib.commands.quorum._add_device_model_net")
+@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_enable")
+@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_start")
class AddDeviceTest(TestCase, CmanMixin):
def setUp(self):
self.mock_logger = mock.MagicMock(logging.Logger)
self.mock_reporter = MockLibraryReportProcessor()
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
- def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync):
+ def test_disabled_on_cman(
+ self, mock_client_start, mock_client_enable, mock_add_net,
+ mock_get_corosync, mock_push_corosync
+ ):
lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assert_disabled_on_cman(
lambda: lib.add_device(lib_env, "net", {"host": "127.0.0.1"}, {})
)
mock_get_corosync.assert_not_called()
mock_push_corosync.assert_not_called()
+ mock_add_net.assert_not_called()
+ mock_client_enable.assert_not_called()
+ mock_client_start.assert_not_called()
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
def test_enabled_on_cman_if_not_live(
- self, mock_get_corosync, mock_push_corosync
+ self, mock_client_start, mock_client_enable, mock_add_net,
+ mock_get_corosync, mock_push_corosync
):
original_conf = open(rc("corosync-3nodes.conf")).read()
mock_get_corosync.return_value = original_conf
@@ -287,9 +369,15 @@ class AddDeviceTest(TestCase, CmanMixin):
self.assertEqual(1, mock_get_corosync.call_count)
self.assertEqual(0, mock_push_corosync.call_count)
+ mock_add_net.assert_not_called()
+ mock_client_enable.assert_not_called()
+ mock_client_start.assert_not_called()
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- def test_success(self, mock_get_corosync, mock_push_corosync):
+ def test_success(
+ self, mock_client_start, mock_client_enable, mock_add_net,
+ mock_get_corosync, mock_push_corosync
+ ):
original_conf = open(rc("corosync-3nodes.conf")).read()
mock_get_corosync.return_value = original_conf
lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
@@ -311,6 +399,70 @@ class AddDeviceTest(TestCase, CmanMixin):
device {
timeout: 12345
model: net
+ votes: 1
+
+ net {
+ algorithm: ffsplit
+ host: 127.0.0.1
+ }
+ }
+"""
+ )
+ )
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.SERVICE_ENABLE_STARTED,
+ {
+ "service": "corosync-qdevice",
+ }
+ ),
+ (
+ severity.INFO,
+ report_codes.SERVICE_START_STARTED,
+ {
+ "service": "corosync-qdevice",
+ }
+ ),
+ ]
+ )
+ self.assertEqual(1, len(mock_add_net.mock_calls))
+ self.assertEqual(3, len(mock_client_enable.mock_calls))
+ self.assertEqual(3, len(mock_client_start.mock_calls))
+
+ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ def test_success_file(
+ self, mock_client_start, mock_client_enable, mock_add_net,
+ mock_get_corosync, mock_push_corosync
+ ):
+ original_conf = open(rc("corosync-3nodes.conf")).read()
+ mock_get_corosync.return_value = original_conf
+ lib_env = LibraryEnvironment(
+ self.mock_logger,
+ self.mock_reporter,
+ corosync_conf_data=original_conf
+ )
+
+ lib.add_device(
+ lib_env,
+ "net",
+ {"host": "127.0.0.1", "algorithm": "ffsplit"},
+ {"timeout": "12345"}
+ )
+
+ self.assertEqual(1, len(mock_push_corosync.mock_calls))
+ ac(
+ mock_push_corosync.mock_calls[0][1][0].config.export(),
+ original_conf.replace(
+ "provider: corosync_votequorum\n",
+ """provider: corosync_votequorum
+
+ device {
+ timeout: 12345
+ model: net
+ votes: 1
net {
algorithm: ffsplit
@@ -321,9 +473,15 @@ class AddDeviceTest(TestCase, CmanMixin):
)
)
self.assertEqual([], self.mock_reporter.report_item_list)
+ mock_add_net.assert_not_called()
+ mock_client_enable.assert_not_called()
+ mock_client_start.assert_not_called()
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- def test_invalid_options(self, mock_get_corosync, mock_push_corosync):
+ def test_invalid_options(
+ self, mock_client_start, mock_client_enable, mock_add_net,
+ mock_get_corosync, mock_push_corosync
+ ):
original_conf = open(rc("corosync-3nodes.conf")).read()
mock_get_corosync.return_value = original_conf
lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
@@ -349,9 +507,15 @@ class AddDeviceTest(TestCase, CmanMixin):
self.assertEqual(1, mock_get_corosync.call_count)
self.assertEqual(0, mock_push_corosync.call_count)
+ mock_add_net.assert_not_called()
+ mock_client_enable.assert_not_called()
+ mock_client_start.assert_not_called()
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- def test_invalid_options_forced(self, mock_get_corosync, mock_push_corosync):
+ def test_invalid_options_forced(
+ self, mock_client_start, mock_client_enable, mock_add_net,
+ mock_get_corosync, mock_push_corosync
+ ):
original_conf = open(rc("corosync-3nodes.conf")).read()
mock_get_corosync.return_value = original_conf
lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
@@ -375,7 +539,21 @@ class AddDeviceTest(TestCase, CmanMixin):
"option_type": "quorum device",
"allowed": ["sync_timeout", "timeout"],
}
- )
+ ),
+ (
+ severity.INFO,
+ report_codes.SERVICE_ENABLE_STARTED,
+ {
+ "service": "corosync-qdevice",
+ }
+ ),
+ (
+ severity.INFO,
+ report_codes.SERVICE_START_STARTED,
+ {
+ "service": "corosync-qdevice",
+ }
+ ),
]
)
self.assertEqual(1, mock_get_corosync.call_count)
@@ -389,6 +567,7 @@ class AddDeviceTest(TestCase, CmanMixin):
device {
bad_option: bad_value
model: net
+ votes: 1
net {
algorithm: ffsplit
@@ -398,9 +577,15 @@ class AddDeviceTest(TestCase, CmanMixin):
"""
)
)
+ self.assertEqual(1, len(mock_add_net.mock_calls))
+ self.assertEqual(3, len(mock_client_enable.mock_calls))
+ self.assertEqual(3, len(mock_client_start.mock_calls))
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- def test_invalid_model(self, mock_get_corosync, mock_push_corosync):
+ def test_invalid_model(
+ self, mock_client_start, mock_client_enable, mock_add_net,
+ mock_get_corosync, mock_push_corosync
+ ):
original_conf = open(rc("corosync-3nodes.conf")).read()
mock_get_corosync.return_value = original_conf
lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
@@ -421,9 +606,15 @@ class AddDeviceTest(TestCase, CmanMixin):
self.assertEqual(1, mock_get_corosync.call_count)
self.assertEqual(0, mock_push_corosync.call_count)
+ mock_add_net.assert_not_called()
+ mock_client_enable.assert_not_called()
+ mock_client_start.assert_not_called()
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- def test_invalid_model_forced(self, mock_get_corosync, mock_push_corosync):
+ def test_invalid_model_forced(
+ self, mock_client_start, mock_client_enable, mock_add_net,
+ mock_get_corosync, mock_push_corosync
+ ):
original_conf = open(rc("corosync-3nodes.conf")).read()
mock_get_corosync.return_value = original_conf
lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
@@ -441,7 +632,21 @@ class AddDeviceTest(TestCase, CmanMixin):
"option_value": "bad model",
"allowed_values": ("net", ),
},
- )
+ ),
+ (
+ severity.INFO,
+ report_codes.SERVICE_ENABLE_STARTED,
+ {
+ "service": "corosync-qdevice",
+ }
+ ),
+ (
+ severity.INFO,
+ report_codes.SERVICE_START_STARTED,
+ {
+ "service": "corosync-qdevice",
+ }
+ ),
]
)
self.assertEqual(1, mock_get_corosync.call_count)
@@ -458,25 +663,678 @@ class AddDeviceTest(TestCase, CmanMixin):
"""
)
)
+ mock_add_net.assert_not_called() # invalid model - don't setup net model
+ self.assertEqual(3, len(mock_client_enable.mock_calls))
+ self.assertEqual(3, len(mock_client_start.mock_calls))
+
+
+@mock.patch(
+    "pcs.lib.commands.quorum.qdevice_net.remote_client_import_certificate_and_key"
+)
+@mock.patch("pcs.lib.commands.quorum.qdevice_net.client_cert_request_to_pk12")
+@mock.patch(
+    "pcs.lib.commands.quorum.qdevice_net.remote_sign_certificate_request"
+)
+@mock.patch(
+    "pcs.lib.commands.quorum.qdevice_net.client_generate_certificate_request"
+)
+@mock.patch("pcs.lib.commands.quorum.qdevice_net.remote_client_setup")
+@mock.patch(
+    "pcs.lib.commands.quorum.qdevice_net.remote_qdevice_get_ca_certificate"
+)
+@mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+@mock.patch.object(
+    LibraryEnvironment,
+    "node_communicator",
+    lambda self: "mock_communicator"
+)
+class AddDeviceNetTest(TestCase):
+ #pylint: disable=too-many-instance-attributes
+ def setUp(self):
+ self.mock_logger = mock.MagicMock(logging.Logger)
+ self.mock_reporter = MockLibraryReportProcessor()
+ self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ self.qnetd_host = "qnetd_host"
+ self.cluster_name = "clusterName"
+ self.nodes = NodeAddressesList([
+ NodeAddresses("node1"),
+ NodeAddresses("node2"),
+ ])
+ self.ca_cert = "CA certificate"
+ self.cert_request = "client certificate request"
+ self.signed_cert = "signed certificate"
+ self.final_cert = "final client certificate"
+
+ def test_success(
+ self, mock_get_ca, mock_client_setup, mock_get_cert_request,
+ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
+ ):
+ mock_get_ca.return_value = self.ca_cert
+ mock_get_cert_request.return_value = self.cert_request
+ mock_sign_cert_request.return_value = self.signed_cert
+ mock_cert_to_pk12.return_value = self.final_cert
+ skip_offline_nodes = False
+
+ lib._add_device_model_net(
+ self.lib_env,
+ self.qnetd_host,
+ self.cluster_name,
+ self.nodes,
+ skip_offline_nodes
+ )
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
+ {}
+ ),
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+ {
+ "node": self.nodes[0].label
+ }
+ ),
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+ {
+ "node": self.nodes[1].label
+ }
+ ),
+ ]
+ )
+ mock_get_ca.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host
+ )
+ client_setup_calls = [
+ mock.call("mock_communicator", self.nodes[0], self.ca_cert),
+ mock.call("mock_communicator", self.nodes[1], self.ca_cert),
+ ]
+ self.assertEqual(
+ len(client_setup_calls),
+ len(mock_client_setup.mock_calls)
+ )
+ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
+ mock_get_cert_request.assert_called_once_with(
+ "mock_runner",
+ self.cluster_name
+ )
+ mock_sign_cert_request.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host,
+ self.cert_request,
+ self.cluster_name
+ )
+ mock_cert_to_pk12.assert_called_once_with(
+ "mock_runner",
+ self.signed_cert
+ )
+ client_import_calls = [
+ mock.call("mock_communicator", self.nodes[0], self.final_cert),
+ mock.call("mock_communicator", self.nodes[1], self.final_cert),
+ ]
+ self.assertEqual(
+ len(client_import_calls),
+ len(mock_import_cert.mock_calls)
+ )
+ mock_import_cert.assert_has_calls(client_import_calls, any_order=True)
+
+ def test_error_get_ca_cert(
+ self, mock_get_ca, mock_client_setup, mock_get_cert_request,
+ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
+ ):
+ mock_get_ca.side_effect = NodeCommunicationException(
+ "host", "command", "reason"
+ )
+ mock_get_cert_request.return_value = self.cert_request
+ mock_sign_cert_request.return_value = self.signed_cert
+ mock_cert_to_pk12.return_value = self.final_cert
+ skip_offline_nodes = False
+
+ assert_raise_library_error(
+ lambda: lib._add_device_model_net(
+ self.lib_env,
+ self.qnetd_host,
+ self.cluster_name,
+ self.nodes,
+ skip_offline_nodes
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR,
+ {}
+ )
+ )
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
+ {}
+ )
+ ]
+ )
+ mock_get_ca.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host
+ )
+ mock_client_setup.assert_not_called()
+ mock_get_cert_request.assert_not_called()
+ mock_sign_cert_request.assert_not_called()
+ mock_cert_to_pk12.assert_not_called()
+ mock_import_cert.assert_not_called()
+
+
+ def test_error_client_setup(
+ self, mock_get_ca, mock_client_setup, mock_get_cert_request,
+ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
+ ):
+ mock_get_ca.return_value = self.ca_cert
+ def raiser(communicator, node, cert):
+ if node == self.nodes[1]:
+ raise NodeCommunicationException("host", "command", "reason")
+ mock_client_setup.side_effect = raiser
+ mock_get_cert_request.return_value = self.cert_request
+ mock_sign_cert_request.return_value = self.signed_cert
+ mock_cert_to_pk12.return_value = self.final_cert
+ skip_offline_nodes = False
+
+ assert_raise_library_error(
+ lambda: lib._add_device_model_net(
+ self.lib_env,
+ self.qnetd_host,
+ self.cluster_name,
+ self.nodes,
+ skip_offline_nodes
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR,
+ {},
+ report_codes.SKIP_OFFLINE_NODES
+ )
+ )
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
+ {}
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR,
+ {},
+ report_codes.SKIP_OFFLINE_NODES
+ ),
+ ]
+ )
+ mock_get_ca.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host
+ )
+ client_setup_calls = [
+ mock.call("mock_communicator", self.nodes[0], self.ca_cert),
+ mock.call("mock_communicator", self.nodes[1], self.ca_cert),
+ ]
+ self.assertEqual(
+ len(client_setup_calls),
+ len(mock_client_setup.mock_calls)
+ )
+ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
+
+ def test_error_client_setup_skip_offline(
+ self, mock_get_ca, mock_client_setup, mock_get_cert_request,
+ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
+ ):
+ mock_get_ca.return_value = self.ca_cert
+ def raiser(communicator, node, cert):
+ if node == self.nodes[1]:
+ raise NodeCommunicationException("host", "command", "reason")
+ mock_client_setup.side_effect = raiser
+ mock_get_cert_request.return_value = self.cert_request
+ mock_sign_cert_request.return_value = self.signed_cert
+ mock_cert_to_pk12.return_value = self.final_cert
+ skip_offline_nodes = True
+
+ lib._add_device_model_net(
+ self.lib_env,
+ self.qnetd_host,
+ self.cluster_name,
+ self.nodes,
+ skip_offline_nodes
+ )
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
+ {}
+ ),
+ (
+ severity.WARNING,
+ report_codes.NODE_COMMUNICATION_ERROR,
+ {}
+ ),
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+ {
+ "node": self.nodes[0].label
+ }
+ ),
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+ {
+ "node": self.nodes[1].label
+ }
+ ),
+ ]
+ )
+ mock_get_ca.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host
+ )
+ client_setup_calls = [
+ mock.call("mock_communicator", self.nodes[0], self.ca_cert),
+ mock.call("mock_communicator", self.nodes[1], self.ca_cert),
+ ]
+ self.assertEqual(
+ len(client_setup_calls),
+ len(mock_client_setup.mock_calls)
+ )
+ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
+
+ def test_generate_cert_request_error(
+ self, mock_get_ca, mock_client_setup, mock_get_cert_request,
+ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
+ ):
+ mock_get_ca.return_value = self.ca_cert
+ mock_get_cert_request.side_effect = LibraryError()
+ mock_sign_cert_request.return_value = self.signed_cert
+ mock_cert_to_pk12.return_value = self.final_cert
+ skip_offline_nodes = False
+
+ self.assertRaises(
+ LibraryError,
+ lambda: lib._add_device_model_net(
+ self.lib_env,
+ self.qnetd_host,
+ self.cluster_name,
+ self.nodes,
+ skip_offline_nodes
+ )
+ )
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
+ {}
+ )
+ ]
+ )
+ mock_get_ca.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host
+ )
+ client_setup_calls = [
+ mock.call("mock_communicator", self.nodes[0], self.ca_cert),
+ mock.call("mock_communicator", self.nodes[1], self.ca_cert),
+ ]
+ self.assertEqual(
+ len(client_setup_calls),
+ len(mock_client_setup.mock_calls)
+ )
+ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
+ mock_get_cert_request.assert_called_once_with(
+ "mock_runner",
+ self.cluster_name
+ )
+ mock_sign_cert_request.assert_not_called()
+ mock_cert_to_pk12.assert_not_called()
+ mock_import_cert.assert_not_called()
+
+ def test_sign_certificate_error(
+ self, mock_get_ca, mock_client_setup, mock_get_cert_request,
+ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
+ ):
+ mock_get_ca.return_value = self.ca_cert
+ mock_get_cert_request.return_value = self.cert_request
+ mock_sign_cert_request.side_effect = NodeCommunicationException(
+ "host", "command", "reason"
+ )
+ mock_cert_to_pk12.return_value = self.final_cert
+ skip_offline_nodes = False
+
+ assert_raise_library_error(
+ lambda: lib._add_device_model_net(
+ self.lib_env,
+ self.qnetd_host,
+ self.cluster_name,
+ self.nodes,
+ skip_offline_nodes
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR,
+ {}
+ )
+ )
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
+ {}
+ )
+ ]
+ )
+ mock_get_ca.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host
+ )
+ client_setup_calls = [
+ mock.call("mock_communicator", self.nodes[0], self.ca_cert),
+ mock.call("mock_communicator", self.nodes[1], self.ca_cert),
+ ]
+ self.assertEqual(
+ len(client_setup_calls),
+ len(mock_client_setup.mock_calls)
+ )
+ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
+ mock_get_cert_request.assert_called_once_with(
+ "mock_runner",
+ self.cluster_name
+ )
+ mock_sign_cert_request.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host,
+ self.cert_request,
+ self.cluster_name
+ )
+ mock_cert_to_pk12.assert_not_called()
+ mock_import_cert.assert_not_called()
+
+ def test_certificate_to_pk12_error(
+ self, mock_get_ca, mock_client_setup, mock_get_cert_request,
+ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
+ ):
+ mock_get_ca.return_value = self.ca_cert
+ mock_get_cert_request.return_value = self.cert_request
+ mock_sign_cert_request.return_value = self.signed_cert
+ mock_cert_to_pk12.side_effect = LibraryError()
+ skip_offline_nodes = False
+
+ self.assertRaises(
+ LibraryError,
+ lambda: lib._add_device_model_net(
+ self.lib_env,
+ self.qnetd_host,
+ self.cluster_name,
+ self.nodes,
+ skip_offline_nodes
+ )
+ )
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
+ {}
+ )
+ ]
+ )
+ mock_get_ca.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host
+ )
+ client_setup_calls = [
+ mock.call("mock_communicator", self.nodes[0], self.ca_cert),
+ mock.call("mock_communicator", self.nodes[1], self.ca_cert),
+ ]
+ self.assertEqual(
+ len(client_setup_calls),
+ len(mock_client_setup.mock_calls)
+ )
+ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
+ mock_get_cert_request.assert_called_once_with(
+ "mock_runner",
+ self.cluster_name
+ )
+ mock_sign_cert_request.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host,
+ self.cert_request,
+ self.cluster_name
+ )
+ mock_cert_to_pk12.assert_called_once_with(
+ "mock_runner",
+ self.signed_cert
+ )
+ mock_import_cert.assert_not_called()
+
+ def test_client_import_cert_error(
+ self, mock_get_ca, mock_client_setup, mock_get_cert_request,
+ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
+ ):
+ mock_get_ca.return_value = self.ca_cert
+ mock_get_cert_request.return_value = self.cert_request
+ mock_sign_cert_request.return_value = self.signed_cert
+ mock_cert_to_pk12.return_value = self.final_cert
+ def raiser(communicator, node, cert):
+ if node == self.nodes[1]:
+ raise NodeCommunicationException("host", "command", "reason")
+ mock_import_cert.side_effect = raiser
+ skip_offline_nodes = False
+
+ assert_raise_library_error(
+ lambda: lib._add_device_model_net(
+ self.lib_env,
+ self.qnetd_host,
+ self.cluster_name,
+ self.nodes,
+ skip_offline_nodes
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR,
+ {},
+ report_codes.SKIP_OFFLINE_NODES
+ )
+ )
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
+ {}
+ ),
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+ {
+ "node": self.nodes[0].label
+ }
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR,
+ {},
+ report_codes.SKIP_OFFLINE_NODES
+ ),
+ ]
+ )
+ mock_get_ca.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host
+ )
+ client_setup_calls = [
+ mock.call("mock_communicator", self.nodes[0], self.ca_cert),
+ mock.call("mock_communicator", self.nodes[1], self.ca_cert),
+ ]
+ self.assertEqual(
+ len(client_setup_calls),
+ len(mock_client_setup.mock_calls)
+ )
+ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
+ mock_get_cert_request.assert_called_once_with(
+ "mock_runner",
+ self.cluster_name
+ )
+ mock_sign_cert_request.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host,
+ self.cert_request,
+ self.cluster_name
+ )
+ mock_cert_to_pk12.assert_called_once_with(
+ "mock_runner",
+ self.signed_cert
+ )
+ client_import_calls = [
+ mock.call("mock_communicator", self.nodes[0], self.final_cert),
+ mock.call("mock_communicator", self.nodes[1], self.final_cert),
+ ]
+ self.assertEqual(
+ len(client_import_calls),
+ len(mock_import_cert.mock_calls)
+ )
+ mock_import_cert.assert_has_calls(client_import_calls, any_order=True)
+
+ def test_client_import_cert_error_skip_offline(
+ self, mock_get_ca, mock_client_setup, mock_get_cert_request,
+ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
+ ):
+ mock_get_ca.return_value = self.ca_cert
+ mock_get_cert_request.return_value = self.cert_request
+ mock_sign_cert_request.return_value = self.signed_cert
+ mock_cert_to_pk12.return_value = self.final_cert
+ def raiser(communicator, node, cert):
+ if node == self.nodes[1]:
+ raise NodeCommunicationException("host", "command", "reason")
+ mock_import_cert.side_effect = raiser
+ skip_offline_nodes = True
+
+ lib._add_device_model_net(
+ self.lib_env,
+ self.qnetd_host,
+ self.cluster_name,
+ self.nodes,
+ skip_offline_nodes
+ )
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
+ {}
+ ),
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+ {
+ "node": self.nodes[0].label
+ }
+ ),
+ (
+ severity.WARNING,
+ report_codes.NODE_COMMUNICATION_ERROR,
+ {}
+ ),
+ ]
+ )
+ mock_get_ca.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host
+ )
+ client_setup_calls = [
+ mock.call("mock_communicator", self.nodes[0], self.ca_cert),
+ mock.call("mock_communicator", self.nodes[1], self.ca_cert),
+ ]
+ self.assertEqual(
+ len(client_setup_calls),
+ len(mock_client_setup.mock_calls)
+ )
+ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
+ mock_get_cert_request.assert_called_once_with(
+ "mock_runner",
+ self.cluster_name
+ )
+ mock_sign_cert_request.assert_called_once_with(
+ "mock_communicator",
+ self.qnetd_host,
+ self.cert_request,
+ self.cluster_name
+ )
+ mock_cert_to_pk12.assert_called_once_with(
+ "mock_runner",
+ self.signed_cert
+ )
+ client_import_calls = [
+ mock.call("mock_communicator", self.nodes[0], self.final_cert),
+ mock.call("mock_communicator", self.nodes[1], self.final_cert),
+ ]
+ self.assertEqual(
+ len(client_import_calls),
+ len(mock_import_cert.mock_calls)
+ )
+ mock_import_cert.assert_has_calls(client_import_calls, any_order=True)
@mock.patch.object(LibraryEnvironment, "push_corosync_conf")
@mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
+@mock.patch("pcs.lib.commands.quorum._remove_device_model_net")
+@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_disable")
+@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_stop")
class RemoveDeviceTest(TestCase, CmanMixin):
def setUp(self):
self.mock_logger = mock.MagicMock(logging.Logger)
self.mock_reporter = MockLibraryReportProcessor()
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
- def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync):
+ def test_disabled_on_cman(
+ self, mock_remote_stop, mock_remote_disable, mock_remove_net,
+ mock_get_corosync, mock_push_corosync
+ ):
lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assert_disabled_on_cman(lambda: lib.remove_device(lib_env))
mock_get_corosync.assert_not_called()
mock_push_corosync.assert_not_called()
+ mock_remove_net.assert_not_called()
+ mock_remote_disable.assert_not_called()
+ mock_remote_stop.assert_not_called()
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
def test_enabled_on_cman_if_not_live(
- self, mock_get_corosync, mock_push_corosync
+ self, mock_remote_stop, mock_remote_disable, mock_remove_net,
+ mock_get_corosync, mock_push_corosync
):
original_conf = open(rc("corosync-3nodes.conf")).read()
mock_get_corosync.return_value = original_conf
@@ -495,9 +1353,17 @@ class RemoveDeviceTest(TestCase, CmanMixin):
)
)
+ self.assertEqual(1, mock_get_corosync.call_count)
+ self.assertEqual(0, mock_push_corosync.call_count)
+ mock_remove_net.assert_not_called()
+ mock_remote_disable.assert_not_called()
+ mock_remote_stop.assert_not_called()
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- def test_no_device(self, mock_get_corosync, mock_push_corosync):
+ def test_no_device(
+ self, mock_remote_stop, mock_remote_disable, mock_remove_net,
+ mock_get_corosync, mock_push_corosync
+ ):
original_conf = open(rc("corosync-3nodes.conf")).read()
mock_get_corosync.return_value = original_conf
lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
@@ -511,10 +1377,17 @@ class RemoveDeviceTest(TestCase, CmanMixin):
)
)
- mock_push_corosync.assert_not_called()
+ self.assertEqual(1, mock_get_corosync.call_count)
+ self.assertEqual(0, mock_push_corosync.call_count)
+ mock_remove_net.assert_not_called()
+ mock_remote_disable.assert_not_called()
+ mock_remote_stop.assert_not_called()
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- def test_success(self, mock_get_corosync, mock_push_corosync):
+ def test_success(
+ self, mock_remote_stop, mock_remote_disable, mock_remove_net,
+ mock_get_corosync, mock_push_corosync
+ ):
original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
no_device_conf = open(rc("corosync-3nodes.conf")).read()
mock_get_corosync.return_value = original_conf
@@ -527,7 +1400,222 @@ class RemoveDeviceTest(TestCase, CmanMixin):
mock_push_corosync.mock_calls[0][1][0].config.export(),
no_device_conf
)
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.SERVICE_DISABLE_STARTED,
+ {
+ "service": "corosync-qdevice",
+ }
+ ),
+ (
+ severity.INFO,
+ report_codes.SERVICE_STOP_STARTED,
+ {
+ "service": "corosync-qdevice",
+ }
+ ),
+ ]
+ )
+ self.assertEqual(1, len(mock_remove_net.mock_calls))
+ self.assertEqual(3, len(mock_remote_disable.mock_calls))
+ self.assertEqual(3, len(mock_remote_stop.mock_calls))
+
+ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ def test_success_file(
+ self, mock_remote_stop, mock_remote_disable, mock_remove_net,
+ mock_get_corosync, mock_push_corosync
+ ):
+ original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
+ no_device_conf = open(rc("corosync-3nodes.conf")).read()
+ mock_get_corosync.return_value = original_conf
+ lib_env = LibraryEnvironment(
+ self.mock_logger,
+ self.mock_reporter,
+ corosync_conf_data=original_conf
+ )
+
+ lib.remove_device(lib_env)
+
+ self.assertEqual(1, len(mock_push_corosync.mock_calls))
+ ac(
+ mock_push_corosync.mock_calls[0][1][0].config.export(),
+ no_device_conf
+ )
self.assertEqual([], self.mock_reporter.report_item_list)
+ mock_remove_net.assert_not_called()
+ mock_remote_disable.assert_not_called()
+ mock_remote_stop.assert_not_called()
+
+
+@mock.patch("pcs.lib.commands.quorum.qdevice_net.remote_client_destroy")
+@mock.patch.object(
+    LibraryEnvironment,
+    "node_communicator",
+    lambda self: "mock_communicator"
+)
+class RemoveDeviceNetTest(TestCase):
+ def setUp(self):
+ self.mock_logger = mock.MagicMock(logging.Logger)
+ self.mock_reporter = MockLibraryReportProcessor()
+ self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ self.nodes = NodeAddressesList([
+ NodeAddresses("node1"),
+ NodeAddresses("node2"),
+ ])
+
+ def test_success(self, mock_client_destroy):
+ skip_offline_nodes = False
+
+ lib._remove_device_model_net(
+ self.lib_env,
+ self.nodes,
+ skip_offline_nodes
+ )
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED,
+ {}
+ ),
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
+ {
+ "node": self.nodes[0].label
+ }
+ ),
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
+ {
+ "node": self.nodes[1].label
+ }
+ ),
+ ]
+ )
+ client_destroy_calls = [
+ mock.call("mock_communicator", self.nodes[0]),
+ mock.call("mock_communicator", self.nodes[1]),
+ ]
+ self.assertEqual(
+ len(client_destroy_calls),
+ len(mock_client_destroy.mock_calls)
+ )
+ mock_client_destroy.assert_has_calls(
+ client_destroy_calls,
+ any_order=True
+ )
+
+ def test_error_client_destroy(self, mock_client_destroy):
+ def raiser(communicator, node):
+ if node == self.nodes[1]:
+ raise NodeCommunicationException("host", "command", "reason")
+ mock_client_destroy.side_effect = raiser
+ skip_offline_nodes = False
+
+ assert_raise_library_error(
+ lambda: lib._remove_device_model_net(
+ self.lib_env,
+ self.nodes,
+ skip_offline_nodes
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR,
+ {},
+ report_codes.SKIP_OFFLINE_NODES
+ )
+ )
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED,
+ {}
+ ),
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
+ {
+ "node": self.nodes[0].label
+ }
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR,
+ {},
+ report_codes.SKIP_OFFLINE_NODES
+ ),
+ ]
+ )
+ client_destroy_calls = [
+ mock.call("mock_communicator", self.nodes[0]),
+ mock.call("mock_communicator", self.nodes[1]),
+ ]
+ self.assertEqual(
+ len(client_destroy_calls),
+ len(mock_client_destroy.mock_calls)
+ )
+ mock_client_destroy.assert_has_calls(
+ client_destroy_calls,
+ any_order=True
+ )
+
+ def test_error_client_destroy_skip_offline(self, mock_client_destroy):
+ def raiser(communicator, node):
+ if node == self.nodes[1]:
+ raise NodeCommunicationException("host", "command", "reason")
+ mock_client_destroy.side_effect = raiser
+ skip_offline_nodes = True
+
+ lib._remove_device_model_net(
+ self.lib_env,
+ self.nodes,
+ skip_offline_nodes
+ )
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED,
+ {}
+ ),
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
+ {
+ "node": self.nodes[0].label
+ }
+ ),
+ (
+ severity.WARNING,
+ report_codes.NODE_COMMUNICATION_ERROR,
+ {}
+ ),
+ ]
+ )
+ client_destroy_calls = [
+ mock.call("mock_communicator", self.nodes[0]),
+ mock.call("mock_communicator", self.nodes[1]),
+ ]
+ self.assertEqual(
+ len(client_destroy_calls),
+ len(mock_client_destroy.mock_calls)
+ )
+ mock_client_destroy.assert_has_calls(
+ client_destroy_calls,
+ any_order=True
+ )
@mock.patch.object(LibraryEnvironment, "push_corosync_conf")
@@ -671,3 +1759,46 @@ class UpdateDeviceTest(TestCase, CmanMixin):
"model: net\n bad_option: bad_value"
)
)
+
+
+@mock.patch("pcs.lib.commands.quorum.corosync_live.set_expected_votes")
+@mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class SetExpectedVotesLiveTest(TestCase, CmanMixin):
+ def setUp(self):
+ self.mock_logger = mock.MagicMock(logging.Logger)
+ self.mock_reporter = MockLibraryReportProcessor()
+
+ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+ def test_disabled_on_cman(self, mock_set_votes):
+ lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ self.assert_disabled_on_cman(
+ lambda: lib.set_expected_votes_live(lib_env, "5")
+ )
+ mock_set_votes.assert_not_called()
+
+ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ def test_success(self, mock_set_votes):
+ lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ lib.set_expected_votes_live(lib_env, "5")
+ mock_set_votes.assert_called_once_with("mock_runner", 5)
+
+ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ def test_invalid_votes(self, mock_set_votes):
+ lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ assert_raise_library_error(
+ lambda: lib.set_expected_votes_live(lib_env, "-5"),
+ (
+ severity.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "expected votes",
+ "option_value": "-5",
+ "allowed_values": "positive integer",
+ }
+ )
+ )
+ mock_set_votes.assert_not_called()
diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py
index 5700016..4a35fd9 100644
--- a/pcs/test/test_lib_corosync_config_facade.py
+++ b/pcs/test/test_lib_corosync_config_facade.py
@@ -31,6 +31,7 @@ class FromStringTest(TestCase):
self.assertEqual(facade.__class__, lib.ConfigFacade)
self.assertEqual(facade.config.export(), config)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_parse_error_missing_brace(self):
config = "section {"
@@ -55,6 +56,43 @@ class FromStringTest(TestCase):
)
+class GetClusterNametest(TestCase):
+ def test_no_name(self):
+ config = ""
+ facade = lib.ConfigFacade.from_string(config)
+ self.assertEqual("", facade.get_cluster_name())
+ self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
+
+ def test_empty_name(self):
+ config = "totem {\n cluster_name:\n}\n"
+ facade = lib.ConfigFacade.from_string(config)
+ self.assertEqual("", facade.get_cluster_name())
+ self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
+
+ def test_one_name(self):
+ config = "totem {\n cluster_name: test\n}\n"
+ facade = lib.ConfigFacade.from_string(config)
+ self.assertEqual("test", facade.get_cluster_name())
+ self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
+
+ def test_more_names(self):
+ config = "totem {\n cluster_name: test\n cluster_name: TEST\n}\n"
+ facade = lib.ConfigFacade.from_string(config)
+ self.assertEqual("TEST", facade.get_cluster_name())
+ self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
+
+ def test_more_sections(self):
+ config = "totem{\ncluster_name:test\n}\ntotem{\ncluster_name:TEST\n}\n"
+ facade = lib.ConfigFacade.from_string(config)
+ self.assertEqual("TEST", facade.get_cluster_name())
+ self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
+
+
class GetNodesTest(TestCase):
def assert_equal_nodelist(self, expected_nodes, real_nodelist):
real_nodes = [
@@ -69,6 +107,7 @@ class GetNodesTest(TestCase):
nodes = facade.get_nodes()
self.assertEqual(0, len(nodes))
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_empty_nodelist(self):
config = """\
@@ -79,6 +118,7 @@ nodelist {
nodes = facade.get_nodes()
self.assertEqual(0, len(nodes))
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_one_nodelist(self):
config = """\
@@ -107,6 +147,7 @@ nodelist {
nodes
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_more_nodelists(self):
config = """\
@@ -137,6 +178,7 @@ nodelist {
nodes
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
class GetQuorumOptionsTest(TestCase):
@@ -146,6 +188,7 @@ class GetQuorumOptionsTest(TestCase):
options = facade.get_quorum_options()
self.assertEqual({}, options)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_empty_quorum(self):
config = """\
@@ -156,6 +199,7 @@ quorum {
options = facade.get_quorum_options()
self.assertEqual({}, options)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_no_options(self):
config = """\
@@ -167,6 +211,7 @@ quorum {
options = facade.get_quorum_options()
self.assertEqual({}, options)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_some_options(self):
config = """\
@@ -191,6 +236,7 @@ quorum {
options
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_option_repeated(self):
config = """\
@@ -208,6 +254,7 @@ quorum {
options
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_quorum_repeated(self):
config = """\
@@ -231,6 +278,7 @@ quorum {
options
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
class SetQuorumOptionsTest(TestCase):
@@ -247,6 +295,7 @@ class SetQuorumOptionsTest(TestCase):
facade = lib.ConfigFacade.from_string(config)
facade.set_quorum_options(reporter, {"wait_for_all": "0"})
self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual(
"""\
quorum {
@@ -263,6 +312,7 @@ quorum {
facade = lib.ConfigFacade.from_string(config)
facade.set_quorum_options(reporter, {"wait_for_all": ""})
self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual("", facade.config.export())
self.assertEqual([], reporter.report_item_list)
@@ -279,6 +329,7 @@ quorum {
facade.set_quorum_options(reporter, expected_options)
self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
test_facade = lib.ConfigFacade.from_string(facade.config.export())
self.assertEqual(
expected_options,
@@ -309,6 +360,7 @@ quorum {
)
self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
test_facade = lib.ConfigFacade.from_string(facade.config.export())
self.assertEqual(
{
@@ -329,6 +381,7 @@ quorum {
facade.set_quorum_options(reporter, {"auto_tie_breaker": "1"})
self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual(
"1",
facade.get_quorum_options().get("auto_tie_breaker", None)
@@ -347,6 +400,7 @@ quorum {
facade.set_quorum_options(reporter, {"auto_tie_breaker": "0"})
self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual(
"0",
facade.get_quorum_options().get("auto_tie_breaker", None)
@@ -365,6 +419,7 @@ quorum {
facade.set_quorum_options(reporter, {"auto_tie_breaker": "1"})
self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual(
"1",
facade.get_quorum_options().get("auto_tie_breaker", None)
@@ -383,6 +438,7 @@ quorum {
facade.set_quorum_options(reporter, {"auto_tie_breaker": "0"})
self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual(
"0",
facade.get_quorum_options().get("auto_tie_breaker", None)
@@ -421,6 +477,7 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual(
lib.ConfigFacade.from_string(config).get_quorum_options(),
facade.get_quorum_options()
@@ -476,6 +533,7 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual(
lib.ConfigFacade.from_string(config).get_quorum_options(),
facade.get_quorum_options()
@@ -522,11 +580,60 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
+ self.assertEqual(
+ lib.ConfigFacade.from_string(config).get_quorum_options(),
+ facade.get_quorum_options()
+ )
+
+ def test_qdevice_incompatible_options(self):
+ config = open(rc("corosync-3nodes-qdevice.conf")).read()
+ reporter = MockLibraryReportProcessor()
+ facade = lib.ConfigFacade.from_string(config)
+ options = {
+ "auto_tie_breaker": "1",
+ "last_man_standing": "1",
+ "last_man_standing_window": "250",
+ }
+ assert_raise_library_error(
+ lambda: facade.set_quorum_options(reporter, options),
+ (
+ severity.ERROR,
+ report_codes.COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE,
+ {
+ "options_names": [
+ "auto_tie_breaker",
+ "last_man_standing",
+ "last_man_standing_window",
+ ],
+ }
+ )
+ )
+ self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual(
lib.ConfigFacade.from_string(config).get_quorum_options(),
facade.get_quorum_options()
)
+ def test_qdevice_compatible_options(self):
+ config = open(rc("corosync-3nodes-qdevice.conf")).read()
+ reporter = MockLibraryReportProcessor()
+ facade = lib.ConfigFacade.from_string(config)
+ expected_options = {
+ "wait_for_all": "1",
+ }
+ facade.set_quorum_options(reporter, expected_options)
+
+ self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
+ test_facade = lib.ConfigFacade.from_string(facade.config.export())
+ self.assertEqual(
+ expected_options,
+ test_facade.get_quorum_options()
+ )
+ self.assertEqual([], reporter.report_item_list)
+
class HasQuorumDeviceTest(TestCase):
def test_empty_config(self):
@@ -534,12 +641,14 @@ class HasQuorumDeviceTest(TestCase):
facade = lib.ConfigFacade.from_string(config)
self.assertFalse(facade.has_quorum_device())
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_no_device(self):
config = open(rc("corosync.conf")).read()
facade = lib.ConfigFacade.from_string(config)
self.assertFalse(facade.has_quorum_device())
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_empty_device(self):
config = """\
@@ -551,6 +660,7 @@ quorum {
facade = lib.ConfigFacade.from_string(config)
self.assertFalse(facade.has_quorum_device())
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_device_set(self):
config = """\
@@ -563,6 +673,7 @@ quorum {
facade = lib.ConfigFacade.from_string(config)
self.assertTrue(facade.has_quorum_device())
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_no_model(self):
config = """\
@@ -578,6 +689,7 @@ quorum {
facade = lib.ConfigFacade.from_string(config)
self.assertFalse(facade.has_quorum_device())
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
class GetQuorumDeviceSettingsTest(TestCase):
@@ -589,6 +701,7 @@ class GetQuorumDeviceSettingsTest(TestCase):
facade.get_quorum_device_settings()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_no_device(self):
config = open(rc("corosync.conf")).read()
@@ -598,6 +711,7 @@ class GetQuorumDeviceSettingsTest(TestCase):
facade.get_quorum_device_settings()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_empty_device(self):
config = """\
@@ -612,6 +726,7 @@ quorum {
facade.get_quorum_device_settings()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_no_model(self):
config = """\
@@ -630,6 +745,7 @@ quorum {
facade.get_quorum_device_settings()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_configured_properly(self):
config = """\
@@ -649,6 +765,7 @@ quorum {
facade.get_quorum_device_settings()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_more_devices_one_quorum(self):
config = """\
@@ -681,6 +798,7 @@ quorum {
facade.get_quorum_device_settings()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_more_devices_more_quorum(self):
config = """\
@@ -715,6 +833,7 @@ quorum {
facade.get_quorum_device_settings()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
class AddQuorumDeviceTest(TestCase):
@@ -754,9 +873,10 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(config, facade.config.export())
- def test_success_net_minimal(self):
+ def test_success_net_minimal_ffsplit(self):
config = open(rc("corosync-3nodes.conf")).read()
reporter = MockLibraryReportProcessor()
facade = lib.ConfigFacade.from_string(config)
@@ -774,6 +894,7 @@ quorum {
device {
model: net
+ votes: 1
net {
algorithm: ffsplit
@@ -784,55 +905,10 @@ quorum {
facade.config.export()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual([], reporter.report_item_list)
- def test_success_net_full(self):
- config = open(rc("corosync-3nodes.conf")).read()
- reporter = MockLibraryReportProcessor()
- facade = lib.ConfigFacade.from_string(config)
- facade.add_quorum_device(
- reporter,
- "net",
- {
- "host": "127.0.0.1",
- "port": "4433",
- "algorithm": "ffsplit",
- "connect_timeout": "12345",
- "force_ip_version": "4",
- "tie_breaker": "lowest",
- },
- {
- "timeout": "23456",
- "sync_timeout": "34567"
- }
- )
- ac(
- config.replace(
- " provider: corosync_votequorum",
- """\
- provider: corosync_votequorum
-
- device {
- sync_timeout: 34567
- timeout: 23456
- model: net
-
- net {
- algorithm: ffsplit
- connect_timeout: 12345
- force_ip_version: 4
- host: 127.0.0.1
- port: 4433
- tie_breaker: lowest
- }
- }"""
- ),
- facade.config.export()
- )
- self.assertFalse(facade.need_stopped_cluster)
- self.assertEqual([], reporter.report_item_list)
-
- def test_succes_net_lms_3node(self):
+ def test_success_net_minimal_lms(self):
config = open(rc("corosync-3nodes.conf")).read()
reporter = MockLibraryReportProcessor()
facade = lib.ConfigFacade.from_string(config)
@@ -860,16 +936,18 @@ quorum {
facade.config.export()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual([], reporter.report_item_list)
- def test_succes_net_2nodelms_3node(self):
+ def test_success_remove_nodes_votes(self):
config = open(rc("corosync-3nodes.conf")).read()
+ config_votes = config.replace("node {", "node {\nquorum_votes: 2")
reporter = MockLibraryReportProcessor()
- facade = lib.ConfigFacade.from_string(config)
+ facade = lib.ConfigFacade.from_string(config_votes)
facade.add_quorum_device(
reporter,
"net",
- {"host": "127.0.0.1", "algorithm": "2nodelms"},
+ {"host": "127.0.0.1", "algorithm": "lms"},
{}
)
ac(
@@ -890,47 +968,28 @@ quorum {
facade.config.export()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual([], reporter.report_item_list)
- def test_succes_net_lms_2node(self):
- config = open(rc("corosync.conf")).read()
- reporter = MockLibraryReportProcessor()
- facade = lib.ConfigFacade.from_string(config)
- facade.add_quorum_device(
- reporter,
- "net",
- {"host": "127.0.0.1", "algorithm": "lms"},
- {}
- )
- ac(
- config.replace(
- " provider: corosync_votequorum",
- """\
- provider: corosync_votequorum
-
- device {
- model: net
-
- net {
- algorithm: 2nodelms
- host: 127.0.0.1
- }
- }"""
- ).replace(" two_node: 1\n", ""),
- facade.config.export()
- )
- self.assertFalse(facade.need_stopped_cluster)
- self.assertEqual([], reporter.report_item_list)
-
- def test_succes_net_2nodelms_2node(self):
- config = open(rc("corosync.conf")).read()
+ def test_success_net_full(self):
+ config = open(rc("corosync-3nodes.conf")).read()
reporter = MockLibraryReportProcessor()
facade = lib.ConfigFacade.from_string(config)
facade.add_quorum_device(
reporter,
"net",
- {"host": "127.0.0.1", "algorithm": "2nodelms"},
- {}
+ {
+ "host": "127.0.0.1",
+ "port": "4433",
+ "algorithm": "ffsplit",
+ "connect_timeout": "12345",
+ "force_ip_version": "4",
+ "tie_breaker": "lowest",
+ },
+ {
+ "timeout": "23456",
+ "sync_timeout": "34567"
+ }
)
ac(
config.replace(
@@ -939,17 +998,25 @@ quorum {
provider: corosync_votequorum
device {
+ sync_timeout: 34567
+ timeout: 23456
model: net
+ votes: 1
net {
- algorithm: 2nodelms
+ algorithm: ffsplit
+ connect_timeout: 12345
+ force_ip_version: 4
host: 127.0.0.1
+ port: 4433
+ tie_breaker: lowest
}
}"""
- ).replace(" two_node: 1\n", ""),
+ ),
facade.config.export()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual([], reporter.report_item_list)
def test_remove_conflicting_options(self):
@@ -982,6 +1049,7 @@ quorum {
device {
model: net
+ votes: 1
net {
algorithm: ffsplit
@@ -994,6 +1062,7 @@ quorum {
facade.config.export()
)
self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual([], reporter.report_item_list)
def test_remove_old_configuration(self):
@@ -1030,6 +1099,7 @@ quorum {
device {
model: net
+ votes: 1
net {
algorithm: ffsplit
@@ -1042,6 +1112,7 @@ quorum {
facade.config.export()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
self.assertEqual([], reporter.report_item_list)
def test_bad_model(self):
@@ -1062,6 +1133,7 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(config, facade.config.export())
def test_bad_model_forced(self):
@@ -1082,6 +1154,7 @@ quorum {
facade.config.export()
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
assert_report_item_list_equal(
reporter.report_item_list,
[
@@ -1115,6 +1188,7 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(config, facade.config.export())
def test_bad_options_net(self):
@@ -1147,7 +1221,7 @@ quorum {
{
"option_name": "algorithm",
"option_value": "bad algorithm",
- "allowed_values": ("2nodelms", "ffsplit", "lms"),
+ "allowed_values": ("ffsplit", "lms"),
},
report_codes.FORCE_OPTIONS
),
@@ -1254,6 +1328,7 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(config, facade.config.export())
def test_mandatory_options_missing_net_forced(self):
@@ -1277,6 +1352,7 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(config, facade.config.export())
def test_mandatory_options_empty_net_forced(self):
@@ -1300,6 +1376,7 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(config, facade.config.export())
def test_bad_options_net_forced(self):
@@ -1326,6 +1403,7 @@ quorum {
force_options=True
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(
config.replace(
" provider: corosync_votequorum",
@@ -1360,7 +1438,7 @@ quorum {
{
"option_name": "algorithm",
"option_value": "bad algorithm",
- "allowed_values": ("2nodelms", "ffsplit", "lms"),
+ "allowed_values": ("ffsplit", "lms"),
}
),
(
@@ -1445,9 +1523,52 @@ quorum {
]
)
+ def test_bad_options_net_disallowed_algorithms(self):
+ config = open(rc("corosync-3nodes.conf")).read()
+ reporter = MockLibraryReportProcessor()
+ facade = lib.ConfigFacade.from_string(config)
+ assert_raise_library_error(
+ lambda: facade.add_quorum_device(
+ reporter,
+ "net",
+ {"host": "127.0.0.1", "algorithm": "test"},
+ {}
+ ),
+ (
+ severity.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "algorithm",
+ "option_value": "test",
+ "allowed_values": ("ffsplit", "lms"),
+ },
+ report_codes.FORCE_OPTIONS
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: facade.add_quorum_device(
+ reporter,
+ "net",
+ {"host": "127.0.0.1", "algorithm": "2nodelms"},
+ {}
+ ),
+ (
+ severity.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "algorithm",
+ "option_value": "2nodelms",
+ "allowed_values": ("ffsplit", "lms"),
+ },
+ report_codes.FORCE_OPTIONS
+ )
+ )
+
+
class UpdateQuorumDeviceTest(TestCase):
- def fixture_add_device(self, config):
- return re.sub(
+ def fixture_add_device(self, config, votes=None):
+ with_device = re.sub(
re.compile(r"quorum {[^}]*}", re.MULTILINE | re.DOTALL),
"""\
quorum {
@@ -1465,6 +1586,12 @@ quorum {
}""",
config
)
+ if votes:
+ with_device = with_device.replace(
+ "model: net",
+ "model: net\n votes: {0}".format(votes)
+ )
+ return with_device
def test_not_existing(self):
config = open(rc("corosync.conf")).read()
@@ -1483,11 +1610,13 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(config, facade.config.export())
def test_success_model_options_net(self):
config = self.fixture_add_device(
- open(rc("corosync-3nodes.conf")).read()
+ open(rc("corosync-3nodes.conf")).read(),
+ votes="1"
)
reporter = MockLibraryReportProcessor()
facade = lib.ConfigFacade.from_string(config)
@@ -1496,7 +1625,8 @@ quorum {
{"host": "127.0.0.2", "port": "", "algorithm": "ffsplit"},
{}
)
- self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_stopped_cluster)
+ self.assertTrue(facade.need_qdevice_reload)
ac(
config.replace(
"host: 127.0.0.1\n port: 4433",
@@ -1506,27 +1636,6 @@ quorum {
)
self.assertEqual([], reporter.report_item_list)
- def test_success_net_3node_2nodelms(self):
- config = self.fixture_add_device(
- open(rc("corosync-3nodes.conf")).read()
- )
- reporter = MockLibraryReportProcessor()
- facade = lib.ConfigFacade.from_string(config)
- facade.update_quorum_device(
- reporter,
- {"algorithm": "2nodelms"},
- {}
- )
- self.assertTrue(facade.need_stopped_cluster)
- ac(
- config.replace(
- "port: 4433",
- "port: 4433\n algorithm: lms"
- ),
- facade.config.export()
- )
- self.assertEqual([], reporter.report_item_list)
-
def test_success_net_doesnt_require_host_and_algorithm(self):
config = self.fixture_add_device(
open(rc("corosync-3nodes.conf")).read()
@@ -1534,7 +1643,8 @@ quorum {
reporter = MockLibraryReportProcessor()
facade = lib.ConfigFacade.from_string(config)
facade.update_quorum_device(reporter, {"port": "4444"}, {})
- self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_stopped_cluster)
+ self.assertTrue(facade.need_qdevice_reload)
ac(
config.replace(
"host: 127.0.0.1\n port: 4433",
@@ -1572,12 +1682,13 @@ quorum {
{
"option_name": "algorithm",
"option_value": "",
- "allowed_values": ("2nodelms", "ffsplit", "lms")
+ "allowed_values": ("ffsplit", "lms")
},
report_codes.FORCE_OPTIONS
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(config, facade.config.export())
def test_net_required_options_cannot_be_removed_forced(self):
@@ -1605,6 +1716,7 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(config, facade.config.export())
def test_bad_net_options(self):
@@ -1632,7 +1744,7 @@ quorum {
{
"option_name": "algorithm",
"option_value": "bad algorithm",
- "allowed_values": ("2nodelms", "ffsplit", "lms"),
+ "allowed_values": ("ffsplit", "lms"),
},
report_codes.FORCE_OPTIONS
),
@@ -1695,6 +1807,7 @@ quorum {
),
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(config, facade.config.export())
def test_bad_net_options_forced(self):
@@ -1716,7 +1829,8 @@ quorum {
{},
force_options=True
)
- self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_stopped_cluster)
+ self.assertTrue(facade.need_qdevice_reload)
ac(
config.replace(
" host: 127.0.0.1\n port: 4433",
@@ -1740,7 +1854,7 @@ quorum {
{
"option_name": "algorithm",
"option_value": "bad algorithm",
- "allowed_values": ("2nodelms", "ffsplit", "lms"),
+ "allowed_values": ("ffsplit", "lms"),
},
),
(
@@ -1809,7 +1923,8 @@ quorum {
{},
{"timeout": "", "sync_timeout": "23456"}
)
- self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_stopped_cluster)
+ self.assertTrue(facade.need_qdevice_reload)
ac(
config.replace(
"timeout: 12345\n model: net",
@@ -1830,7 +1945,8 @@ quorum {
{"port": "4444"},
{"timeout": "23456"}
)
- self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_stopped_cluster)
+ self.assertTrue(facade.need_qdevice_reload)
ac(
config
.replace("port: 4433", "port: 4444")
@@ -1898,6 +2014,7 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(config, facade.config.export())
def test_bad_generic_options_cannot_force_model(self):
@@ -1924,6 +2041,7 @@ quorum {
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(config, facade.config.export())
def test_bad_generic_options_forced(self):
@@ -1942,7 +2060,8 @@ quorum {
},
force_options=True
)
- self.assertTrue(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_stopped_cluster)
+ self.assertTrue(facade.need_qdevice_reload)
ac(
config.replace(
" timeout: 12345\n model: net",
@@ -2001,6 +2120,7 @@ class RemoveQuorumDeviceTest(TestCase):
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_no_device(self):
config = open(rc("corosync-3nodes.conf")).read()
@@ -2014,6 +2134,7 @@ class RemoveQuorumDeviceTest(TestCase):
)
)
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
def test_remove_all_devices(self):
config_no_devices = open(rc("corosync-3nodes.conf")).read()
@@ -2054,6 +2175,7 @@ quorum {
facade = lib.ConfigFacade.from_string(config)
facade.remove_quorum_device()
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(
config_no_devices,
facade.config.export()
@@ -2082,6 +2204,7 @@ quorum {
facade = lib.ConfigFacade.from_string(config)
facade.remove_quorum_device()
self.assertFalse(facade.need_stopped_cluster)
+ self.assertFalse(facade.need_qdevice_reload)
ac(
config_no_devices,
facade.config.export()
diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py
index 4878136..0fc5eb2 100644
--- a/pcs/test/test_lib_corosync_live.py
+++ b/pcs/test/test_lib_corosync_live.py
@@ -47,6 +47,22 @@ class GetLocalCorosyncConfTest(TestCase):
)
+class SetRemoteCorosyncConfTest(TestCase):
+ def test_success(self):
+ config = "test {\nconfig: data\n}\n"
+ node = NodeAddresses("node1")
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ mock_communicator.call_node.return_value = "dummy return"
+
+ lib.set_remote_corosync_conf(mock_communicator, node, config)
+
+ mock_communicator.call_node.assert_called_once_with(
+ node,
+ "remote/set_corosync_conf",
+ "corosync_conf=test+%7B%0Aconfig%3A+data%0A%7D%0A"
+ )
+
+
class ReloadConfigTest(TestCase):
def path(self, name):
return os.path.join(settings.corosync_binaries, name)
@@ -85,17 +101,85 @@ class ReloadConfigTest(TestCase):
])
-class SetRemoteCorosyncConfTest(TestCase):
+class GetQuorumStatusTextTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+ self.quorum_tool = "/usr/sbin/corosync-quorumtool"
+
def test_success(self):
- config = "test {\nconfig: data\n}\n"
- node = NodeAddresses("node1")
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- mock_communicator.call_node.return_value = "dummy return"
+ self.mock_runner.run.return_value = ("status info", 0)
+ self.assertEqual(
+ "status info",
+ lib.get_quorum_status_text(self.mock_runner)
+ )
+ self.mock_runner.run.assert_called_once_with([
+ self.quorum_tool, "-p"
+ ])
- lib.set_remote_corosync_conf(mock_communicator, node, config)
+ def test_success_with_retval_1(self):
+ self.mock_runner.run.return_value = ("status info", 1)
+ self.assertEqual(
+ "status info",
+ lib.get_quorum_status_text(self.mock_runner)
+ )
+ self.mock_runner.run.assert_called_once_with([
+ self.quorum_tool, "-p"
+ ])
- mock_communicator.call_node.assert_called_once_with(
- node,
- "remote/set_corosync_conf",
- "corosync_conf=test+%7B%0Aconfig%3A+data%0A%7D%0A"
+ def test_error(self):
+ self.mock_runner.run.return_value = ("status error", 2)
+ assert_raise_library_error(
+ lambda: lib.get_quorum_status_text(self.mock_runner),
+ (
+ severity.ERROR,
+ report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR,
+ {
+ "reason": "status error",
+ }
+ )
)
+ self.mock_runner.run.assert_called_once_with([
+ self.quorum_tool, "-p"
+ ])
+
+
+class SetExpectedVotesTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+
+ def path(self, name):
+ return os.path.join(settings.corosync_binaries, name)
+
+ def test_success(self):
+ cmd_retval = 0
+ cmd_output = "cmd output"
+ mock_runner = mock.MagicMock(spec_set=CommandRunner)
+ mock_runner.run.return_value = (cmd_output, cmd_retval)
+
+ lib.set_expected_votes(mock_runner, 3)
+
+ mock_runner.run.assert_called_once_with([
+ self.path("corosync-quorumtool"), "-e", "3"
+ ])
+
+ def test_error(self):
+ cmd_retval = 1
+ cmd_output = "cmd output"
+ mock_runner = mock.MagicMock(spec_set=CommandRunner)
+ mock_runner.run.return_value = (cmd_output, cmd_retval)
+
+ assert_raise_library_error(
+ lambda: lib.set_expected_votes(mock_runner, 3),
+ (
+ severity.ERROR,
+ report_codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR,
+ {
+ "reason": cmd_output,
+ }
+ )
+ )
+
+ mock_runner.run.assert_called_once_with([
+ self.path("corosync-quorumtool"), "-e", "3"
+ ])
+
diff --git a/pcs/test/test_lib_corosync_qdevice_client.py b/pcs/test/test_lib_corosync_qdevice_client.py
new file mode 100644
index 0000000..e0332f1
--- /dev/null
+++ b/pcs/test/test_lib_corosync_qdevice_client.py
@@ -0,0 +1,60 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from unittest import TestCase
+
+from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.assertions import assert_raise_library_error
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severity
+from pcs.lib.external import CommandRunner
+
+import pcs.lib.corosync.qdevice_client as lib
+
+
+class GetStatusTextTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+ self.qdevice_tool = "/usr/sbin/corosync-qdevice-tool"
+
+ def test_success(self):
+ self.mock_runner.run.return_value = ("status info", 0)
+ self.assertEqual(
+ "status info",
+ lib.get_status_text(self.mock_runner)
+ )
+ self.mock_runner.run.assert_called_once_with([
+ self.qdevice_tool, "-s"
+ ])
+
+ def test_success_verbose(self):
+ self.mock_runner.run.return_value = ("status info", 0)
+ self.assertEqual(
+ "status info",
+ lib.get_status_text(self.mock_runner, True)
+ )
+ self.mock_runner.run.assert_called_once_with([
+ self.qdevice_tool, "-s", "-v"
+ ])
+
+ def test_error(self):
+ self.mock_runner.run.return_value = ("status error", 1)
+ assert_raise_library_error(
+ lambda: lib.get_status_text(self.mock_runner),
+ (
+ severity.ERROR,
+ report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR,
+ {
+ "reason": "status error",
+ }
+ )
+ )
+ self.mock_runner.run.assert_called_once_with([
+ self.qdevice_tool, "-s"
+ ])
+
diff --git a/pcs/test/test_lib_corosync_qdevice_net.py b/pcs/test/test_lib_corosync_qdevice_net.py
index 38bc9c8..3d473f7 100644
--- a/pcs/test/test_lib_corosync_qdevice_net.py
+++ b/pcs/test/test_lib_corosync_qdevice_net.py
@@ -7,18 +7,40 @@ from __future__ import (
from unittest import TestCase
+import base64
+import os.path
+
from pcs.test.tools.pcs_mock import mock
from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.misc import get_test_resource
+from pcs import settings
from pcs.common import report_codes
-from pcs.lib.errors import ReportItemSeverity as severity
-from pcs.lib.external import CommandRunner
+from pcs.lib import reports
+from pcs.lib.errors import ReportItemSeverity as severity, LibraryError
+from pcs.lib.external import (
+ CommandRunner,
+ NodeCommunicator,
+ NodeCommunicationException,
+)
import pcs.lib.corosync.qdevice_net as lib
-_qnetd_cert_dir = "/etc/corosync/qdevice/net/qnetd/nssdb"
-_qnetd_tool = "/usr/sbin/corosync-qnetd-certutil"
+_qnetd_cert_dir = "/etc/corosync/qnetd/nssdb"
+_qnetd_cert_tool = "/usr/bin/corosync-qnetd-certutil"
+_qnetd_tool = "/usr/bin/corosync-qnetd-tool"
+_client_cert_dir = "/etc/corosync/qdevice/net/nssdb"
+_client_cert_tool = "/usr/sbin/corosync-qdevice-net-certutil"
+
+def cert_to_url(cert):
+ return base64.b64encode(cert).decode("utf-8").replace("=", "%3D")
+
+class CertificateTestCase(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+ self.mock_tmpfile = mock.MagicMock()
+ self.mock_tmpfile.name = "tmpfile path"
@mock.patch("pcs.lib.corosync.qdevice_net.external.is_dir_nonempty")
class QdeviceSetupTest(TestCase):
@@ -32,7 +54,7 @@ class QdeviceSetupTest(TestCase):
lib.qdevice_setup(self.mock_runner)
mock_is_dir_nonempty.assert_called_once_with(_qnetd_cert_dir)
- self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-i"])
+ self.mock_runner.run.assert_called_once_with([_qnetd_cert_tool, "-i"])
def test_cert_db_exists(self, mock_is_dir_nonempty):
mock_is_dir_nonempty.return_value = True
@@ -47,7 +69,7 @@ class QdeviceSetupTest(TestCase):
)
mock_is_dir_nonempty.assert_called_once_with(_qnetd_cert_dir)
- self.mock_runner.assert_not_called()
+ self.mock_runner.run.assert_not_called()
def test_init_tool_fail(self, mock_is_dir_nonempty):
mock_is_dir_nonempty.return_value = False
@@ -66,16 +88,24 @@ class QdeviceSetupTest(TestCase):
)
mock_is_dir_nonempty.assert_called_once_with(_qnetd_cert_dir)
- self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-i"])
+ self.mock_runner.run.assert_called_once_with([_qnetd_cert_tool, "-i"])
@mock.patch("pcs.lib.corosync.qdevice_net.shutil.rmtree")
+@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_initialized")
class QdeviceDestroyTest(TestCase):
- def test_success(self, mock_rmtree):
+ def test_success(self, mock_initialized, mock_rmtree):
+ mock_initialized.return_value = True
lib.qdevice_destroy()
mock_rmtree.assert_called_once_with(_qnetd_cert_dir)
- def test_cert_dir_rm_error(self, mock_rmtree):
+ def test_not_initialized(self, mock_initialized, mock_rmtree):
+ mock_initialized.return_value = False
+ lib.qdevice_destroy()
+ mock_rmtree.assert_not_called()
+
+ def test_cert_dir_rm_error(self, mock_initialized, mock_rmtree):
+ mock_initialized.return_value = True
mock_rmtree.side_effect = EnvironmentError("test errno", "test message")
assert_raise_library_error(
lib.qdevice_destroy,
@@ -89,3 +119,920 @@ class QdeviceDestroyTest(TestCase):
)
)
mock_rmtree.assert_called_once_with(_qnetd_cert_dir)
+
+
+class QdeviceStatusGenericTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+
+ def test_success(self):
+ self.mock_runner.run.return_value = ("status info", 0)
+ self.assertEqual(
+ "status info",
+ lib.qdevice_status_generic_text(self.mock_runner)
+ )
+ self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s"])
+
+ def test_success_verbose(self):
+ self.mock_runner.run.return_value = ("status info", 0)
+ self.assertEqual(
+ "status info",
+ lib.qdevice_status_generic_text(self.mock_runner, True)
+ )
+ self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s", "-v"])
+
+ def test_error(self):
+ self.mock_runner.run.return_value = ("status error", 1)
+ assert_raise_library_error(
+ lambda: lib.qdevice_status_generic_text(self.mock_runner),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_GET_STATUS_ERROR,
+ {
+ "model": "net",
+ "reason": "status error",
+ }
+ )
+ )
+ self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s"])
+
+
+class QdeviceStatusClusterTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+
+ def test_success(self):
+ self.mock_runner.run.return_value = ("status info", 0)
+ self.assertEqual(
+ "status info",
+ lib.qdevice_status_cluster_text(self.mock_runner)
+ )
+ self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l"])
+
+ def test_success_verbose(self):
+ self.mock_runner.run.return_value = ("status info", 0)
+ self.assertEqual(
+ "status info",
+ lib.qdevice_status_cluster_text(self.mock_runner, verbose=True)
+ )
+ self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l", "-v"])
+
+ def test_success_cluster(self):
+ self.mock_runner.run.return_value = ("status info", 0)
+ self.assertEqual(
+ "status info",
+ lib.qdevice_status_cluster_text(self.mock_runner, "cluster")
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _qnetd_tool, "-l", "-c", "cluster"
+ ])
+
+ def test_success_cluster_verbose(self):
+ self.mock_runner.run.return_value = ("status info", 0)
+ self.assertEqual(
+ "status info",
+ lib.qdevice_status_cluster_text(self.mock_runner, "cluster", True)
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _qnetd_tool, "-l", "-v", "-c", "cluster"
+ ])
+
+ def test_error(self):
+ self.mock_runner.run.return_value = ("status error", 1)
+ assert_raise_library_error(
+ lambda: lib.qdevice_status_cluster_text(self.mock_runner),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_GET_STATUS_ERROR,
+ {
+ "model": "net",
+ "reason": "status error",
+ }
+ )
+ )
+ self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l"])
+
+
+@mock.patch("pcs.lib.corosync.qdevice_net._get_output_certificate")
+@mock.patch("pcs.lib.corosync.qdevice_net._store_to_tmpfile")
+class QdeviceSignCertificateRequestTest(CertificateTestCase):
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.qdevice_initialized",
+ lambda: True
+ )
+ def test_success(self, mock_tmp_store, mock_get_cert):
+ mock_tmp_store.return_value = self.mock_tmpfile
+ self.mock_runner.run.return_value = ("tool output", 0)
+ mock_get_cert.return_value = "new certificate".encode("utf-8")
+
+ result = lib.qdevice_sign_certificate_request(
+ self.mock_runner,
+ "certificate request",
+ "clusterName"
+ )
+ self.assertEqual(result, mock_get_cert.return_value)
+
+ mock_tmp_store.assert_called_once_with(
+ "certificate request",
+ reports.qdevice_certificate_sign_error
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _qnetd_cert_tool,
+ "-s", "-c", self.mock_tmpfile.name, "-n", "clusterName"
+ ])
+ mock_get_cert.assert_called_once_with(
+ "tool output",
+ reports.qdevice_certificate_sign_error
+ )
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.qdevice_initialized",
+ lambda: False
+ )
+ def test_not_initialized(self, mock_tmp_store, mock_get_cert):
+ assert_raise_library_error(
+ lambda: lib.qdevice_sign_certificate_request(
+ self.mock_runner,
+ "certificate request",
+ "clusterName"
+ ),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_NOT_INITIALIZED,
+ {
+ "model": "net",
+ }
+ )
+ )
+ mock_tmp_store.assert_not_called()
+ self.mock_runner.run.assert_not_called()
+ mock_get_cert.assert_not_called()
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.qdevice_initialized",
+ lambda: True
+ )
+ def test_input_write_error(self, mock_tmp_store, mock_get_cert):
+ mock_tmp_store.side_effect = LibraryError
+
+ self.assertRaises(
+ LibraryError,
+ lambda: lib.qdevice_sign_certificate_request(
+ self.mock_runner,
+ "certificate request",
+ "clusterName"
+ )
+ )
+
+ self.mock_runner.run.assert_not_called()
+ mock_get_cert.assert_not_called()
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.qdevice_initialized",
+ lambda: True
+ )
+ def test_sign_error(self, mock_tmp_store, mock_get_cert):
+ mock_tmp_store.return_value = self.mock_tmpfile
+ self.mock_runner.run.return_value = ("tool output error", 1)
+
+ assert_raise_library_error(
+ lambda: lib.qdevice_sign_certificate_request(
+ self.mock_runner,
+ "certificate request",
+ "clusterName"
+ ),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_CERTIFICATE_SIGN_ERROR,
+ {
+ "reason": "tool output error",
+ }
+ )
+ )
+
+ mock_tmp_store.assert_called_once_with(
+ "certificate request",
+ reports.qdevice_certificate_sign_error
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _qnetd_cert_tool,
+ "-s", "-c", self.mock_tmpfile.name, "-n", "clusterName"
+ ])
+ mock_get_cert.assert_not_called()
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.qdevice_initialized",
+ lambda: True
+ )
+ def test_output_read_error(self, mock_tmp_store, mock_get_cert):
+ mock_tmp_store.return_value = self.mock_tmpfile
+ self.mock_runner.run.return_value = ("tool output", 0)
+ mock_get_cert.side_effect = LibraryError
+
+ self.assertRaises(
+ LibraryError,
+ lambda: lib.qdevice_sign_certificate_request(
+ self.mock_runner,
+ "certificate request",
+ "clusterName"
+ )
+ )
+
+ mock_tmp_store.assert_called_once_with(
+ "certificate request",
+ reports.qdevice_certificate_sign_error
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _qnetd_cert_tool,
+ "-s", "-c", self.mock_tmpfile.name, "-n", "clusterName"
+ ])
+ mock_get_cert.assert_called_once_with(
+ "tool output",
+ reports.qdevice_certificate_sign_error
+ )
+
+
+@mock.patch("pcs.lib.corosync.qdevice_net.shutil.rmtree")
+@mock.patch("pcs.lib.corosync.qdevice_net.client_initialized")
+class ClientDestroyTest(TestCase):
+ def test_success(self, mock_initialized, mock_rmtree):
+ mock_initialized.return_value = True
+ lib.client_destroy()
+ mock_rmtree.assert_called_once_with(_client_cert_dir)
+
+ def test_not_initialized(self, mock_initialized, mock_rmtree):
+ mock_initialized.return_value = False
+ lib.client_destroy()
+ mock_rmtree.assert_not_called()
+
+ def test_cert_dir_rm_error(self, mock_initialized, mock_rmtree):
+ mock_initialized.return_value = True
+ mock_rmtree.side_effect = EnvironmentError("test errno", "test message")
+ assert_raise_library_error(
+ lib.client_destroy,
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_DESTROY_ERROR,
+ {
+ "model": "net",
+ "reason": "test message",
+ }
+ )
+ )
+ mock_rmtree.assert_called_once_with(_client_cert_dir)
+
+
+class ClientSetupTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+ self.original_path = settings.corosync_qdevice_net_client_certs_dir
+ settings.corosync_qdevice_net_client_certs_dir = get_test_resource(
+ "qdevice-certs"
+ )
+ self.ca_file_path = os.path.join(
+ settings.corosync_qdevice_net_client_certs_dir,
+ settings.corosync_qdevice_net_client_ca_file_name
+ )
+
+ def tearDown(self):
+ settings.corosync_qdevice_net_client_certs_dir = self.original_path
+
+ @mock.patch("pcs.lib.corosync.qdevice_net.client_destroy")
+ def test_success(self, mock_destroy):
+ self.mock_runner.run.return_value = ("tool output", 0)
+
+ lib.client_setup(self.mock_runner, "certificate data".encode("utf-8"))
+
+ self.assertEqual(
+ "certificate data".encode("utf-8"),
+ open(self.ca_file_path, "rb").read()
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _client_cert_tool, "-i", "-c", self.ca_file_path
+ ])
+ mock_destroy.assert_called_once_with()
+
+ @mock.patch("pcs.lib.corosync.qdevice_net.client_destroy")
+ def test_init_error(self, mock_destroy):
+ self.mock_runner.run.return_value = ("tool output error", 1)
+
+ assert_raise_library_error(
+ lambda: lib.client_setup(
+ self.mock_runner,
+ "certificate data".encode("utf-8")
+ ),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_INITIALIZATION_ERROR,
+ {
+ "model": "net",
+ "reason": "tool output error",
+ }
+ )
+ )
+
+ self.assertEqual(
+ "certificate data".encode("utf-8"),
+ open(self.ca_file_path, "rb").read()
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _client_cert_tool, "-i", "-c", self.ca_file_path
+ ])
+ mock_destroy.assert_called_once_with()
+
+
+@mock.patch("pcs.lib.corosync.qdevice_net._get_output_certificate")
+class ClientGenerateCertificateRequestTest(CertificateTestCase):
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.client_initialized",
+ lambda: True
+ )
+ def test_success(self, mock_get_cert):
+ self.mock_runner.run.return_value = ("tool output", 0)
+ mock_get_cert.return_value = "new certificate".encode("utf-8")
+
+ result = lib.client_generate_certificate_request(
+ self.mock_runner,
+ "clusterName"
+ )
+ self.assertEqual(result, mock_get_cert.return_value)
+
+ self.mock_runner.run.assert_called_once_with([
+ _client_cert_tool, "-r", "-n", "clusterName"
+ ])
+ self.assertEqual(1, len(mock_get_cert.mock_calls))
+ self.assertEqual(
+ "tool output",
+ mock_get_cert.call_args[0][0]
+ )
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.client_initialized",
+ lambda: False
+ )
+ def test_not_initialized(self, mock_get_cert):
+ assert_raise_library_error(
+ lambda: lib.client_generate_certificate_request(
+ self.mock_runner,
+ "clusterName"
+ ),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_NOT_INITIALIZED,
+ {
+ "model": "net",
+ }
+ )
+ )
+ self.mock_runner.run.assert_not_called()
+ mock_get_cert.assert_not_called()
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.client_initialized",
+ lambda: True
+ )
+ def test_tool_error(self, mock_get_cert):
+ self.mock_runner.run.return_value = ("tool output error", 1)
+
+ assert_raise_library_error(
+ lambda: lib.client_generate_certificate_request(
+ self.mock_runner,
+ "clusterName"
+ ),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_INITIALIZATION_ERROR,
+ {
+ "model": "net",
+ "reason": "tool output error",
+ }
+ )
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _client_cert_tool, "-r", "-n", "clusterName"
+ ])
+ mock_get_cert.assert_not_called()
+
+
+@mock.patch("pcs.lib.corosync.qdevice_net._get_output_certificate")
+@mock.patch("pcs.lib.corosync.qdevice_net._store_to_tmpfile")
+class ClientCertRequestToPk12Test(CertificateTestCase):
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.client_initialized",
+ lambda: True
+ )
+ def test_success(self, mock_tmp_store, mock_get_cert):
+ mock_tmp_store.return_value = self.mock_tmpfile
+ self.mock_runner.run.return_value = ("tool output", 0)
+ mock_get_cert.return_value = "new certificate".encode("utf-8")
+
+ result = lib.client_cert_request_to_pk12(
+ self.mock_runner,
+ "certificate request"
+ )
+ self.assertEqual(result, mock_get_cert.return_value)
+
+ mock_tmp_store.assert_called_once_with(
+ "certificate request",
+ reports.qdevice_certificate_import_error
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _client_cert_tool, "-M", "-c", self.mock_tmpfile.name
+ ])
+ mock_get_cert.assert_called_once_with(
+ "tool output",
+ reports.qdevice_certificate_import_error
+ )
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.client_initialized",
+ lambda: False
+ )
+ def test_not_initialized(self, mock_tmp_store, mock_get_cert):
+ assert_raise_library_error(
+ lambda: lib.client_cert_request_to_pk12(
+ self.mock_runner,
+ "certificate request"
+ ),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_NOT_INITIALIZED,
+ {
+ "model": "net",
+ }
+ )
+ )
+ mock_tmp_store.assert_not_called()
+ self.mock_runner.run.assert_not_called()
+ mock_get_cert.assert_not_called()
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.client_initialized",
+ lambda: True
+ )
+ def test_input_write_error(self, mock_tmp_store, mock_get_cert):
+ mock_tmp_store.side_effect = LibraryError
+
+ self.assertRaises(
+ LibraryError,
+ lambda: lib.client_cert_request_to_pk12(
+ self.mock_runner,
+ "certificate request"
+ )
+ )
+
+ mock_tmp_store.assert_called_once_with(
+ "certificate request",
+ reports.qdevice_certificate_import_error
+ )
+ self.mock_runner.run.assert_not_called()
+ mock_get_cert.assert_not_called()
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.client_initialized",
+ lambda: True
+ )
+ def test_transform_error(self, mock_tmp_store, mock_get_cert):
+ mock_tmp_store.return_value = self.mock_tmpfile
+ self.mock_runner.run.return_value = ("tool output error", 1)
+
+ assert_raise_library_error(
+ lambda: lib.client_cert_request_to_pk12(
+ self.mock_runner,
+ "certificate request"
+ ),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
+ {
+ "reason": "tool output error",
+ }
+ )
+ )
+
+ mock_tmp_store.assert_called_once_with(
+ "certificate request",
+ reports.qdevice_certificate_import_error
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _client_cert_tool, "-M", "-c", self.mock_tmpfile.name
+ ])
+ mock_get_cert.assert_not_called()
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.client_initialized",
+ lambda: True
+ )
+ def test_output_read_error(self, mock_tmp_store, mock_get_cert):
+ mock_tmp_store.return_value = self.mock_tmpfile
+ self.mock_runner.run.return_value = ("tool output", 0)
+ mock_get_cert.side_effect = LibraryError
+
+ self.assertRaises(
+ LibraryError,
+ lambda: lib.client_cert_request_to_pk12(
+ self.mock_runner,
+ "certificate request"
+ )
+ )
+
+ mock_tmp_store.assert_called_once_with(
+ "certificate request",
+ reports.qdevice_certificate_import_error
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _client_cert_tool, "-M", "-c", self.mock_tmpfile.name
+ ])
+ mock_get_cert.assert_called_once_with(
+ "tool output",
+ reports.qdevice_certificate_import_error
+ )
+
+
+@mock.patch("pcs.lib.corosync.qdevice_net._store_to_tmpfile")
+class ClientImportCertificateAndKeyTest(CertificateTestCase):
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.client_initialized",
+ lambda: True
+ )
+ def test_success(self, mock_tmp_store):
+ mock_tmp_store.return_value = self.mock_tmpfile
+ self.mock_runner.run.return_value = ("tool output", 0)
+
+ lib.client_import_certificate_and_key(
+ self.mock_runner,
+ "pk12 certificate"
+ )
+
+ mock_tmp_store.assert_called_once_with(
+ "pk12 certificate",
+ reports.qdevice_certificate_import_error
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _client_cert_tool, "-m", "-c", self.mock_tmpfile.name
+ ])
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.client_initialized",
+ lambda: False
+ )
+ def test_not_initialized(self, mock_tmp_store):
+ assert_raise_library_error(
+ lambda: lib.client_import_certificate_and_key(
+ self.mock_runner,
+ "pk12 certificate"
+ ),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_NOT_INITIALIZED,
+ {
+ "model": "net",
+ }
+ )
+ )
+
+ mock_tmp_store.assert_not_called()
+ self.mock_runner.run.assert_not_called()
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.client_initialized",
+ lambda: True
+ )
+ def test_input_write_error(self, mock_tmp_store):
+ mock_tmp_store.side_effect = LibraryError
+
+ self.assertRaises(
+ LibraryError,
+ lambda: lib.client_import_certificate_and_key(
+ self.mock_runner,
+ "pk12 certificate"
+ )
+ )
+
+ mock_tmp_store.assert_called_once_with(
+ "pk12 certificate",
+ reports.qdevice_certificate_import_error
+ )
+ self.mock_runner.run.assert_not_called()
+
+ @mock.patch(
+ "pcs.lib.corosync.qdevice_net.client_initialized",
+ lambda: True
+ )
+ def test_import_error(self, mock_tmp_store):
+ mock_tmp_store.return_value = self.mock_tmpfile
+ self.mock_runner.run.return_value = ("tool output error", 1)
+
+ assert_raise_library_error(
+ lambda: lib.client_import_certificate_and_key(
+ self.mock_runner,
+ "pk12 certificate"
+ ),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
+ {
+ "reason": "tool output error",
+ }
+ )
+ )
+
+ mock_tmp_store.assert_called_once_with(
+ "pk12 certificate",
+ reports.qdevice_certificate_import_error
+ )
+ mock_tmp_store.assert_called_once_with(
+ "pk12 certificate",
+ reports.qdevice_certificate_import_error
+ )
+ self.mock_runner.run.assert_called_once_with([
+ _client_cert_tool, "-m", "-c", self.mock_tmpfile.name
+ ])
+
+
+class RemoteQdeviceGetCaCertificate(TestCase):
+ def test_success(self):
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ expected_result = "abcd".encode("utf-8")
+ mock_communicator.call_host.return_value = base64.b64encode(
+ expected_result
+ )
+
+ result = lib.remote_qdevice_get_ca_certificate(
+ mock_communicator,
+ "qdevice host"
+ )
+ self.assertEqual(result, expected_result)
+
+ mock_communicator.call_host.assert_called_once_with(
+ "qdevice host",
+ "remote/qdevice_net_get_ca_certificate",
+ None
+ )
+
+ def test_decode_error(self):
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ mock_communicator.call_host.return_value = "error"
+
+ assert_raise_library_error(
+ lambda: lib.remote_qdevice_get_ca_certificate(
+ mock_communicator,
+ "qdevice host"
+ ),
+ (
+ severity.ERROR,
+ report_codes.INVALID_RESPONSE_FORMAT,
+ {
+ "node": "qdevice host",
+ }
+ )
+ )
+
+ def test_comunication_error(self):
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ mock_communicator.call_host.side_effect = NodeCommunicationException(
+ "qdevice host", "command", "reason"
+ )
+
+ self.assertRaises(
+ NodeCommunicationException,
+ lambda: lib.remote_qdevice_get_ca_certificate(
+ mock_communicator,
+ "qdevice host"
+ )
+ )
+
+
+class RemoteClientSetupTest(TestCase):
+ def test_success(self):
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ node = "node address"
+ ca_cert = "CA certificate".encode("utf-8")
+
+ lib.remote_client_setup(mock_communicator, node, ca_cert)
+
+ mock_communicator.call_node.assert_called_once_with(
+ node,
+ "remote/qdevice_net_client_init_certificate_storage",
+ "ca_certificate={0}".format(
+ cert_to_url(ca_cert)
+ )
+ )
+
+ def test_comunication_error(self):
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ mock_communicator.call_node.side_effect = NodeCommunicationException(
+ "node address", "command", "reason"
+ )
+
+ self.assertRaises(
+ NodeCommunicationException,
+ lambda: lib.remote_client_setup(
+ mock_communicator,
+ "node address",
+ "ca cert".encode("utf-8")
+ )
+ )
+
+
+class RemoteSignCertificateRequestTest(TestCase):
+ def test_success(self):
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ cert_request = "request".encode("utf-8")
+ expected_result = "abcd".encode("utf-8")
+ host = "qdevice host"
+ cluster_name = "ClusterName"
+ mock_communicator.call_host.return_value = base64.b64encode(
+ expected_result
+ )
+
+ result = lib.remote_sign_certificate_request(
+ mock_communicator,
+ host,
+ cert_request,
+ cluster_name
+ )
+ self.assertEqual(result, expected_result)
+
+ mock_communicator.call_host.assert_called_once_with(
+ host,
+ "remote/qdevice_net_sign_node_certificate",
+ "certificate_request={0}&cluster_name={1}".format(
+ cert_to_url(cert_request),
+ cluster_name
+ )
+ )
+
+ def test_decode_error(self):
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ mock_communicator.call_host.return_value = "error"
+
+ assert_raise_library_error(
+ lambda: lib.remote_sign_certificate_request(
+ mock_communicator,
+ "qdevice host",
+ "cert request".encode("utf-8"),
+ "cluster name"
+ ),
+ (
+ severity.ERROR,
+ report_codes.INVALID_RESPONSE_FORMAT,
+ {
+ "node": "qdevice host",
+ }
+ )
+ )
+
+ def test_comunication_error(self):
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ mock_communicator.call_host.side_effect = NodeCommunicationException(
+ "qdevice host", "command", "reason"
+ )
+
+ self.assertRaises(
+ NodeCommunicationException,
+ lambda: lib.remote_sign_certificate_request(
+ mock_communicator,
+ "qdevice host",
+ "cert request".encode("utf-8"),
+ "cluster name"
+ )
+ )
+
+
+class RemoteClientImportCertificateAndKeyTest(TestCase):
+ def test_success(self):
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ node = "node address"
+ pk12_cert = "pk12 certificate".encode("utf-8")
+
+ lib.remote_client_import_certificate_and_key(
+ mock_communicator,
+ node,
+ pk12_cert
+ )
+
+ mock_communicator.call_node.assert_called_once_with(
+ node,
+ "remote/qdevice_net_client_import_certificate",
+ "certificate={0}".format(
+ cert_to_url(pk12_cert)
+ )
+ )
+
+ def test_comunication_error(self):
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ mock_communicator.call_node.side_effect = NodeCommunicationException(
+ "node address", "command", "reason"
+ )
+
+ self.assertRaises(
+ NodeCommunicationException,
+ lambda: lib.remote_client_import_certificate_and_key(
+ mock_communicator,
+ "node address",
+ "pk12 cert".encode("utf-8")
+ )
+ )
+
+
+class RemoteClientDestroy(TestCase):
+ def test_success(self):
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ node = "node address"
+
+ lib.remote_client_destroy(mock_communicator, node)
+
+ mock_communicator.call_node.assert_called_once_with(
+ node,
+ "remote/qdevice_net_client_destroy",
+ None
+ )
+
+ def test_comunication_error(self):
+ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ mock_communicator.call_node.side_effect = NodeCommunicationException(
+ "node address", "command", "reason"
+ )
+
+ self.assertRaises(
+ NodeCommunicationException,
+ lambda: lib.remote_client_destroy(mock_communicator, "node address")
+ )
+
+
+class GetOutputCertificateTest(TestCase):
+ def setUp(self):
+ self.file_path = get_test_resource("qdevice-certs/qnetd-cacert.crt")
+ self.file_data = open(self.file_path, "rb").read()
+
+ def test_success(self):
+ cert_tool_output = """
+some line
+Certificate stored in {0}
+some other line
+ """.format(self.file_path)
+ report_func = mock.MagicMock()
+
+ self.assertEqual(
+ self.file_data,
+ lib._get_output_certificate(cert_tool_output, report_func)
+ )
+ report_func.assert_not_called()
+
+ def test_success_request(self):
+ cert_tool_output = """
+some line
+Certificate request stored in {0}
+some other line
+ """.format(self.file_path)
+ report_func = mock.MagicMock()
+
+ self.assertEqual(
+ self.file_data,
+ lib._get_output_certificate(cert_tool_output, report_func)
+ )
+ report_func.assert_not_called()
+
+ def test_message_not_found(self):
+ cert_tool_output = "some rubbish output"
+ report_func = reports.qdevice_certificate_import_error
+
+ assert_raise_library_error(
+ lambda: lib._get_output_certificate(
+ cert_tool_output,
+ report_func
+ ),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
+ {
+ "reason": cert_tool_output,
+ }
+ )
+ )
+
+ def test_cannot_read_file(self):
+ cert_tool_output = """
+some line
+Certificate request stored in {0}.bad
+some other line
+ """.format(self.file_path)
+ report_func = reports.qdevice_certificate_import_error
+
+ assert_raise_library_error(
+ lambda: lib._get_output_certificate(
+ cert_tool_output,
+ report_func
+ ),
+ (
+ severity.ERROR,
+ report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
+ {
+ "reason": "{0}.bad: No such file or directory".format(
+ self.file_path
+ ),
+ }
+ )
+ )
+
diff --git a/pcs/test/test_lib_env.py b/pcs/test/test_lib_env.py
index fbaac09..c6322b7 100644
--- a/pcs/test/test_lib_env.py
+++ b/pcs/test/test_lib_env.py
@@ -7,8 +7,13 @@ from __future__ import (
from unittest import TestCase
import logging
+from lxml import etree
-from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.assertions import (
+ assert_raise_library_error,
+ assert_xml_equal,
+ assert_report_item_list_equal,
+)
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.test.tools.misc import get_test_resource as rc
from pcs.test.tools.pcs_mock import mock
@@ -82,13 +87,13 @@ class LibraryEnvironmentTest(TestCase):
self.assertFalse(env.is_cib_live)
- self.assertEqual(cib_data, env.get_cib_xml())
+ self.assertEqual(cib_data, env._get_cib_xml())
self.assertEqual(0, mock_get_cib.call_count)
- env.push_cib_xml(new_cib_data)
+ env._push_cib_xml(new_cib_data)
self.assertEqual(0, mock_push_cib.call_count)
- self.assertEqual(new_cib_data, env.get_cib_xml())
+ self.assertEqual(new_cib_data, env._get_cib_xml())
self.assertEqual(0, mock_get_cib.call_count)
@mock.patch("pcs.lib.env.replace_cib_configuration_xml")
@@ -101,19 +106,153 @@ class LibraryEnvironmentTest(TestCase):
self.assertTrue(env.is_cib_live)
- self.assertEqual(cib_data, env.get_cib_xml())
+ self.assertEqual(cib_data, env._get_cib_xml())
self.assertEqual(1, mock_get_cib.call_count)
- env.push_cib_xml(new_cib_data)
+ env._push_cib_xml(new_cib_data)
self.assertEqual(1, mock_push_cib.call_count)
+ @mock.patch("pcs.lib.env.ensure_cib_version")
+ @mock.patch("pcs.lib.env.get_cib_xml")
+ def test_get_cib_no_version_live(
+ self, mock_get_cib_xml, mock_ensure_cib_version
+ ):
+ mock_get_cib_xml.return_value = '<cib/>'
+ env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ assert_xml_equal('<cib/>', etree.tostring(env.get_cib()).decode())
+ self.assertEqual(1, mock_get_cib_xml.call_count)
+ self.assertEqual(0, mock_ensure_cib_version.call_count)
+ self.assertFalse(env.cib_upgraded)
+
+ @mock.patch("pcs.lib.env.ensure_cib_version")
+ @mock.patch("pcs.lib.env.get_cib_xml")
+ def test_get_cib_upgrade_live(
+ self, mock_get_cib_xml, mock_ensure_cib_version
+ ):
+ mock_get_cib_xml.return_value = '<cib/>'
+ mock_ensure_cib_version.return_value = etree.XML('<new_cib/>')
+ env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ assert_xml_equal(
+ '<new_cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
+ )
+ self.assertEqual(1, mock_get_cib_xml.call_count)
+ self.assertEqual(1, mock_ensure_cib_version.call_count)
+ self.assertTrue(env.cib_upgraded)
+
+ @mock.patch("pcs.lib.env.ensure_cib_version")
+ @mock.patch("pcs.lib.env.get_cib_xml")
+ def test_get_cib_no_upgrade_live(
+ self, mock_get_cib_xml, mock_ensure_cib_version
+ ):
+ mock_get_cib_xml.return_value = '<cib/>'
+ mock_ensure_cib_version.return_value = None
+ env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ assert_xml_equal(
+ '<cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
+ )
+ self.assertEqual(1, mock_get_cib_xml.call_count)
+ self.assertEqual(1, mock_ensure_cib_version.call_count)
+ self.assertFalse(env.cib_upgraded)
+
+ @mock.patch("pcs.lib.env.ensure_cib_version")
+ @mock.patch("pcs.lib.env.get_cib_xml")
+ def test_get_cib_no_version_file(
+ self, mock_get_cib_xml, mock_ensure_cib_version
+ ):
+ env = LibraryEnvironment(
+ self.mock_logger, self.mock_reporter, cib_data='<cib/>'
+ )
+ assert_xml_equal('<cib/>', etree.tostring(env.get_cib()).decode())
+ self.assertEqual(0, mock_get_cib_xml.call_count)
+ self.assertEqual(0, mock_ensure_cib_version.call_count)
+ self.assertFalse(env.cib_upgraded)
+
+ @mock.patch("pcs.lib.env.ensure_cib_version")
+ @mock.patch("pcs.lib.env.get_cib_xml")
+ def test_get_cib_upgrade_file(
+ self, mock_get_cib_xml, mock_ensure_cib_version
+ ):
+ mock_ensure_cib_version.return_value = etree.XML('<new_cib/>')
+ env = LibraryEnvironment(
+ self.mock_logger, self.mock_reporter, cib_data='<cib/>'
+ )
+ assert_xml_equal(
+ '<new_cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
+ )
+ self.assertEqual(0, mock_get_cib_xml.call_count)
+ self.assertEqual(1, mock_ensure_cib_version.call_count)
+ self.assertTrue(env.cib_upgraded)
+
+ @mock.patch("pcs.lib.env.ensure_cib_version")
+ @mock.patch("pcs.lib.env.get_cib_xml")
+ def test_get_cib_no_upgrade_file(
+ self, mock_get_cib_xml, mock_ensure_cib_version
+ ):
+ mock_ensure_cib_version.return_value = None
+ env = LibraryEnvironment(
+ self.mock_logger, self.mock_reporter, cib_data='<cib/>'
+ )
+ assert_xml_equal(
+ '<cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
+ )
+ self.assertEqual(0, mock_get_cib_xml.call_count)
+ self.assertEqual(1, mock_ensure_cib_version.call_count)
+ self.assertFalse(env.cib_upgraded)
+
+ @mock.patch("pcs.lib.env.replace_cib_configuration_xml")
+ @mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: "mock cmd runner"
+ )
+ def test_push_cib_not_upgraded_live(self, mock_replace_cib):
+ env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ env.push_cib(etree.XML('<cib/>'))
+ mock_replace_cib.assert_called_once_with(
+ "mock cmd runner", '<cib/>', False
+ )
+ self.assertEqual([], env.report_processor.report_item_list)
+
+ @mock.patch("pcs.lib.env.replace_cib_configuration_xml")
+ @mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: "mock cmd runner"
+ )
+ def test_push_cib_upgraded_live(self, mock_replace_cib):
+ env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ env._cib_upgraded = True
+ env.push_cib(etree.XML('<cib/>'))
+ mock_replace_cib.assert_called_once_with(
+ "mock cmd runner", '<cib/>', True
+ )
+ assert_report_item_list_equal(
+ env.report_processor.report_item_list,
+ [(
+ severity.INFO,
+ report_codes.CIB_UPGRADE_SUCCESSFUL,
+ {}
+ )]
+ )
+
+ @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
@mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
@mock.patch("pcs.lib.env.reload_corosync_config")
@mock.patch("pcs.lib.env.distribute_corosync_conf")
@mock.patch("pcs.lib.env.get_local_corosync_conf")
+ @mock.patch.object(
+ LibraryEnvironment,
+ "node_communicator",
+ lambda self: "mock node communicator"
+ )
+ @mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: "mock cmd runner"
+ )
def test_corosync_conf_set(
self, mock_get_corosync, mock_distribute, mock_reload,
- mock_check_offline
+ mock_check_offline, mock_qdevice_reload
):
corosync_data = "totem {\n version: 2\n}\n"
new_corosync_data = "totem {\n version: 3\n}\n"
@@ -138,8 +277,61 @@ class LibraryEnvironmentTest(TestCase):
self.assertEqual(0, mock_get_corosync.call_count)
mock_check_offline.assert_not_called()
mock_reload.assert_not_called()
+ mock_qdevice_reload.assert_not_called()
+
+ @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
+ @mock.patch("pcs.lib.env.reload_corosync_config")
+ @mock.patch("pcs.lib.env.is_service_running")
+ @mock.patch("pcs.lib.env.distribute_corosync_conf")
+ @mock.patch("pcs.lib.env.get_local_corosync_conf")
+ @mock.patch.object(
+ CorosyncConfigFacade,
+ "get_nodes",
+ lambda self: "mock node list"
+ )
+ @mock.patch.object(
+ LibraryEnvironment,
+ "node_communicator",
+ lambda self: "mock node communicator"
+ )
+ @mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: "mock cmd runner"
+ )
+ def test_corosync_conf_not_set_online(
+ self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
+ mock_qdevice_reload
+ ):
+ corosync_data = open(rc("corosync.conf")).read()
+ new_corosync_data = corosync_data.replace("version: 2", "version: 3")
+ mock_get_corosync.return_value = corosync_data
+ mock_is_running.return_value = True
+ env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+ self.assertTrue(env.is_corosync_conf_live)
+
+ self.assertEqual(corosync_data, env.get_corosync_conf_data())
+ self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
+ self.assertEqual(2, mock_get_corosync.call_count)
+
+ env.push_corosync_conf(
+ CorosyncConfigFacade.from_string(new_corosync_data)
+ )
+ mock_distribute.assert_called_once_with(
+ "mock node communicator",
+ self.mock_reporter,
+ "mock node list",
+ new_corosync_data,
+ False
+ )
+ mock_is_running.assert_called_once_with("mock cmd runner", "corosync")
+ mock_reload.assert_called_once_with("mock cmd runner")
+ mock_qdevice_reload.assert_not_called()
+ @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
@mock.patch("pcs.lib.env.reload_corosync_config")
+ @mock.patch("pcs.lib.env.is_service_running")
@mock.patch("pcs.lib.env.distribute_corosync_conf")
@mock.patch("pcs.lib.env.get_local_corosync_conf")
@mock.patch.object(
@@ -157,12 +349,14 @@ class LibraryEnvironmentTest(TestCase):
"cmd_runner",
lambda self: "mock cmd runner"
)
- def test_corosync_conf_not_set(
- self, mock_get_corosync, mock_distribute, mock_reload
+ def test_corosync_conf_not_set_offline(
+ self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
+ mock_qdevice_reload
):
corosync_data = open(rc("corosync.conf")).read()
new_corosync_data = corosync_data.replace("version: 2", "version: 3")
mock_get_corosync.return_value = corosync_data
+ mock_is_running.return_value = False
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assertTrue(env.is_corosync_conf_live)
@@ -181,10 +375,70 @@ class LibraryEnvironmentTest(TestCase):
new_corosync_data,
False
)
+ mock_is_running.assert_called_once_with("mock cmd runner", "corosync")
+ mock_reload.assert_not_called()
+ mock_qdevice_reload.assert_not_called()
+
+ @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
+ @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
+ @mock.patch("pcs.lib.env.reload_corosync_config")
+ @mock.patch("pcs.lib.env.is_service_running")
+ @mock.patch("pcs.lib.env.distribute_corosync_conf")
+ @mock.patch("pcs.lib.env.get_local_corosync_conf")
+ @mock.patch.object(
+ CorosyncConfigFacade,
+ "get_nodes",
+ lambda self: "mock node list"
+ )
+ @mock.patch.object(
+ LibraryEnvironment,
+ "node_communicator",
+ lambda self: "mock node communicator"
+ )
+ @mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: "mock cmd runner"
+ )
+ def test_corosync_conf_not_set_need_qdevice_reload_success(
+ self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
+ mock_check_offline, mock_qdevice_reload
+ ):
+ corosync_data = open(rc("corosync.conf")).read()
+ new_corosync_data = corosync_data.replace("version: 2", "version: 3")
+ mock_get_corosync.return_value = corosync_data
+ mock_is_running.return_value = True
+ env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+ self.assertTrue(env.is_corosync_conf_live)
+
+ self.assertEqual(corosync_data, env.get_corosync_conf_data())
+ self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
+ self.assertEqual(2, mock_get_corosync.call_count)
+
+ conf_facade = CorosyncConfigFacade.from_string(new_corosync_data)
+ conf_facade._need_qdevice_reload = True
+ env.push_corosync_conf(conf_facade)
+ mock_check_offline.assert_not_called()
+ mock_distribute.assert_called_once_with(
+ "mock node communicator",
+ self.mock_reporter,
+ "mock node list",
+ new_corosync_data,
+ False
+ )
mock_reload.assert_called_once_with("mock cmd runner")
+ mock_qdevice_reload.assert_called_once_with(
+ "mock node communicator",
+ self.mock_reporter,
+ "mock node list",
+ False
+ )
+ @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
@mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
@mock.patch("pcs.lib.env.reload_corosync_config")
+ @mock.patch("pcs.lib.env.is_service_running")
@mock.patch("pcs.lib.env.distribute_corosync_conf")
@mock.patch("pcs.lib.env.get_local_corosync_conf")
@mock.patch.object(
@@ -198,12 +452,13 @@ class LibraryEnvironmentTest(TestCase):
lambda self: "mock node communicator"
)
def test_corosync_conf_not_set_need_offline_success(
- self, mock_get_corosync, mock_distribute, mock_reload,
- mock_check_offline
+ self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
+ mock_check_offline, mock_qdevice_reload
):
corosync_data = open(rc("corosync.conf")).read()
new_corosync_data = corosync_data.replace("version: 2", "version: 3")
mock_get_corosync.return_value = corosync_data
+ mock_is_running.return_value = False
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assertTrue(env.is_corosync_conf_live)
@@ -229,7 +484,9 @@ class LibraryEnvironmentTest(TestCase):
False
)
mock_reload.assert_not_called()
+ mock_qdevice_reload.assert_not_called()
+ @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
@mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
@mock.patch("pcs.lib.env.reload_corosync_config")
@mock.patch("pcs.lib.env.distribute_corosync_conf")
@@ -246,7 +503,7 @@ class LibraryEnvironmentTest(TestCase):
)
def test_corosync_conf_not_set_need_offline_fail(
self, mock_get_corosync, mock_distribute, mock_reload,
- mock_check_offline
+ mock_check_offline, mock_qdevice_reload
):
corosync_data = open(rc("corosync.conf")).read()
new_corosync_data = corosync_data.replace("version: 2", "version: 3")
@@ -282,6 +539,7 @@ class LibraryEnvironmentTest(TestCase):
)
mock_distribute.assert_not_called()
mock_reload.assert_not_called()
+ mock_qdevice_reload.assert_not_called()
@mock.patch("pcs.lib.env.CommandRunner")
def test_cmd_runner_no_options(self, mock_runner):
diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
index c08b059..929a50d 100644
--- a/pcs/test/test_lib_external.py
+++ b/pcs/test/test_lib_external.py
@@ -31,7 +31,11 @@ from pcs.test.tools.pcs_mock import mock
from pcs import settings
from pcs.common import report_codes
-from pcs.lib.errors import ReportItemSeverity as severity
+from pcs.lib import reports
+from pcs.lib.errors import (
+ LibraryError,
+ ReportItemSeverity as severity
+)
import pcs.lib.external as lib
@@ -830,6 +834,126 @@ class NodeCommunicatorExceptionTransformTest(TestCase):
self.assertTrue(raised)
+class ParallelCommunicationHelperTest(TestCase):
+ def setUp(self):
+ self.mock_reporter = MockLibraryReportProcessor()
+
+ def fixture_raiser(self):
+ def raiser(x, *args, **kwargs):
+ if x == 1:
+ raise lib.NodeConnectionException("node", "command", "reason")
+ elif x == 2:
+ raise LibraryError(
+ reports.corosync_config_distribution_node_error("node")
+ )
+ return raiser
+
+ def test_success(self):
+ func = mock.MagicMock()
+ lib.parallel_nodes_communication_helper(
+ func,
+ [([x], {"a": x*2,}) for x in range(3)],
+ self.mock_reporter,
+ skip_offline_nodes=False
+ )
+ expected_calls = [
+ mock.call(0, a=0),
+ mock.call(1, a=2),
+ mock.call(2, a=4),
+ ]
+ self.assertEqual(len(expected_calls), len(func.mock_calls))
+ func.assert_has_calls(expected_calls)
+ self.assertEqual(self.mock_reporter.report_item_list, [])
+
+ def test_errors(self):
+ func = self.fixture_raiser()
+ assert_raise_library_error(
+ lambda: lib.parallel_nodes_communication_helper(
+ func,
+ [([x], {"a": x*2,}) for x in range(4)],
+ self.mock_reporter,
+ skip_offline_nodes=False
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ {
+ "node": "node",
+ "reason": "reason",
+ "command": "command",
+ },
+ report_codes.SKIP_OFFLINE_NODES
+ ),
+ (
+ severity.ERROR,
+ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+ {
+ "node": "node",
+ }
+ )
+ )
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ {
+ "node": "node",
+ "reason": "reason",
+ "command": "command",
+ },
+ report_codes.SKIP_OFFLINE_NODES
+ ),
+ (
+ severity.ERROR,
+ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+ {
+ "node": "node",
+ }
+ )
+ ]
+ )
+
+ def test_errors_skip_offline(self):
+ func = self.fixture_raiser()
+ assert_raise_library_error(
+ lambda: lib.parallel_nodes_communication_helper(
+ func,
+ [([x], {"a": x*2,}) for x in range(4)],
+ self.mock_reporter,
+ skip_offline_nodes=True
+ ),
+ (
+ severity.ERROR,
+ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+ {
+ "node": "node",
+ }
+ )
+ )
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.WARNING,
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ {
+ "node": "node",
+ "reason": "reason",
+ "command": "command",
+ }
+ ),
+ (
+ severity.ERROR,
+ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+ {
+ "node": "node",
+ }
+ )
+ ]
+ )
+
class IsCmanClusterTest(TestCase):
def template_test(self, is_cman, corosync_output, corosync_retval=0):
mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
diff --git a/pcs/test/test_lib_nodes_task.py b/pcs/test/test_lib_nodes_task.py
index 6af47d7..cff88eb 100644
--- a/pcs/test/test_lib_nodes_task.py
+++ b/pcs/test/test_lib_nodes_task.py
@@ -27,14 +27,6 @@ class DistributeCorosyncConfTest(TestCase):
self.mock_reporter = MockLibraryReportProcessor()
self.mock_communicator = "mock node communicator"
- def assert_set_remote_corosync_conf_call(self, a_call, node_ring0, config):
- self.assertEqual("set_remote_corosync_conf", a_call[0])
- self.assertEqual(3, len(a_call[1]))
- self.assertEqual(self.mock_communicator, a_call[1][0])
- self.assertEqual(node_ring0, a_call[1][1].ring0)
- self.assertEqual(config, a_call[1][2])
- self.assertEqual(0, len(a_call[2]))
-
@mock.patch("pcs.lib.nodes_task.corosync_live")
def test_success(self, mock_corosync_live):
conf_text = "test conf text"
@@ -53,21 +45,19 @@ class DistributeCorosyncConfTest(TestCase):
corosync_live_calls = [
mock.call.set_remote_corosync_conf(
- "mock node communicator", nodes[0], conf_text
+ "mock node communicator", node_addrs_list[0], conf_text
),
mock.call.set_remote_corosync_conf(
- "mock node communicator", nodes[1], conf_text
+ "mock node communicator", node_addrs_list[1], conf_text
),
]
self.assertEqual(
len(corosync_live_calls),
len(mock_corosync_live.mock_calls)
)
- self.assert_set_remote_corosync_conf_call(
- mock_corosync_live.mock_calls[0], nodes[0], conf_text
- )
- self.assert_set_remote_corosync_conf_call(
- mock_corosync_live.mock_calls[1], nodes[1], conf_text
+ mock_corosync_live.set_remote_corosync_conf.assert_has_calls(
+ corosync_live_calls,
+ any_order=True
)
assert_report_item_list_equal(
@@ -145,12 +135,10 @@ class DistributeCorosyncConfTest(TestCase):
len(corosync_live_calls),
len(mock_corosync_live.mock_calls)
)
- self.assert_set_remote_corosync_conf_call(
- mock_corosync_live.mock_calls[0], nodes[0], conf_text
- )
- self.assert_set_remote_corosync_conf_call(
- mock_corosync_live.mock_calls[1], nodes[1], conf_text
- )
+ mock_corosync_live.set_remote_corosync_conf.assert_has_calls([
+ mock.call("mock node communicator", node_addrs_list[0], conf_text),
+ mock.call("mock node communicator", node_addrs_list[1], conf_text),
+ ], any_order=True)
assert_report_item_list_equal(
self.mock_reporter.report_item_list,
@@ -221,12 +209,10 @@ class DistributeCorosyncConfTest(TestCase):
len(corosync_live_calls),
len(mock_corosync_live.mock_calls)
)
- self.assert_set_remote_corosync_conf_call(
- mock_corosync_live.mock_calls[0], nodes[0], conf_text
- )
- self.assert_set_remote_corosync_conf_call(
- mock_corosync_live.mock_calls[1], nodes[1], conf_text
- )
+ mock_corosync_live.set_remote_corosync_conf.assert_has_calls([
+ mock.call("mock node communicator", node_addrs_list[0], conf_text),
+ mock.call("mock node communicator", node_addrs_list[1], conf_text),
+ ], any_order=True)
assert_report_item_list_equal(
self.mock_reporter.report_item_list,
@@ -452,6 +438,134 @@ class CheckCorosyncOfflineTest(TestCase):
)
+ at mock.patch("pcs.lib.nodes_task.qdevice_client.remote_client_stop")
+ at mock.patch("pcs.lib.nodes_task.qdevice_client.remote_client_start")
+class QdeviceReloadOnNodesTest(TestCase):
+ def setUp(self):
+ self.mock_reporter = MockLibraryReportProcessor()
+ self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+
+ def test_success(self, mock_remote_start, mock_remote_stop):
+ nodes = ["node1", "node2"]
+ node_addrs_list = NodeAddressesList(
+ [NodeAddresses(addr) for addr in nodes]
+ )
+
+ lib.qdevice_reload_on_nodes(
+ self.mock_communicator,
+ self.mock_reporter,
+ node_addrs_list
+ )
+
+ node_calls = [
+ mock.call(
+ self.mock_reporter, self.mock_communicator, node_addrs_list[0]
+ ),
+ mock.call(
+ self.mock_reporter, self.mock_communicator, node_addrs_list[1]
+ ),
+ ]
+ self.assertEqual(len(node_calls), len(mock_remote_stop.mock_calls))
+ self.assertEqual(len(node_calls), len(mock_remote_start.mock_calls))
+ mock_remote_stop.assert_has_calls(node_calls, any_order=True)
+ mock_remote_start.assert_has_calls(node_calls, any_order=True)
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CLIENT_RELOAD_STARTED,
+ {}
+ ),
+ ]
+ )
+
+ def test_fail_doesnt_prevent_start(
+ self, mock_remote_start, mock_remote_stop
+ ):
+ nodes = ["node1", "node2"]
+ node_addrs_list = NodeAddressesList(
+ [NodeAddresses(addr) for addr in nodes]
+ )
+ def raiser(reporter, communicator, node):
+ if node.ring0 == nodes[1]:
+ raise NodeAuthenticationException(
+ node.label, "command", "HTTP error: 401"
+ )
+ mock_remote_stop.side_effect = raiser
+
+ assert_raise_library_error(
+ lambda: lib.qdevice_reload_on_nodes(
+ self.mock_communicator,
+ self.mock_reporter,
+ node_addrs_list
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+ {
+ "node": nodes[1],
+ "command": "command",
+ "reason" : "HTTP error: 401",
+ },
+ report_codes.SKIP_OFFLINE_NODES
+ )
+ )
+
+ node_calls = [
+ mock.call(
+ self.mock_reporter, self.mock_communicator, node_addrs_list[0]
+ ),
+ mock.call(
+ self.mock_reporter, self.mock_communicator, node_addrs_list[1]
+ ),
+ ]
+ self.assertEqual(len(node_calls), len(mock_remote_stop.mock_calls))
+ self.assertEqual(len(node_calls), len(mock_remote_start.mock_calls))
+ mock_remote_stop.assert_has_calls(node_calls, any_order=True)
+ mock_remote_start.assert_has_calls(node_calls, any_order=True)
+
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.QDEVICE_CLIENT_RELOAD_STARTED,
+ {}
+ ),
+ # why the same error twice?
+ # 1. Tested piece of code calls a function which puts an error
+ # into the reporter. The reporter raises an exception. The
+ # exception is caught in the tested piece of code, stored, and
+ # later put to reporter again.
+ # 2. Mock reporter remembers everything that goes through it
+ # and by the mechanism described in 1 the error goes through it
+ # twice.
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+ {
+ "node": nodes[1],
+ "command": "command",
+ "reason" : "HTTP error: 401",
+ },
+ report_codes.SKIP_OFFLINE_NODES
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+ {
+ "node": nodes[1],
+ "command": "command",
+ "reason" : "HTTP error: 401",
+ },
+ report_codes.SKIP_OFFLINE_NODES
+ ),
+ ]
+ )
+
+
class NodeCheckAuthTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
diff --git a/pcs/test/test_lib_pacemaker.py b/pcs/test/test_lib_pacemaker.py
index 85d2034..0edee5c 100644
--- a/pcs/test/test_lib_pacemaker.py
+++ b/pcs/test/test_lib_pacemaker.py
@@ -206,12 +206,28 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
mock_runner.run.assert_called_once_with(
[
- self.path("cibadmin"), "--replace", "--scope", "configuration",
- "--verbose", "--xml-pipe"
+ self.path("cibadmin"), "--replace", "--verbose", "--xml-pipe",
+ "--scope", "configuration"
],
stdin_string=xml
)
+ def test_cib_upgraded(self):
+ xml = "<xml/>"
+ expected_output = "expected output"
+ expected_retval = 0
+ mock_runner = mock.MagicMock(spec_set=CommandRunner)
+ mock_runner.run.return_value = (expected_output, expected_retval)
+
+ lib.replace_cib_configuration(
+ mock_runner, XmlManipulation.from_str(xml).tree, True
+ )
+
+ mock_runner.run.assert_called_once_with(
+ [self.path("cibadmin"), "--replace", "--verbose", "--xml-pipe"],
+ stdin_string=xml
+ )
+
def test_error(self):
xml = "<xml/>"
expected_error = "expected error"
@@ -237,8 +253,8 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
mock_runner.run.assert_called_once_with(
[
- self.path("cibadmin"), "--replace", "--scope", "configuration",
- "--verbose", "--xml-pipe"
+ self.path("cibadmin"), "--replace", "--verbose", "--xml-pipe",
+ "--scope", "configuration"
],
stdin_string=xml
)
diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py
index 54c5669..e3c1401 100644
--- a/pcs/test/test_lib_sbd.py
+++ b/pcs/test/test_lib_sbd.py
@@ -360,7 +360,7 @@ class EnableSbdServiceTest(TestCase):
node = NodeAddresses("node1")
lib_sbd.enable_sbd_service(mock_communicator, node)
mock_communicator.call_node.assert_called_once_with(
- node, "remote/sbd_enable", ""
+ node, "remote/sbd_enable", None
)
@@ -408,7 +408,7 @@ class DisableSbdServiceTest(TestCase):
node = NodeAddresses("node1")
lib_sbd.disable_sbd_service(mock_communicator, node)
mock_communicator.call_node.assert_called_once_with(
- node, "remote/sbd_disable", ""
+ node, "remote/sbd_disable", None
)
@@ -456,7 +456,7 @@ class SetStonithWatchdogTimeoutToZeroTest(TestCase):
node = NodeAddresses("node1")
lib_sbd.set_stonith_watchdog_timeout_to_zero(mock_communicator, node)
mock_communicator.call_node.assert_called_once_with(
- node, "remote/set_stonith_watchdog_timeout_to_zero", ""
+ node, "remote/set_stonith_watchdog_timeout_to_zero", None
)
@@ -520,7 +520,7 @@ class RemoveStonithWatchdogTimeoutTest(TestCase):
node = NodeAddresses("node1")
lib_sbd.remove_stonith_watchdog_timeout(mock_communicator, node)
mock_communicator.call_node.assert_called_once_with(
- node, "remote/remove_stonith_watchdog_timeout", ""
+ node, "remote/remove_stonith_watchdog_timeout", None
)
@@ -584,7 +584,7 @@ class GetSbdConfigTest(TestCase):
node = NodeAddresses("node1")
lib_sbd.get_sbd_config(mock_communicator, node)
mock_communicator.call_node.assert_called_once_with(
- node, "remote/get_sbd_config", ""
+ node, "remote/get_sbd_config", None
)
diff --git a/pcs/test/test_quorum.py b/pcs/test/test_quorum.py
index 8167ad9..86de4c6 100644
--- a/pcs/test/test_quorum.py
+++ b/pcs/test/test_quorum.py
@@ -144,7 +144,7 @@ class DeviceAddTest(TestBase):
def test_success_model_only(self):
self.assert_pcs_success(
- "quorum device add model net host=127.0.0.1 algorithm=ffsplit"
+ "quorum device add model net host=127.0.0.1 algorithm=lms"
)
self.assert_pcs_success(
"quorum config",
@@ -152,7 +152,7 @@ class DeviceAddTest(TestBase):
Options:
Device:
Model: net
- algorithm: ffsplit
+ algorithm: lms
host: 127.0.0.1
"""
)
@@ -167,6 +167,7 @@ Device:
Options:
Device:
timeout: 12345
+ votes: 1
Model: net
algorithm: ffsplit
host: 127.0.0.1
@@ -193,7 +194,7 @@ Error: required option 'host' is missing
self.assert_pcs_fail(
"quorum device add a=b timeout=-1 model net host=127.0.0.1 algorithm=x c=d",
"""\
-Error: 'x' is not a valid algorithm value, use 2nodelms, ffsplit, lms, use --force to override
+Error: 'x' is not a valid algorithm value, use ffsplit, lms, use --force to override
Error: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker, use --force to override
Error: invalid quorum device option 'a', allowed options are: sync_timeout, timeout, use --force to override
Error: '-1' is not a valid timeout value, use positive integer, use --force to override
@@ -203,7 +204,7 @@ Error: '-1' is not a valid timeout value, use positive integer, use --force to o
self.assert_pcs_success(
"quorum device add a=b timeout=-1 model net host=127.0.0.1 algorithm=x c=d --force",
"""\
-Warning: 'x' is not a valid algorithm value, use 2nodelms, ffsplit, lms
+Warning: 'x' is not a valid algorithm value, use ffsplit, lms
Warning: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker
Warning: invalid quorum device option 'a', allowed options are: sync_timeout, timeout
Warning: '-1' is not a valid timeout value, use positive integer
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index e8c0813..2fa5088 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -1541,6 +1541,9 @@ Ordering Constraints:
Colocation Constraints:
Ticket Constraints:
+Alerts:
+ No alerts defined
+
Resources Defaults:
No defaults set
Operations Defaults:
@@ -1704,6 +1707,9 @@ Ordering Constraints:
Colocation Constraints:
Ticket Constraints:
+Alerts:
+ No alerts defined
+
Resources Defaults:
No defaults set
Operations Defaults:
diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
index 479c8e9..a6ee2f5 100644
--- a/pcs/test/test_stonith.py
+++ b/pcs/test/test_stonith.py
@@ -149,6 +149,9 @@ Ordering Constraints:
Colocation Constraints:
Ticket Constraints:
+Alerts:
+ No alerts defined
+
Resources Defaults:
No defaults set
Operations Defaults:
diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py
index c61a2b8..819f8ee 100644
--- a/pcs/test/test_utils.py
+++ b/pcs/test/test_utils.py
@@ -967,1359 +967,1607 @@ class UtilsTest(unittest.TestCase):
}
)
- def test_parse_cman_quorum_info(self):
- parsed = utils.parse_cman_quorum_info("""\
-Version: 6.2.0
-Config Version: 23
-Cluster Name: cluster66
-Cluster Id: 22265
-Cluster Member: Yes
-Cluster Generation: 3612
-Membership state: Cluster-Member
-Nodes: 3
-Expected votes: 3
-Total votes: 3
-Node votes: 1
-Quorum: 2
-Active subsystems: 8
-Flags:
-Ports Bound: 0
-Node name: rh66-node2
-Node ID: 2
-Multicast addresses: 239.192.86.80
-Node addresses: 192.168.122.61
----Votes---
-1 M 3 rh66-node1
-2 M 2 rh66-node2
-3 M 1 rh66-node3
-""")
- self.assertEqual(True, parsed["quorate"])
- self.assertEqual(2, parsed["quorum"])
+ def test_get_operations_from_transitions(self):
+ transitions = utils.parse(rc("transitions01.xml"))
self.assertEqual(
[
- {"name": "rh66-node1", "votes": 3, "local": False},
- {"name": "rh66-node2", "votes": 2, "local": True},
- {"name": "rh66-node3", "votes": 1, "local": False},
+ {
+ 'id': 'dummy',
+ 'long_id': 'dummy',
+ 'operation': 'stop',
+ 'on_node': 'rh7-3',
+ },
+ {
+ 'id': 'dummy',
+ 'long_id': 'dummy',
+ 'operation': 'start',
+ 'on_node': 'rh7-2',
+ },
+ {
+ 'id': 'd0',
+ 'long_id': 'd0:1',
+ 'operation': 'stop',
+ 'on_node': 'rh7-1',
+ },
+ {
+ 'id': 'd0',
+ 'long_id': 'd0:1',
+ 'operation': 'start',
+ 'on_node': 'rh7-2',
+ },
+ {
+ 'id': 'state',
+ 'long_id': 'state:0',
+ 'operation': 'stop',
+ 'on_node': 'rh7-3',
+ },
+ {
+ 'id': 'state',
+ 'long_id': 'state:0',
+ 'operation': 'start',
+ 'on_node': 'rh7-2',
+ },
],
- parsed["node_list"]
+ utils.get_operations_from_transitions(transitions)
)
- parsed = utils.parse_cman_quorum_info("""\
-Version: 6.2.0
-Config Version: 23
-Cluster Name: cluster66
-Cluster Id: 22265
-Cluster Member: Yes
-Cluster Generation: 3612
-Membership state: Cluster-Member
-Nodes: 3
-Expected votes: 3
-Total votes: 3
-Node votes: 1
-Quorum: 2 Activity blocked
-Active subsystems: 8
-Flags:
-Ports Bound: 0
-Node name: rh66-node1
-Node ID: 1
-Multicast addresses: 239.192.86.80
-Node addresses: 192.168.122.61
----Votes---
-1 M 3 rh66-node1
-2 X 2 rh66-node2
-3 X 1 rh66-node3
-""")
- self.assertEqual(False, parsed["quorate"])
- self.assertEqual(2, parsed["quorum"])
+ transitions = utils.parse(rc("transitions02.xml"))
self.assertEqual(
[
- {"name": "rh66-node1", "votes": 3, "local": True},
+ {
+ "id": "RemoteNode",
+ "long_id": "RemoteNode",
+ "operation": "stop",
+ "on_node": "virt-143",
+ },
+ {
+ "id": "RemoteNode",
+ "long_id": "RemoteNode",
+ "operation": "migrate_to",
+ "on_node": "virt-143",
+ },
+ {
+ "id": "RemoteNode",
+ "long_id": "RemoteNode",
+ "operation": "migrate_from",
+ "on_node": "virt-142",
+ },
+ {
+ "id": "dummy8",
+ "long_id": "dummy8",
+ "operation": "stop",
+ "on_node": "virt-143",
+ },
+ {
+ "id": "dummy8",
+ "long_id": "dummy8",
+ "operation": "start",
+ "on_node": "virt-142",
+ }
],
- parsed["node_list"]
+ utils.get_operations_from_transitions(transitions)
)
- parsed = utils.parse_cman_quorum_info("")
- self.assertEqual(None, parsed)
-
- parsed = utils.parse_cman_quorum_info("""\
-Version: 6.2.0
-Config Version: 23
-Cluster Name: cluster66
-Cluster Id: 22265
-Cluster Member: Yes
-Cluster Generation: 3612
-Membership state: Cluster-Member
-Nodes: 3
-Expected votes: 3
-Total votes: 3
-Node votes: 1
-Quorum:
-Active subsystems: 8
-Flags:
-Ports Bound: 0
-Node name: rh66-node2
-Node ID: 2
-Multicast addresses: 239.192.86.80
-Node addresses: 192.168.122.61
----Votes---
-1 M 3 rh66-node1
-2 M 2 rh66-node2
-3 M 1 rh66-node3
-""")
- self.assertEqual(None, parsed)
-
- parsed = utils.parse_cman_quorum_info("""\
-Version: 6.2.0
-Config Version: 23
-Cluster Name: cluster66
-Cluster Id: 22265
-Cluster Member: Yes
-Cluster Generation: 3612
-Membership state: Cluster-Member
-Nodes: 3
-Expected votes: 3
-Total votes: 3
-Node votes: 1
-Quorum: Foo
-Active subsystems: 8
-Flags:
-Ports Bound: 0
-Node name: rh66-node2
-Node ID: 2
-Multicast addresses: 239.192.86.80
-Node addresses: 192.168.122.61
----Votes---
-1 M 3 rh66-node1
-2 M 2 rh66-node2
-3 M 1 rh66-node3
-""")
- self.assertEqual(None, parsed)
-
- parsed = utils.parse_cman_quorum_info("""\
-Version: 6.2.0
-Config Version: 23
-Cluster Name: cluster66
-Cluster Id: 22265
-Cluster Member: Yes
-Cluster Generation: 3612
-Membership state: Cluster-Member
-Nodes: 3
-Expected votes: 3
-Total votes: 3
-Node votes: 1
-Quorum: 4
-Active subsystems: 8
-Flags:
-Ports Bound: 0
-Node name: rh66-node2
-Node ID: 2
-Multicast addresses: 239.192.86.80
-Node addresses: 192.168.122.61
----Votes---
-1 M 3 rh66-node1
-2 M Foo rh66-node2
-3 M 1 rh66-node3
-""")
- self.assertEqual(None, parsed)
-
- def test_parse_quorumtool_output(self):
- parsed = utils.parse_quorumtool_output("""\
-Quorum information
-------------------
-Date: Fri Jan 16 13:03:28 2015
-Quorum provider: corosync_votequorum
-Nodes: 3
-Node ID: 1
-Ring ID: 19860
-Quorate: Yes
-
-Votequorum information
-----------------------
-Expected votes: 3
-Highest expected: 3
-Total votes: 3
-Quorum: 2
-Flags: Quorate
+ def test_get_resources_location_from_operations(self):
+ cib_dom = self.get_cib_resources()
-Membership information
-----------------------
- Nodeid Votes Qdevice Name
- 1 3 NR rh70-node1
- 2 2 NR rh70-node2 (local)
- 3 1 NR rh70-node3
-""")
- self.assertEqual(True, parsed["quorate"])
- self.assertEqual(2, parsed["quorum"])
+ operations = []
self.assertEqual(
- [
- {"name": "rh70-node1", "votes": 3, "local": False},
- {"name": "rh70-node2", "votes": 2, "local": True},
- {"name": "rh70-node3", "votes": 1, "local": False},
- ],
- parsed["node_list"]
+ {},
+ utils.get_resources_location_from_operations(cib_dom, operations)
)
- parsed = utils.parse_quorumtool_output("""\
-Quorum information
-------------------
-Date: Fri Jan 16 13:03:35 2015
-Quorum provider: corosync_votequorum
-Nodes: 1
-Node ID: 1
-Ring ID: 19868
-Quorate: No
-
-Votequorum information
-----------------------
-Expected votes: 3
-Highest expected: 3
-Total votes: 1
-Quorum: 2 Activity blocked
-Flags:
+ operations = [
+ {
+ "id": "myResource",
+ "long_id": "myResource",
+ "operation": "start",
+ "on_node": "rh7-1",
+ },
+ ]
+ self.assertEqual(
+ {
+ 'myResource': {
+ 'id': 'myResource',
+ 'id_for_constraint': 'myResource',
+ 'long_id': 'myResource',
+ 'start_on_node': 'rh7-1',
+ },
+ },
+ utils.get_resources_location_from_operations(cib_dom, operations)
+ )
-Membership information
-----------------------
- Nodeid Votes Qdevice Name
- 1 1 NR rh70-node1 (local)
-""")
- self.assertEqual(False, parsed["quorate"])
- self.assertEqual(2, parsed["quorum"])
+ operations = [
+ {
+ "id": "myResource",
+ "long_id": "myResource",
+ "operation": "start",
+ "on_node": "rh7-1",
+ },
+ {
+ "id": "myResource",
+ "long_id": "myResource",
+ "operation": "start",
+ "on_node": "rh7-2",
+ },
+ {
+ "id": "myResource",
+ "long_id": "myResource",
+ "operation": "monitor",
+ "on_node": "rh7-3",
+ },
+ {
+ "id": "myResource",
+ "long_id": "myResource",
+ "operation": "stop",
+ "on_node": "rh7-3",
+ },
+ ]
self.assertEqual(
- [
- {"name": "rh70-node1", "votes": 1, "local": True},
- ],
- parsed["node_list"]
+ {
+ 'myResource': {
+ 'id': 'myResource',
+ 'id_for_constraint': 'myResource',
+ 'long_id': 'myResource',
+ 'start_on_node': 'rh7-2',
+ },
+ },
+ utils.get_resources_location_from_operations(cib_dom, operations)
)
- parsed = utils.parse_quorumtool_output("")
- self.assertEqual(None, parsed)
+ operations = [
+ {
+ "id": "myResource",
+ "long_id": "myResource",
+ "operation": "start",
+ "on_node": "rh7-1",
+ },
+ {
+ "id": "myClonedResource",
+ "long_id": "myClonedResource:0",
+ "operation": "start",
+ "on_node": "rh7-1",
+ },
+ {
+ "id": "myClonedResource",
+ "long_id": "myClonedResource:0",
+ "operation": "start",
+ "on_node": "rh7-2",
+ },
+ {
+ "id": "myClonedResource",
+ "long_id": "myClonedResource:1",
+ "operation": "start",
+ "on_node": "rh7-3",
+ },
+ ]
+ self.assertEqual(
+ {
+ 'myResource': {
+ 'id': 'myResource',
+ 'id_for_constraint': 'myResource',
+ 'long_id': 'myResource',
+ 'start_on_node': 'rh7-1',
+ },
+ 'myClonedResource:0': {
+ 'id': 'myClonedResource',
+ 'id_for_constraint': 'myClone',
+ 'long_id': 'myClonedResource:0',
+ 'start_on_node': 'rh7-2',
+ },
+ 'myClonedResource:1': {
+ 'id': 'myClonedResource',
+ 'id_for_constraint': 'myClone',
+ 'long_id': 'myClonedResource:1',
+ 'start_on_node': 'rh7-3',
+ },
+ },
+ utils.get_resources_location_from_operations(cib_dom, operations)
+ )
- parsed = utils.parse_quorumtool_output("""\
-Quorum information
-------------------
-Date: Fri Jan 16 13:03:28 2015
-Quorum provider: corosync_votequorum
-Nodes: 3
-Node ID: 1
-Ring ID: 19860
-Quorate: Yes
+ operations = [
+ {
+ "id": "myUniqueClonedResource:0",
+ "long_id": "myUniqueClonedResource:0",
+ "operation": "start",
+ "on_node": "rh7-1",
+ },
+ {
+ "id": "myUniqueClonedResource:1",
+ "long_id": "myUniqueClonedResource:1",
+ "operation": "monitor",
+ "on_node": "rh7-2",
+ },
+ {
+ "id": "myUniqueClonedResource:2",
+ "long_id": "myUniqueClonedResource:2",
+ "operation": "start",
+ "on_node": "rh7-3",
+ },
+ ]
+ self.assertEqual(
+ {
+ 'myUniqueClonedResource:0': {
+ 'id': 'myUniqueClonedResource:0',
+ 'id_for_constraint': 'myUniqueClone',
+ 'long_id': 'myUniqueClonedResource:0',
+ 'start_on_node': 'rh7-1',
+ },
+ 'myUniqueClonedResource:2': {
+ 'id': 'myUniqueClonedResource:2',
+ 'id_for_constraint': 'myUniqueClone',
+ 'long_id': 'myUniqueClonedResource:2',
+ 'start_on_node': 'rh7-3',
+ },
+ },
+ utils.get_resources_location_from_operations(cib_dom, operations)
+ )
-Votequorum information
-----------------------
-Expected votes: 3
-Highest expected: 3
-Total votes: 3
-Quorum:
-Flags: Quorate
+ operations = [
+ {
+ "id": "myMasteredGroupedResource",
+ "long_id": "myMasteredGroupedResource:0",
+ "operation": "start",
+ "on_node": "rh7-1",
+ },
+ {
+ "id": "myMasteredGroupedResource",
+ "long_id": "myMasteredGroupedResource:1",
+ "operation": "demote",
+ "on_node": "rh7-2",
+ },
+ {
+ "id": "myMasteredGroupedResource",
+ "long_id": "myMasteredGroupedResource:1",
+ "operation": "promote",
+ "on_node": "rh7-3",
+ },
+ ]
+ self.assertEqual(
+ {
+ 'myMasteredGroupedResource:0': {
+ 'id': 'myMasteredGroupedResource',
+ 'id_for_constraint': 'myGroupMaster',
+ 'long_id': 'myMasteredGroupedResource:0',
+ 'start_on_node': 'rh7-1',
+ },
+ 'myMasteredGroupedResource:1': {
+ 'id': 'myMasteredGroupedResource',
+ 'id_for_constraint': 'myGroupMaster',
+ 'long_id': 'myMasteredGroupedResource:1',
+ 'promote_on_node': 'rh7-3',
+ },
+ },
+ utils.get_resources_location_from_operations(cib_dom, operations)
+ )
-Membership information
-----------------------
- Nodeid Votes Qdevice Name
- 1 1 NR rh70-node1 (local)
- 2 1 NR rh70-node2
- 3 1 NR rh70-node3
-""")
- self.assertEqual(None, parsed)
+ operations = [
+ {
+ "id": "myResource",
+ "long_id": "myResource",
+ "operation": "stop",
+ "on_node": "rh7-1",
+ },
+ {
+ "id": "myResource",
+ "long_id": "myResource",
+ "operation": "migrate_to",
+ "on_node": "rh7-1",
+ },
+ {
+ "id": "myResource",
+ "long_id": "myResource",
+ "operation": "migrate_from",
+ "on_node": "rh7-2",
+ },
+ ]
+ self.assertEqual(
+ {
+ "myResource": {
+ "id": "myResource",
+ "id_for_constraint": "myResource",
+ "long_id": "myResource",
+ "start_on_node": "rh7-2",
+ },
+ },
+ utils.get_resources_location_from_operations(cib_dom, operations)
+ )
- parsed = utils.parse_quorumtool_output("""\
-Quorum information
-------------------
-Date: Fri Jan 16 13:03:28 2015
-Quorum provider: corosync_votequorum
-Nodes: 3
-Node ID: 1
-Ring ID: 19860
-Quorate: Yes
+ def test_is_int(self):
+ self.assertTrue(utils.is_int("-999"))
+ self.assertTrue(utils.is_int("-1"))
+ self.assertTrue(utils.is_int("0"))
+ self.assertTrue(utils.is_int("1"))
+ self.assertTrue(utils.is_int("99999"))
+ self.assertTrue(utils.is_int(" 99999 "))
+ self.assertFalse(utils.is_int("0.0"))
+ self.assertFalse(utils.is_int("-1.0"))
+ self.assertFalse(utils.is_int("-0.1"))
+ self.assertFalse(utils.is_int("0.001"))
+ self.assertFalse(utils.is_int("-999999.1"))
+ self.assertFalse(utils.is_int("0.0001"))
+ self.assertFalse(utils.is_int(""))
+ self.assertFalse(utils.is_int(" "))
+ self.assertFalse(utils.is_int("A"))
+ self.assertFalse(utils.is_int("random 15 47 text "))
-Votequorum information
-----------------------
-Expected votes: 3
-Highest expected: 3
-Total votes: 3
-Quorum: Foo
-Flags: Quorate
+ def test_dom_get_node(self):
+ cib = self.get_cib_with_nodes_minidom()
+ #assertIsNone is not supported in python 2.6
+ self.assertTrue(utils.dom_get_node(cib, "non-existing-node") is None)
+ node = utils.dom_get_node(cib, "rh7-1")
+ self.assertEqual(node.getAttribute("uname"), "rh7-1")
+ self.assertEqual(node.getAttribute("id"), "1")
-Membership information
-----------------------
- Nodeid Votes Qdevice Name
- 1 1 NR rh70-node1 (local)
- 2 1 NR rh70-node2
- 3 1 NR rh70-node3
-""")
- self.assertEqual(None, parsed)
+ def test_dom_prepare_child_element(self):
+ cib = self.get_cib_with_nodes_minidom()
+ node = cib.getElementsByTagName("node")[0]
+ self.assertEqual(len(dom_get_child_elements(node)), 0)
+ child = utils.dom_prepare_child_element(
+ node, "utilization", "rh7-1-utilization"
+ )
+ self.assertEqual(len(dom_get_child_elements(node)), 1)
+ self.assertEqual(child, dom_get_child_elements(node)[0])
+ self.assertEqual(dom_get_child_elements(node)[0].tagName, "utilization")
+ self.assertEqual(
+ dom_get_child_elements(node)[0].getAttribute("id"),
+ "rh7-1-utilization"
+ )
+ child2 = utils.dom_prepare_child_element(
+ node, "utilization", "rh7-1-utilization"
+ )
+ self.assertEqual(len(dom_get_child_elements(node)), 1)
+ self.assertEqual(child, child2)
- parsed = utils.parse_quorumtool_output("""\
-Quorum information
-------------------
-Date: Fri Jan 16 13:03:28 2015
-Quorum provider: corosync_votequorum
-Nodes: 3
-Node ID: 1
-Ring ID: 19860
-Quorate: Yes
+ def test_dom_update_nv_pair_add(self):
+ nv_set = xml.dom.minidom.parseString("<nvset/>").documentElement
+ utils.dom_update_nv_pair(nv_set, "test_name", "test_val", "prefix-")
+ self.assertEqual(len(dom_get_child_elements(nv_set)), 1)
+ pair = dom_get_child_elements(nv_set)[0]
+ self.assertEqual(pair.getAttribute("name"), "test_name")
+ self.assertEqual(pair.getAttribute("value"), "test_val")
+ self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
+ utils.dom_update_nv_pair(nv_set, "another_name", "value", "prefix2-")
+ self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
+ self.assertEqual(pair, dom_get_child_elements(nv_set)[0])
+ pair = dom_get_child_elements(nv_set)[1]
+ self.assertEqual(pair.getAttribute("name"), "another_name")
+ self.assertEqual(pair.getAttribute("value"), "value")
+ self.assertEqual(pair.getAttribute("id"), "prefix2-another_name")
-Votequorum information
-----------------------
-Expected votes: 3
-Highest expected: 3
-Total votes: 3
-Quorum: 2
-Flags: Quorate
+ def test_dom_update_nv_pair_update(self):
+ nv_set = xml.dom.minidom.parseString("""
+ <nv_set>
+ <nvpair id="prefix-test_name" name="test_name" value="test_val"/>
+ <nvpair id="prefix2-another_name" name="another_name" value="value"/>
+ </nv_set>
+ """).documentElement
+ utils.dom_update_nv_pair(nv_set, "test_name", "new_value")
+ self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
+ pair1 = dom_get_child_elements(nv_set)[0]
+ pair2 = dom_get_child_elements(nv_set)[1]
+ self.assertEqual(pair1.getAttribute("name"), "test_name")
+ self.assertEqual(pair1.getAttribute("value"), "new_value")
+ self.assertEqual(pair1.getAttribute("id"), "prefix-test_name")
+ self.assertEqual(pair2.getAttribute("name"), "another_name")
+ self.assertEqual(pair2.getAttribute("value"), "value")
+ self.assertEqual(pair2.getAttribute("id"), "prefix2-another_name")
-Membership information
-----------------------
- Nodeid Votes Qdevice Name
- 1 1 NR rh70-node1 (local)
- 2 foo NR rh70-node2
- 3 1 NR rh70-node3
-""")
- self.assertEqual(None, parsed)
+ def test_dom_update_nv_pair_remove(self):
+ nv_set = xml.dom.minidom.parseString("""
+ <nv_set>
+ <nvpair id="prefix-test_name" name="test_name" value="test_val"/>
+ <nvpair id="prefix2-another_name" name="another_name" value="value"/>
+ </nv_set>
+ """).documentElement
+ utils.dom_update_nv_pair(nv_set, "non_existing_name", "")
+ self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
+ utils.dom_update_nv_pair(nv_set, "another_name", "")
+ self.assertEqual(len(dom_get_child_elements(nv_set)), 1)
+ pair = dom_get_child_elements(nv_set)[0]
+ self.assertEqual(pair.getAttribute("name"), "test_name")
+ self.assertEqual(pair.getAttribute("value"), "test_val")
+ self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
+ utils.dom_update_nv_pair(nv_set, "test_name", "")
+ self.assertEqual(len(dom_get_child_elements(nv_set)), 0)
- def test_is_node_stop_cause_quorum_loss(self):
- quorum_info = {
- "quorate": False,
- }
+ def test_convert_args_to_tuples(self):
+ out = utils.convert_args_to_tuples(
+ ["invalid_string", "key=value", "key2=val=ue", "k e y= v a l u e "]
+ )
self.assertEqual(
- False,
- utils.is_node_stop_cause_quorum_loss(quorum_info, True)
+ out,
+ [("key", "value"), ("key2", "val=ue"), ("k e y", " v a l u e ")]
)
- quorum_info = {
- "quorate": True,
- "quorum": 1,
- "node_list": [
- {"name": "rh70-node3", "votes": 1, "local": False},
- ],
- }
- self.assertEqual(
- False,
- utils.is_node_stop_cause_quorum_loss(quorum_info, True)
+ def test_dom_update_utilization_invalid(self):
+ #commands writes to stderr
+ #we want clean test output, so we capture it
+ tmp_stderr = sys.stderr
+ sys.stderr = StringIO()
+
+ el = xml.dom.minidom.parseString("""
+ <resource id="test_id"/>
+ """).documentElement
+ self.assertRaises(
+ SystemExit,
+ utils.dom_update_utilization, el, [("name", "invalid_val")]
)
- quorum_info = {
- "quorate": True,
- "quorum": 1,
- "node_list": [
- {"name": "rh70-node3", "votes": 1, "local": True},
- ],
- }
- self.assertEqual(
- True,
- utils.is_node_stop_cause_quorum_loss(quorum_info, True)
+ self.assertRaises(
+ SystemExit,
+ utils.dom_update_utilization, el, [("name", "0.01")]
)
- quorum_info = {
- "quorate": True,
- "quorum": 4,
- "node_list": [
- {"name": "rh70-node1", "votes": 3, "local": False},
- {"name": "rh70-node2", "votes": 2, "local": False},
- {"name": "rh70-node3", "votes": 1, "local": True},
- ],
- }
- self.assertEqual(
- False,
- utils.is_node_stop_cause_quorum_loss(quorum_info, True)
+ sys.stderr = tmp_stderr
+
+ def test_dom_update_utilization_add(self):
+ el = xml.dom.minidom.parseString("""
+ <resource id="test_id"/>
+ """).documentElement
+ utils.dom_update_utilization(
+ el, [("name", ""), ("key", "-1"), ("keys", "90")]
)
- quorum_info = {
- "quorate": True,
- "quorum": 4,
- "node_list": [
- {"name": "rh70-node1", "votes": 3, "local": False},
- {"name": "rh70-node2", "votes": 2, "local": True},
- {"name": "rh70-node3", "votes": 1, "local": False},
- ],
- }
+ self.assertEqual(len(dom_get_child_elements(el)), 1)
+ u = dom_get_child_elements(el)[0]
+ self.assertEqual(u.tagName, "utilization")
+ self.assertEqual(u.getAttribute("id"), "test_id-utilization")
+ self.assertEqual(len(dom_get_child_elements(u)), 2)
+
self.assertEqual(
- False,
- utils.is_node_stop_cause_quorum_loss(quorum_info, True)
+ dom_get_child_elements(u)[0].getAttribute("id"),
+ "test_id-utilization-key"
+ )
+ self.assertEqual(
+ dom_get_child_elements(u)[0].getAttribute("name"),
+ "key"
+ )
+ self.assertEqual(
+ dom_get_child_elements(u)[0].getAttribute("value"),
+ "-1"
)
-
- quorum_info = {
- "quorate": True,
- "quorum": 4,
- "node_list": [
- {"name": "rh70-node1", "votes": 3, "local": True},
- {"name": "rh70-node2", "votes": 2, "local": False},
- {"name": "rh70-node3", "votes": 1, "local": False},
- ],
- }
self.assertEqual(
- True,
- utils.is_node_stop_cause_quorum_loss(quorum_info, True)
+ dom_get_child_elements(u)[1].getAttribute("id"),
+ "test_id-utilization-keys"
)
-
-
- quorum_info = {
- "quorate": True,
- "quorum": 4,
- "node_list": [
- {"name": "rh70-node1", "votes": 3, "local": True},
- {"name": "rh70-node2", "votes": 2, "local": False},
- {"name": "rh70-node3", "votes": 1, "local": False},
- ],
- }
self.assertEqual(
- False,
- utils.is_node_stop_cause_quorum_loss(
- quorum_info, False, ["rh70-node3"]
- )
+ dom_get_child_elements(u)[1].getAttribute("name"),
+ "keys"
)
-
- quorum_info = {
- "quorate": True,
- "quorum": 4,
- "node_list": [
- {"name": "rh70-node1", "votes": 3, "local": True},
- {"name": "rh70-node2", "votes": 2, "local": False},
- {"name": "rh70-node3", "votes": 1, "local": False},
- ],
- }
self.assertEqual(
- False,
- utils.is_node_stop_cause_quorum_loss(
- quorum_info, False, ["rh70-node2"]
- )
+ dom_get_child_elements(u)[1].getAttribute("value"),
+ "90"
)
- quorum_info = {
- "quorate": True,
- "quorum": 4,
- "node_list": [
- {"name": "rh70-node1", "votes": 3, "local": True},
- {"name": "rh70-node2", "votes": 2, "local": False},
- {"name": "rh70-node3", "votes": 1, "local": False},
- ],
- }
- self.assertEqual(
- True,
- utils.is_node_stop_cause_quorum_loss(
- quorum_info, False, ["rh70-node1"]
- )
+ def test_dom_update_utilization_update_remove(self):
+ el = xml.dom.minidom.parseString("""
+ <resource id="test_id">
+ <utilization id="test_id-utilization">
+ <nvpair id="test_id-utilization-key" name="key" value="-1"/>
+ <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
+ </utilization>
+ </resource>
+ """).documentElement
+ utils.dom_update_utilization(
+ el, [("key", "100"), ("keys", "")]
)
- quorum_info = {
- "quorate": True,
- "quorum": 4,
- "node_list": [
- {"name": "rh70-node1", "votes": 4, "local": True},
- {"name": "rh70-node2", "votes": 1, "local": False},
- {"name": "rh70-node3", "votes": 1, "local": False},
- ],
- }
+ u = dom_get_child_elements(el)[0]
+ self.assertEqual(len(dom_get_child_elements(u)), 1)
self.assertEqual(
- False,
- utils.is_node_stop_cause_quorum_loss(
- quorum_info, False, ["rh70-node2", "rh70-node3"]
- )
+ dom_get_child_elements(u)[0].getAttribute("id"),
+ "test_id-utilization-key"
)
-
- quorum_info = {
- "quorate": True,
- "quorum": 4,
- "node_list": [
- {"name": "rh70-node1", "votes": 3, "local": True},
- {"name": "rh70-node2", "votes": 2, "local": False},
- {"name": "rh70-node3", "votes": 1, "local": False},
- ],
- }
self.assertEqual(
- True,
- utils.is_node_stop_cause_quorum_loss(
- quorum_info, False, ["rh70-node2", "rh70-node3"]
- )
+ dom_get_child_elements(u)[0].getAttribute("name"),
+ "key"
)
-
- def test_get_operations_from_transitions(self):
- transitions = utils.parse(rc("transitions01.xml"))
self.assertEqual(
- [
- {
- 'id': 'dummy',
- 'long_id': 'dummy',
- 'operation': 'stop',
- 'on_node': 'rh7-3',
- },
- {
- 'id': 'dummy',
- 'long_id': 'dummy',
- 'operation': 'start',
- 'on_node': 'rh7-2',
- },
- {
- 'id': 'd0',
- 'long_id': 'd0:1',
- 'operation': 'stop',
- 'on_node': 'rh7-1',
- },
- {
- 'id': 'd0',
- 'long_id': 'd0:1',
- 'operation': 'start',
- 'on_node': 'rh7-2',
- },
- {
- 'id': 'state',
- 'long_id': 'state:0',
- 'operation': 'stop',
- 'on_node': 'rh7-3',
- },
- {
- 'id': 'state',
- 'long_id': 'state:0',
- 'operation': 'start',
- 'on_node': 'rh7-2',
- },
- ],
- utils.get_operations_from_transitions(transitions)
+ dom_get_child_elements(u)[0].getAttribute("value"),
+ "100"
)
- transitions = utils.parse(rc("transitions02.xml"))
- self.assertEqual(
- [
- {
- "id": "RemoteNode",
- "long_id": "RemoteNode",
- "operation": "stop",
- "on_node": "virt-143",
- },
- {
- "id": "RemoteNode",
- "long_id": "RemoteNode",
- "operation": "migrate_to",
- "on_node": "virt-143",
- },
- {
- "id": "RemoteNode",
- "long_id": "RemoteNode",
- "operation": "migrate_from",
- "on_node": "virt-142",
- },
- {
- "id": "dummy8",
- "long_id": "dummy8",
- "operation": "stop",
- "on_node": "virt-143",
- },
- {
- "id": "dummy8",
- "long_id": "dummy8",
- "operation": "start",
- "on_node": "virt-142",
- }
- ],
- utils.get_operations_from_transitions(transitions)
+ def test_dom_update_meta_attr_add(self):
+ el = xml.dom.minidom.parseString("""
+ <resource id="test_id"/>
+ """).documentElement
+ utils.dom_update_meta_attr(
+ el, [("name", ""), ("key", "test"), ("key2", "val")]
)
- def test_get_resources_location_from_operations(self):
- cib_dom = self.get_cib_resources()
+ self.assertEqual(len(dom_get_child_elements(el)), 1)
+ u = dom_get_child_elements(el)[0]
+ self.assertEqual(u.tagName, "meta_attributes")
+ self.assertEqual(u.getAttribute("id"), "test_id-meta_attributes")
+ self.assertEqual(len(dom_get_child_elements(u)), 2)
- operations = []
self.assertEqual(
- {},
- utils.get_resources_location_from_operations(cib_dom, operations)
+ dom_get_child_elements(u)[0].getAttribute("id"),
+ "test_id-meta_attributes-key"
)
-
- operations = [
- {
- "id": "myResource",
- "long_id": "myResource",
- "operation": "start",
- "on_node": "rh7-1",
- },
- ]
self.assertEqual(
- {
- 'myResource': {
- 'id': 'myResource',
- 'id_for_constraint': 'myResource',
- 'long_id': 'myResource',
- 'start_on_node': 'rh7-1',
- },
- },
- utils.get_resources_location_from_operations(cib_dom, operations)
+ dom_get_child_elements(u)[0].getAttribute("name"),
+ "key"
+ )
+ self.assertEqual(
+ dom_get_child_elements(u)[0].getAttribute("value"),
+ "test"
)
-
- operations = [
- {
- "id": "myResource",
- "long_id": "myResource",
- "operation": "start",
- "on_node": "rh7-1",
- },
- {
- "id": "myResource",
- "long_id": "myResource",
- "operation": "start",
- "on_node": "rh7-2",
- },
- {
- "id": "myResource",
- "long_id": "myResource",
- "operation": "monitor",
- "on_node": "rh7-3",
- },
- {
- "id": "myResource",
- "long_id": "myResource",
- "operation": "stop",
- "on_node": "rh7-3",
- },
- ]
self.assertEqual(
- {
- 'myResource': {
- 'id': 'myResource',
- 'id_for_constraint': 'myResource',
- 'long_id': 'myResource',
- 'start_on_node': 'rh7-2',
- },
- },
- utils.get_resources_location_from_operations(cib_dom, operations)
+ dom_get_child_elements(u)[1].getAttribute("id"),
+ "test_id-meta_attributes-key2"
)
-
- operations = [
- {
- "id": "myResource",
- "long_id": "myResource",
- "operation": "start",
- "on_node": "rh7-1",
- },
- {
- "id": "myClonedResource",
- "long_id": "myClonedResource:0",
- "operation": "start",
- "on_node": "rh7-1",
- },
- {
- "id": "myClonedResource",
- "long_id": "myClonedResource:0",
- "operation": "start",
- "on_node": "rh7-2",
- },
- {
- "id": "myClonedResource",
- "long_id": "myClonedResource:1",
- "operation": "start",
- "on_node": "rh7-3",
- },
- ]
self.assertEqual(
- {
- 'myResource': {
- 'id': 'myResource',
- 'id_for_constraint': 'myResource',
- 'long_id': 'myResource',
- 'start_on_node': 'rh7-1',
- },
- 'myClonedResource:0': {
- 'id': 'myClonedResource',
- 'id_for_constraint': 'myClone',
- 'long_id': 'myClonedResource:0',
- 'start_on_node': 'rh7-2',
- },
- 'myClonedResource:1': {
- 'id': 'myClonedResource',
- 'id_for_constraint': 'myClone',
- 'long_id': 'myClonedResource:1',
- 'start_on_node': 'rh7-3',
- },
- },
- utils.get_resources_location_from_operations(cib_dom, operations)
+ dom_get_child_elements(u)[1].getAttribute("name"),
+ "key2"
)
-
- operations = [
- {
- "id": "myUniqueClonedResource:0",
- "long_id": "myUniqueClonedResource:0",
- "operation": "start",
- "on_node": "rh7-1",
- },
- {
- "id": "myUniqueClonedResource:1",
- "long_id": "myUniqueClonedResource:1",
- "operation": "monitor",
- "on_node": "rh7-2",
- },
- {
- "id": "myUniqueClonedResource:2",
- "long_id": "myUniqueClonedResource:2",
- "operation": "start",
- "on_node": "rh7-3",
- },
- ]
self.assertEqual(
- {
- 'myUniqueClonedResource:0': {
- 'id': 'myUniqueClonedResource:0',
- 'id_for_constraint': 'myUniqueClone',
- 'long_id': 'myUniqueClonedResource:0',
- 'start_on_node': 'rh7-1',
- },
- 'myUniqueClonedResource:2': {
- 'id': 'myUniqueClonedResource:2',
- 'id_for_constraint': 'myUniqueClone',
- 'long_id': 'myUniqueClonedResource:2',
- 'start_on_node': 'rh7-3',
- },
- },
- utils.get_resources_location_from_operations(cib_dom, operations)
+ dom_get_child_elements(u)[1].getAttribute("value"),
+ "val"
)
- operations = [
- {
- "id": "myMasteredGroupedResource",
- "long_id": "myMasteredGroupedResource:0",
- "operation": "start",
- "on_node": "rh7-1",
- },
- {
- "id": "myMasteredGroupedResource",
- "long_id": "myMasteredGroupedResource:1",
- "operation": "demote",
- "on_node": "rh7-2",
- },
- {
- "id": "myMasteredGroupedResource",
- "long_id": "myMasteredGroupedResource:1",
- "operation": "promote",
- "on_node": "rh7-3",
- },
- ]
+ def test_dom_update_meta_attr_update_remove(self):
+ el = xml.dom.minidom.parseString("""
+ <resource id="test_id">
+ <meta_attributes id="test_id-utilization">
+ <nvpair id="test_id-meta_attributes-key" name="key" value="test"/>
+ <nvpair id="test_id-meta_attributes-key2" name="key2" value="val"/>
+ </meta_attributes>
+ </resource>
+ """).documentElement
+ utils.dom_update_meta_attr(
+ el, [("key", "another_val"), ("key2", "")]
+ )
+
+ u = dom_get_child_elements(el)[0]
+ self.assertEqual(len(dom_get_child_elements(u)), 1)
self.assertEqual(
- {
- 'myMasteredGroupedResource:0': {
- 'id': 'myMasteredGroupedResource',
- 'id_for_constraint': 'myGroupMaster',
- 'long_id': 'myMasteredGroupedResource:0',
- 'start_on_node': 'rh7-1',
- },
- 'myMasteredGroupedResource:1': {
- 'id': 'myMasteredGroupedResource',
- 'id_for_constraint': 'myGroupMaster',
- 'long_id': 'myMasteredGroupedResource:1',
- 'promote_on_node': 'rh7-3',
- },
- },
- utils.get_resources_location_from_operations(cib_dom, operations)
+ dom_get_child_elements(u)[0].getAttribute("id"),
+ "test_id-meta_attributes-key"
+ )
+ self.assertEqual(
+ dom_get_child_elements(u)[0].getAttribute("name"),
+ "key"
+ )
+ self.assertEqual(
+ dom_get_child_elements(u)[0].getAttribute("value"),
+ "another_val"
)
- operations = [
- {
- "id": "myResource",
- "long_id": "myResource",
- "operation": "stop",
- "on_node": "rh7-1",
- },
- {
- "id": "myResource",
- "long_id": "myResource",
- "operation": "migrate_to",
- "on_node": "rh7-1",
- },
- {
- "id": "myResource",
- "long_id": "myResource",
- "operation": "migrate_from",
- "on_node": "rh7-2",
+ def test_get_utilization(self):
+ el = xml.dom.minidom.parseString("""
+ <resource id="test_id">
+ <utilization id="test_id-utilization">
+ <nvpair id="test_id-utilization-key" name="key" value="-1"/>
+ <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
+ </utilization>
+ </resource>
+ """).documentElement
+ self.assertEqual({"key": "-1", "keys": "90"}, utils.get_utilization(el))
+
+ def test_get_utilization_str(self):
+ el = xml.dom.minidom.parseString("""
+ <resource id="test_id">
+ <utilization id="test_id-utilization">
+ <nvpair id="test_id-utilization-key" name="key" value="-1"/>
+ <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
+ </utilization>
+ </resource>
+ """).documentElement
+ self.assertEqual("key=-1 keys=90", utils.get_utilization_str(el))
+
+ def test_get_cluster_property_from_xml_enum(self):
+ el = ET.fromstring("""
+ <parameter name="no-quorum-policy" unique="0">
+ <shortdesc lang="en">What to do when the cluster does not have quorum</shortdesc>
+ <content type="enum" default="stop"/>
+ <longdesc lang="en">What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, suicide</longdesc>
+ </parameter>
+ """)
+ expected = {
+ "name": "no-quorum-policy",
+ "shortdesc": "What to do when the cluster does not have quorum",
+ "longdesc": "",
+ "type": "enum",
+ "default": "stop",
+ "enum": ["stop", "freeze", "ignore", "suicide"]
+ }
+ self.assertEqual(expected, utils.get_cluster_property_from_xml(el))
+
+ def test_get_cluster_property_from_xml(self):
+ el = ET.fromstring("""
+ <parameter name="default-resource-stickiness" unique="0">
+ <shortdesc lang="en"></shortdesc>
+ <content type="integer" default="0"/>
+ <longdesc lang="en"></longdesc>
+ </parameter>
+ """)
+ expected = {
+ "name": "default-resource-stickiness",
+ "shortdesc": "",
+ "longdesc": "",
+ "type": "integer",
+ "default": "0"
+ }
+ self.assertEqual(expected, utils.get_cluster_property_from_xml(el))
+
+ def test_get_cluster_property_default(self):
+ definition = {
+ "default-resource-stickiness": {
+ "name": "default-resource-stickiness",
+ "shortdesc": "",
+ "longdesc": "",
+ "type": "integer",
+ "default": "0",
+ "source": "pengine"
},
- ]
- self.assertEqual(
- {
- "myResource": {
- "id": "myResource",
- "id_for_constraint": "myResource",
- "long_id": "myResource",
- "start_on_node": "rh7-2",
- },
+ "no-quorum-policy": {
+ "name": "no-quorum-policy",
+ "shortdesc": "What to do when the cluster does not have quorum",
+ "longdesc": "What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, suicide",
+ "type": "enum",
+ "default": "stop",
+ "enum": ["stop", "freeze", "ignore", "suicide"],
+ "source": "pengine"
},
- utils.get_resources_location_from_operations(cib_dom, operations)
+ "enable-acl": {
+ "name": "enable-acl",
+ "shortdesc": "Enable CIB ACL",
+ "longdesc": "Enable CIB ACL",
+ "type": "boolean",
+ "default": "false",
+ "source": "cib"
+ }
+ }
+ self.assertEqual(
+ utils.get_cluster_property_default(
+ definition, "default-resource-stickiness"
+ ),
+ "0"
)
-
- def test_is_int(self):
- self.assertTrue(utils.is_int("-999"))
- self.assertTrue(utils.is_int("-1"))
- self.assertTrue(utils.is_int("0"))
- self.assertTrue(utils.is_int("1"))
- self.assertTrue(utils.is_int("99999"))
- self.assertTrue(utils.is_int(" 99999 "))
- self.assertFalse(utils.is_int("0.0"))
- self.assertFalse(utils.is_int("-1.0"))
- self.assertFalse(utils.is_int("-0.1"))
- self.assertFalse(utils.is_int("0.001"))
- self.assertFalse(utils.is_int("-999999.1"))
- self.assertFalse(utils.is_int("0.0001"))
- self.assertFalse(utils.is_int(""))
- self.assertFalse(utils.is_int(" "))
- self.assertFalse(utils.is_int("A"))
- self.assertFalse(utils.is_int("random 15 47 text "))
-
- def test_dom_get_node(self):
- cib = self.get_cib_with_nodes_minidom()
- #assertIsNone is not supported in python 2.6
- self.assertTrue(utils.dom_get_node(cib, "non-existing-node") is None)
- node = utils.dom_get_node(cib, "rh7-1")
- self.assertEqual(node.getAttribute("uname"), "rh7-1")
- self.assertEqual(node.getAttribute("id"), "1")
-
- def test_dom_prepare_child_element(self):
- cib = self.get_cib_with_nodes_minidom()
- node = cib.getElementsByTagName("node")[0]
- self.assertEqual(len(dom_get_child_elements(node)), 0)
- child = utils.dom_prepare_child_element(
- node, "utilization", "rh7-1-utilization"
+ self.assertEqual(
+ utils.get_cluster_property_default(definition, "no-quorum-policy"),
+ "stop"
)
- self.assertEqual(len(dom_get_child_elements(node)), 1)
- self.assertEqual(child, dom_get_child_elements(node)[0])
- self.assertEqual(dom_get_child_elements(node)[0].tagName, "utilization")
self.assertEqual(
- dom_get_child_elements(node)[0].getAttribute("id"),
- "rh7-1-utilization"
+ utils.get_cluster_property_default(definition, "enable-acl"),
+ "false"
)
- child2 = utils.dom_prepare_child_element(
- node, "utilization", "rh7-1-utilization"
+ self.assertRaises(
+ utils.UnknownPropertyException,
+ utils.get_cluster_property_default, definition, "non-existing"
)
- self.assertEqual(len(dom_get_child_elements(node)), 1)
- self.assertEqual(child, child2)
- def test_dom_update_nv_pair_add(self):
- nv_set = xml.dom.minidom.parseString("<nvset/>").documentElement
- utils.dom_update_nv_pair(nv_set, "test_name", "test_val", "prefix-")
- self.assertEqual(len(dom_get_child_elements(nv_set)), 1)
- pair = dom_get_child_elements(nv_set)[0]
- self.assertEqual(pair.getAttribute("name"), "test_name")
- self.assertEqual(pair.getAttribute("value"), "test_val")
- self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
- utils.dom_update_nv_pair(nv_set, "another_name", "value", "prefix2-")
- self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
- self.assertEqual(pair, dom_get_child_elements(nv_set)[0])
- pair = dom_get_child_elements(nv_set)[1]
- self.assertEqual(pair.getAttribute("name"), "another_name")
- self.assertEqual(pair.getAttribute("value"), "value")
- self.assertEqual(pair.getAttribute("id"), "prefix2-another_name")
+ def test_is_valid_cib_value_unknown_type(self):
+ # should be always true
+ self.assertTrue(utils.is_valid_cib_value("unknown", "test"))
+ self.assertTrue(utils.is_valid_cib_value("string", "string value"))
- def test_dom_update_nv_pair_update(self):
- nv_set = xml.dom.minidom.parseString("""
- <nv_set>
- <nvpair id="prefix-test_name" name="test_name" value="test_val"/>
- <nvpair id="prefix2-another_name" name="another_name" value="value"/>
- </nv_set>
- """).documentElement
- utils.dom_update_nv_pair(nv_set, "test_name", "new_value")
- self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
- pair1 = dom_get_child_elements(nv_set)[0]
- pair2 = dom_get_child_elements(nv_set)[1]
- self.assertEqual(pair1.getAttribute("name"), "test_name")
- self.assertEqual(pair1.getAttribute("value"), "new_value")
- self.assertEqual(pair1.getAttribute("id"), "prefix-test_name")
- self.assertEqual(pair2.getAttribute("name"), "another_name")
- self.assertEqual(pair2.getAttribute("value"), "value")
- self.assertEqual(pair2.getAttribute("id"), "prefix2-another_name")
+ def test_is_valid_cib_value_integer(self):
+ self.assertTrue(utils.is_valid_cib_value("integer", "0"))
+ self.assertTrue(utils.is_valid_cib_value("integer", "42"))
+ self.assertTrue(utils.is_valid_cib_value("integer", "-90"))
+ self.assertTrue(utils.is_valid_cib_value("integer", "+90"))
+ self.assertTrue(utils.is_valid_cib_value("integer", "INFINITY"))
+ self.assertTrue(utils.is_valid_cib_value("integer", "-INFINITY"))
+ self.assertTrue(utils.is_valid_cib_value("integer", "+INFINITY"))
+ self.assertFalse(utils.is_valid_cib_value("integer", "0.0"))
+ self.assertFalse(utils.is_valid_cib_value("integer", "-10.9"))
+ self.assertFalse(utils.is_valid_cib_value("integer", "string"))
- def test_dom_update_nv_pair_remove(self):
- nv_set = xml.dom.minidom.parseString("""
- <nv_set>
- <nvpair id="prefix-test_name" name="test_name" value="test_val"/>
- <nvpair id="prefix2-another_name" name="another_name" value="value"/>
- </nv_set>
- """).documentElement
- utils.dom_update_nv_pair(nv_set, "non_existing_name", "")
- self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
- utils.dom_update_nv_pair(nv_set, "another_name", "")
- self.assertEqual(len(dom_get_child_elements(nv_set)), 1)
- pair = dom_get_child_elements(nv_set)[0]
- self.assertEqual(pair.getAttribute("name"), "test_name")
- self.assertEqual(pair.getAttribute("value"), "test_val")
- self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
- utils.dom_update_nv_pair(nv_set, "test_name", "")
- self.assertEqual(len(dom_get_child_elements(nv_set)), 0)
+ def test_is_valid_cib_value_enum(self):
+ self.assertTrue(
+ utils.is_valid_cib_value("enum", "this", ["another", "this", "1"])
+ )
+ self.assertFalse(
+ utils.is_valid_cib_value("enum", "this", ["another", "this_not"])
+ )
+ self.assertFalse(utils.is_valid_cib_value("enum", "this", []))
+ self.assertFalse(utils.is_valid_cib_value("enum", "this"))
- def test_convert_args_to_tuples(self):
- out = utils.convert_args_to_tuples(
- ["invalid_string", "key=value", "key2=val=ue", "k e y= v a l u e "]
+ def test_is_valid_cib_value_boolean(self):
+ self.assertTrue(utils.is_valid_cib_value("boolean", "true"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "TrUe"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "TRUE"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "yes"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "on"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "y"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "Y"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "1"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "false"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "FaLse"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "FALSE"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "off"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "no"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "N"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "n"))
+ self.assertTrue(utils.is_valid_cib_value("boolean", "0"))
+ self.assertFalse(utils.is_valid_cib_value("boolean", "-1"))
+ self.assertFalse(utils.is_valid_cib_value("boolean", "not"))
+ self.assertFalse(utils.is_valid_cib_value("boolean", "random_string"))
+ self.assertFalse(utils.is_valid_cib_value("boolean", "truth"))
+
+ def test_is_valid_cib_value_time(self):
+ self.assertTrue(utils.is_valid_cib_value("time", "10"))
+ self.assertTrue(utils.is_valid_cib_value("time", "0"))
+ self.assertTrue(utils.is_valid_cib_value("time", "9s"))
+ self.assertTrue(utils.is_valid_cib_value("time", "10sec"))
+ self.assertTrue(utils.is_valid_cib_value("time", "10min"))
+ self.assertTrue(utils.is_valid_cib_value("time", "10m"))
+ self.assertTrue(utils.is_valid_cib_value("time", "10h"))
+ self.assertTrue(utils.is_valid_cib_value("time", "10hr"))
+ self.assertFalse(utils.is_valid_cib_value("time", "5.2"))
+ self.assertFalse(utils.is_valid_cib_value("time", "-10"))
+ self.assertFalse(utils.is_valid_cib_value("time", "10m 2s"))
+ self.assertFalse(utils.is_valid_cib_value("time", "hour"))
+ self.assertFalse(utils.is_valid_cib_value("time", "day"))
+
+ def test_validate_cluster_property(self):
+ definition = {
+ "default-resource-stickiness": {
+ "name": "default-resource-stickiness",
+ "shortdesc": "",
+ "longdesc": "",
+ "type": "integer",
+ "default": "0",
+ "source": "pengine"
+ },
+ "no-quorum-policy": {
+ "name": "no-quorum-policy",
+ "shortdesc": "What to do when the cluster does not have quorum",
+ "longdesc": "What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, suicide",
+ "type": "enum",
+ "default": "stop",
+ "enum": ["stop", "freeze", "ignore", "suicide"],
+ "source": "pengine"
+ },
+ "enable-acl": {
+ "name": "enable-acl",
+ "shortdesc": "Enable CIB ACL",
+ "longdesc": "Enable CIB ACL",
+ "type": "boolean",
+ "default": "false",
+ "source": "cib"
+ }
+ }
+ self.assertTrue(utils.is_valid_cluster_property(
+ definition, "default-resource-stickiness", "10"
+ ))
+ self.assertTrue(utils.is_valid_cluster_property(
+ definition, "default-resource-stickiness", "-1"
+ ))
+ self.assertTrue(utils.is_valid_cluster_property(
+ definition, "no-quorum-policy", "freeze"
+ ))
+ self.assertTrue(utils.is_valid_cluster_property(
+ definition, "no-quorum-policy", "suicide"
+ ))
+ self.assertTrue(utils.is_valid_cluster_property(
+ definition, "enable-acl", "true"
+ ))
+ self.assertTrue(utils.is_valid_cluster_property(
+ definition, "enable-acl", "false"
+ ))
+ self.assertTrue(utils.is_valid_cluster_property(
+ definition, "enable-acl", "on"
+ ))
+ self.assertTrue(utils.is_valid_cluster_property(
+ definition, "enable-acl", "OFF"
+ ))
+ self.assertFalse(utils.is_valid_cluster_property(
+ definition, "default-resource-stickiness", "test"
+ ))
+ self.assertFalse(utils.is_valid_cluster_property(
+ definition, "default-resource-stickiness", "1.2"
+ ))
+ self.assertFalse(utils.is_valid_cluster_property(
+ definition, "no-quorum-policy", "invalid"
+ ))
+ self.assertFalse(utils.is_valid_cluster_property(
+ definition, "enable-acl", "not"
+ ))
+ self.assertRaises(
+ utils.UnknownPropertyException,
+ utils.is_valid_cluster_property, definition, "unknown", "value"
)
- self.assertEqual(
- out,
- [("key", "value"), ("key2", "val=ue"), ("k e y", " v a l u e ")]
+
+ def assert_element_id(self, node, node_id):
+ self.assertTrue(
+ isinstance(node, xml.dom.minidom.Element),
+ "element with id '%s' not found" % node_id
)
+ self.assertEqual(node.getAttribute("id"), node_id)
- def test_dom_update_utilization_invalid(self):
- #commands writes to stderr
- #we want clean test output, so we capture it
- tmp_stderr = sys.stderr
- sys.stderr = StringIO()
- el = xml.dom.minidom.parseString("""
- <resource id="test_id"/>
- """).documentElement
- self.assertRaises(
- SystemExit,
- utils.dom_update_utilization, el, [("name", "invalid_val")]
- )
+class RunParallelTest(unittest.TestCase):
+ def fixture_create_worker(self, log, name, sleepSeconds=0):
+ def worker():
+ sleep(sleepSeconds)
+ log.append(name)
+ return worker
- self.assertRaises(
- SystemExit,
- utils.dom_update_utilization, el, [("name", "0.01")]
+ def test_run_all_workers(self):
+ log = []
+ utils.run_parallel(
+ [
+ self.fixture_create_worker(log, 'first'),
+ self.fixture_create_worker(log, 'second'),
+ ],
+ wait_seconds=.1
)
- sys.stderr = tmp_stderr
+ self.assertEqual(log, ['first', 'second'])
- def test_dom_update_utilization_add(self):
- el = xml.dom.minidom.parseString("""
- <resource id="test_id"/>
- """).documentElement
- utils.dom_update_utilization(
- el, [("name", ""), ("key", "-1"), ("keys", "90")]
+ def test_wait_for_slower_workers(self):
+ log = []
+ utils.run_parallel(
+ [
+ self.fixture_create_worker(log, 'first', .03),
+ self.fixture_create_worker(log, 'second'),
+ ],
+ wait_seconds=.01
)
- self.assertEqual(len(dom_get_child_elements(el)), 1)
- u = dom_get_child_elements(el)[0]
- self.assertEqual(u.tagName, "utilization")
- self.assertEqual(u.getAttribute("id"), "test_id-utilization")
- self.assertEqual(len(dom_get_child_elements(u)), 2)
+ self.assertEqual(log, ['second', 'first'])
+
+class PrepareNodeNamesTest(unittest.TestCase):
+ def test_return_original_when_is_in_pacemaker_nodes(self):
+ node = 'test'
self.assertEqual(
- dom_get_child_elements(u)[0].getAttribute("id"),
- "test_id-utilization-key"
+ node,
+ utils.prepare_node_name(node, {1: node}, {})
)
+
+ def test_return_original_when_is_not_in_corosync_nodes(self):
+ node = 'test'
self.assertEqual(
- dom_get_child_elements(u)[0].getAttribute("name"),
- "key"
+ node,
+ utils.prepare_node_name(node, {}, {})
)
+
+ def test_return_original_when_corosync_id_not_in_pacemaker(self):
+ node = 'test'
self.assertEqual(
- dom_get_child_elements(u)[0].getAttribute("value"),
- "-1"
+ node,
+ utils.prepare_node_name(node, {}, {1: node})
)
+
+ def test_return_modified_name(self):
+ node = 'test'
self.assertEqual(
- dom_get_child_elements(u)[1].getAttribute("id"),
- "test_id-utilization-keys"
+ 'another (test)',
+ utils.prepare_node_name(node, {1: 'another'}, {1: node})
)
+
+ def test_return_modified_name_with_pm_null_case(self):
+ node = 'test'
self.assertEqual(
- dom_get_child_elements(u)[1].getAttribute("name"),
- "keys"
+ '*Unknown* (test)',
+ utils.prepare_node_name(node, {1: '(null)'}, {1: node})
+ )
+
+
+class NodeActionTaskTest(unittest.TestCase):
+ def test_can_run_action(self):
+ def action(node, arg, kwarg=None):
+ return (0, ':'.join([node, arg, kwarg]))
+
+ report_list = []
+ def report(node, returncode, output):
+ report_list.append('|'.join([node, str(returncode), output]))
+
+ task = utils.create_task(report, action, 'node', 'arg', kwarg='kwarg')
+ task()
+
+ self.assertEqual(['node|0|node:arg:kwarg'], report_list)
+
+
+class ParseCmanQuorumInfoTest(unittest.TestCase):
+ def test_error_empty_string(self):
+ parsed = utils.parse_cman_quorum_info("")
+ self.assertEqual(None, parsed)
+
+ def test_quorate_no_qdevice(self):
+ parsed = utils.parse_cman_quorum_info("""\
+Version: 6.2.0
+Config Version: 23
+Cluster Name: cluster66
+Cluster Id: 22265
+Cluster Member: Yes
+Cluster Generation: 3612
+Membership state: Cluster-Member
+Nodes: 3
+Expected votes: 3
+Total votes: 3
+Node votes: 1
+Quorum: 2
+Active subsystems: 8
+Flags:
+Ports Bound: 0
+Node name: rh66-node2
+Node ID: 2
+Multicast addresses: 239.192.86.80
+Node addresses: 192.168.122.61
+---Votes---
+1 M 3 rh66-node1
+2 M 2 rh66-node2
+3 M 1 rh66-node3
+""")
+ self.assertEqual(True, parsed["quorate"])
+ self.assertEqual(2, parsed["quorum"])
+ self.assertEqual(
+ [
+ {"name": "rh66-node1", "votes": 3, "local": False},
+ {"name": "rh66-node2", "votes": 2, "local": True},
+ {"name": "rh66-node3", "votes": 1, "local": False},
+ ],
+ parsed["node_list"]
)
+ self.assertEqual([], parsed["qdevice_list"])
+
+ def test_no_quorate_no_qdevice(self):
+ parsed = utils.parse_cman_quorum_info("""\
+Version: 6.2.0
+Config Version: 23
+Cluster Name: cluster66
+Cluster Id: 22265
+Cluster Member: Yes
+Cluster Generation: 3612
+Membership state: Cluster-Member
+Nodes: 3
+Expected votes: 3
+Total votes: 3
+Node votes: 1
+Quorum: 2 Activity blocked
+Active subsystems: 8
+Flags:
+Ports Bound: 0
+Node name: rh66-node1
+Node ID: 1
+Multicast addresses: 239.192.86.80
+Node addresses: 192.168.122.61
+---Votes---
+1 M 3 rh66-node1
+2 X 2 rh66-node2
+3 X 1 rh66-node3
+""")
+ self.assertEqual(False, parsed["quorate"])
+ self.assertEqual(2, parsed["quorum"])
self.assertEqual(
- dom_get_child_elements(u)[1].getAttribute("value"),
- "90"
+ [
+ {"name": "rh66-node1", "votes": 3, "local": True},
+ ],
+ parsed["node_list"]
)
+ self.assertEqual([], parsed["qdevice_list"])
+
+ def test_error_missing_quorum(self):
+ parsed = utils.parse_cman_quorum_info("""\
+Version: 6.2.0
+Config Version: 23
+Cluster Name: cluster66
+Cluster Id: 22265
+Cluster Member: Yes
+Cluster Generation: 3612
+Membership state: Cluster-Member
+Nodes: 3
+Expected votes: 3
+Total votes: 3
+Node votes: 1
+Quorum:
+Active subsystems: 8
+Flags:
+Ports Bound: 0
+Node name: rh66-node2
+Node ID: 2
+Multicast addresses: 239.192.86.80
+Node addresses: 192.168.122.61
+---Votes---
+1 M 3 rh66-node1
+2 M 2 rh66-node2
+3 M 1 rh66-node3
+""")
+ self.assertEqual(None, parsed)
+
+ def test_error_quorum_garbage(self):
+ parsed = utils.parse_cman_quorum_info("""\
+Version: 6.2.0
+Config Version: 23
+Cluster Name: cluster66
+Cluster Id: 22265
+Cluster Member: Yes
+Cluster Generation: 3612
+Membership state: Cluster-Member
+Nodes: 3
+Expected votes: 3
+Total votes: 3
+Node votes: 1
+Quorum: Foo
+Active subsystems: 8
+Flags:
+Ports Bound: 0
+Node name: rh66-node2
+Node ID: 2
+Multicast addresses: 239.192.86.80
+Node addresses: 192.168.122.61
+---Votes---
+1 M 3 rh66-node1
+2 M 2 rh66-node2
+3 M 1 rh66-node3
+""")
+ self.assertEqual(None, parsed)
+
+ def test_error_node_votes_garbage(self):
+ parsed = utils.parse_cman_quorum_info("""\
+Version: 6.2.0
+Config Version: 23
+Cluster Name: cluster66
+Cluster Id: 22265
+Cluster Member: Yes
+Cluster Generation: 3612
+Membership state: Cluster-Member
+Nodes: 3
+Expected votes: 3
+Total votes: 3
+Node votes: 1
+Quorum: 4
+Active subsystems: 8
+Flags:
+Ports Bound: 0
+Node name: rh66-node2
+Node ID: 2
+Multicast addresses: 239.192.86.80
+Node addresses: 192.168.122.61
+---Votes---
+1 M 3 rh66-node1
+2 M Foo rh66-node2
+3 M 1 rh66-node3
+""")
+ self.assertEqual(None, parsed)
- def test_dom_update_utilization_update_remove(self):
- el = xml.dom.minidom.parseString("""
- <resource id="test_id">
- <utilization id="test_id-utilization">
- <nvpair id="test_id-utilization-key" name="key" value="-1"/>
- <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
- </utilization>
- </resource>
- """).documentElement
- utils.dom_update_utilization(
- el, [("key", "100"), ("keys", "")]
- )
- u = dom_get_child_elements(el)[0]
- self.assertEqual(len(dom_get_child_elements(u)), 1)
- self.assertEqual(
- dom_get_child_elements(u)[0].getAttribute("id"),
- "test_id-utilization-key"
- )
- self.assertEqual(
- dom_get_child_elements(u)[0].getAttribute("name"),
- "key"
- )
- self.assertEqual(
- dom_get_child_elements(u)[0].getAttribute("value"),
- "100"
- )
+class ParseQuorumtoolOutputTest(unittest.TestCase):
+ def test_error_empty_string(self):
+ parsed = utils.parse_quorumtool_output("")
+ self.assertEqual(None, parsed)
- def test_dom_update_meta_attr_add(self):
- el = xml.dom.minidom.parseString("""
- <resource id="test_id"/>
- """).documentElement
- utils.dom_update_meta_attr(
- el, [("name", ""), ("key", "test"), ("key2", "val")]
- )
+ def test_quorate_no_qdevice(self):
+ parsed = utils.parse_quorumtool_output("""\
+Quorum information
+------------------
+Date: Fri Jan 16 13:03:28 2015
+Quorum provider: corosync_votequorum
+Nodes: 3
+Node ID: 1
+Ring ID: 19860
+Quorate: Yes
- self.assertEqual(len(dom_get_child_elements(el)), 1)
- u = dom_get_child_elements(el)[0]
- self.assertEqual(u.tagName, "meta_attributes")
- self.assertEqual(u.getAttribute("id"), "test_id-meta_attributes")
- self.assertEqual(len(dom_get_child_elements(u)), 2)
+Votequorum information
+----------------------
+Expected votes: 3
+Highest expected: 3
+Total votes: 3
+Quorum: 2
+Flags: Quorate
+Membership information
+----------------------
+ Nodeid Votes Qdevice Name
+ 1 3 NR rh70-node1
+ 2 2 NR rh70-node2 (local)
+ 3 1 NR rh70-node3
+""")
+ self.assertEqual(True, parsed["quorate"])
+ self.assertEqual(2, parsed["quorum"])
self.assertEqual(
- dom_get_child_elements(u)[0].getAttribute("id"),
- "test_id-meta_attributes-key"
- )
- self.assertEqual(
- dom_get_child_elements(u)[0].getAttribute("name"),
- "key"
- )
- self.assertEqual(
- dom_get_child_elements(u)[0].getAttribute("value"),
- "test"
- )
- self.assertEqual(
- dom_get_child_elements(u)[1].getAttribute("id"),
- "test_id-meta_attributes-key2"
+ [
+ {"name": "rh70-node1", "votes": 3, "local": False},
+ {"name": "rh70-node2", "votes": 2, "local": True},
+ {"name": "rh70-node3", "votes": 1, "local": False},
+ ],
+ parsed["node_list"]
)
+ self.assertEqual([], parsed["qdevice_list"])
+
+ def test_quorate_with_qdevice(self):
+ parsed = utils.parse_quorumtool_output("""\
+Quorum information
+------------------
+Date: Fri Jan 16 13:03:28 2015
+Quorum provider: corosync_votequorum
+Nodes: 3
+Node ID: 1
+Ring ID: 19860
+Quorate: Yes
+
+Votequorum information
+----------------------
+Expected votes: 10
+Highest expected: 10
+Total votes: 10
+Quorum: 6
+Flags: Quorate Qdevice
+
+Membership information
+----------------------
+ Nodeid Votes Qdevice Name
+ 1 3 A,V,MNW rh70-node1
+ 2 2 A,V,MNW rh70-node2 (local)
+ 3 1 A,V,MNW rh70-node3
+ 0 4 Qdevice
+""")
+ self.assertEqual(True, parsed["quorate"])
+ self.assertEqual(6, parsed["quorum"])
self.assertEqual(
- dom_get_child_elements(u)[1].getAttribute("name"),
- "key2"
+ [
+ {"name": "rh70-node1", "votes": 3, "local": False},
+ {"name": "rh70-node2", "votes": 2, "local": True},
+ {"name": "rh70-node3", "votes": 1, "local": False},
+ ],
+ parsed["node_list"]
)
self.assertEqual(
- dom_get_child_elements(u)[1].getAttribute("value"),
- "val"
+ [
+ {"name": "Qdevice", "votes": 4, "local": False},
+ ],
+ parsed["qdevice_list"]
)
- def test_dom_update_meta_attr_update_remove(self):
- el = xml.dom.minidom.parseString("""
- <resource id="test_id">
- <meta_attributes id="test_id-utilization">
- <nvpair id="test_id-meta_attributes-key" name="key" value="test"/>
- <nvpair id="test_id-meta_attributes-key2" name="key2" value="val"/>
- </meta_attributes>
- </resource>
- """).documentElement
- utils.dom_update_meta_attr(
- el, [("key", "another_val"), ("key2", "")]
- )
+ def test_quorate_with_qdevice_lost(self):
+ parsed = utils.parse_quorumtool_output("""\
+Quorum information
+------------------
+Date: Fri Jan 16 13:03:28 2015
+Quorum provider: corosync_votequorum
+Nodes: 3
+Node ID: 1
+Ring ID: 19860
+Quorate: Yes
- u = dom_get_child_elements(el)[0]
- self.assertEqual(len(dom_get_child_elements(u)), 1)
- self.assertEqual(
- dom_get_child_elements(u)[0].getAttribute("id"),
- "test_id-meta_attributes-key"
- )
+Votequorum information
+----------------------
+Expected votes: 10
+Highest expected: 10
+Total votes: 6
+Quorum: 6
+Flags: Quorate Qdevice
+
+Membership information
+----------------------
+ Nodeid Votes Qdevice Name
+ 1 3 NA,V,MNW rh70-node1
+ 2 2 NA,V,MNW rh70-node2 (local)
+ 3 1 NA,V,MNW rh70-node3
+ 0 0 Qdevice (votes 4)
+""")
+ self.assertEqual(True, parsed["quorate"])
+ self.assertEqual(6, parsed["quorum"])
self.assertEqual(
- dom_get_child_elements(u)[0].getAttribute("name"),
- "key"
+ [
+ {"name": "rh70-node1", "votes": 3, "local": False},
+ {"name": "rh70-node2", "votes": 2, "local": True},
+ {"name": "rh70-node3", "votes": 1, "local": False},
+ ],
+ parsed["node_list"]
)
self.assertEqual(
- dom_get_child_elements(u)[0].getAttribute("value"),
- "another_val"
+ [
+ {"name": "Qdevice", "votes": 0, "local": False},
+ ],
+ parsed["qdevice_list"]
)
- def test_get_utilization(self):
- el = xml.dom.minidom.parseString("""
- <resource id="test_id">
- <utilization id="test_id-utilization">
- <nvpair id="test_id-utilization-key" name="key" value="-1"/>
- <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
- </utilization>
- </resource>
- """).documentElement
- self.assertEqual({"key": "-1", "keys": "90"}, utils.get_utilization(el))
-
- def test_get_utilization_str(self):
- el = xml.dom.minidom.parseString("""
- <resource id="test_id">
- <utilization id="test_id-utilization">
- <nvpair id="test_id-utilization-key" name="key" value="-1"/>
- <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
- </utilization>
- </resource>
- """).documentElement
- self.assertEqual("key=-1 keys=90", utils.get_utilization_str(el))
-
- def test_get_cluster_property_from_xml_enum(self):
- el = ET.fromstring("""
- <parameter name="no-quorum-policy" unique="0">
- <shortdesc lang="en">What to do when the cluster does not have quorum</shortdesc>
- <content type="enum" default="stop"/>
- <longdesc lang="en">What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, suicide</longdesc>
- </parameter>
- """)
- expected = {
- "name": "no-quorum-policy",
- "shortdesc": "What to do when the cluster does not have quorum",
- "longdesc": "",
- "type": "enum",
- "default": "stop",
- "enum": ["stop", "freeze", "ignore", "suicide"]
- }
- self.assertEqual(expected, utils.get_cluster_property_from_xml(el))
-
- def test_get_cluster_property_from_xml(self):
- el = ET.fromstring("""
- <parameter name="default-resource-stickiness" unique="0">
- <shortdesc lang="en"></shortdesc>
- <content type="integer" default="0"/>
- <longdesc lang="en"></longdesc>
- </parameter>
- """)
- expected = {
- "name": "default-resource-stickiness",
- "shortdesc": "",
- "longdesc": "",
- "type": "integer",
- "default": "0"
- }
- self.assertEqual(expected, utils.get_cluster_property_from_xml(el))
-
- def test_get_cluster_property_default(self):
- definition = {
- "default-resource-stickiness": {
- "name": "default-resource-stickiness",
- "shortdesc": "",
- "longdesc": "",
- "type": "integer",
- "default": "0",
- "source": "pengine"
- },
- "no-quorum-policy": {
- "name": "no-quorum-policy",
- "shortdesc": "What to do when the cluster does not have quorum",
- "longdesc": "What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, suicide",
- "type": "enum",
- "default": "stop",
- "enum": ["stop", "freeze", "ignore", "suicide"],
- "source": "pengine"
- },
- "enable-acl": {
- "name": "enable-acl",
- "shortdesc": "Enable CIB ACL",
- "longdesc": "Enable CIB ACL",
- "type": "boolean",
- "default": "false",
- "source": "cib"
- }
- }
+ def test_no_quorate_no_qdevice(self):
+ parsed = utils.parse_quorumtool_output("""\
+Quorum information
+------------------
+Date: Fri Jan 16 13:03:35 2015
+Quorum provider: corosync_votequorum
+Nodes: 1
+Node ID: 1
+Ring ID: 19868
+Quorate: No
+
+Votequorum information
+----------------------
+Expected votes: 3
+Highest expected: 3
+Total votes: 1
+Quorum: 2 Activity blocked
+Flags:
+
+Membership information
+----------------------
+ Nodeid Votes Qdevice Name
+ 1 1 NR rh70-node1 (local)
+""")
+ self.assertEqual(False, parsed["quorate"])
+ self.assertEqual(2, parsed["quorum"])
self.assertEqual(
- utils.get_cluster_property_default(
- definition, "default-resource-stickiness"
- ),
- "0"
+ [
+ {"name": "rh70-node1", "votes": 1, "local": True},
+ ],
+ parsed["node_list"]
)
+ self.assertEqual([], parsed["qdevice_list"])
+
+ def test_no_quorate_with_qdevice(self):
+ parsed = utils.parse_quorumtool_output("""\
+Quorum information
+------------------
+Date: Fri Jan 16 13:03:35 2015
+Quorum provider: corosync_votequorum
+Nodes: 1
+Node ID: 1
+Ring ID: 19868
+Quorate: No
+
+Votequorum information
+----------------------
+Expected votes: 3
+Highest expected: 3
+Total votes: 1
+Quorum: 2 Activity blocked
+Flags: Qdevice
+
+Membership information
+----------------------
+ Nodeid Votes Qdevice Name
+ 1 1 NR rh70-node1 (local)
+ 0 0 Qdevice (votes 1)
+""")
+ self.assertEqual(False, parsed["quorate"])
+ self.assertEqual(2, parsed["quorum"])
self.assertEqual(
- utils.get_cluster_property_default(definition, "no-quorum-policy"),
- "stop"
+ [
+ {"name": "rh70-node1", "votes": 1, "local": True},
+ ],
+ parsed["node_list"]
)
self.assertEqual(
- utils.get_cluster_property_default(definition, "enable-acl"),
- "false"
- )
- self.assertRaises(
- utils.UnknownPropertyException,
- utils.get_cluster_property_default, definition, "non-existing"
+ [
+ {"name": "Qdevice", "votes": 0, "local": False},
+ ],
+ parsed["qdevice_list"]
)
- def test_is_valid_cib_value_unknown_type(self):
- # should be always true
- self.assertTrue(utils.is_valid_cib_value("unknown", "test"))
- self.assertTrue(utils.is_valid_cib_value("string", "string value"))
+ def test_error_missing_quorum(self):
+ parsed = utils.parse_quorumtool_output("""\
+Quorum information
+------------------
+Date: Fri Jan 16 13:03:28 2015
+Quorum provider: corosync_votequorum
+Nodes: 3
+Node ID: 1
+Ring ID: 19860
+Quorate: Yes
- def test_is_valid_cib_value_integer(self):
- self.assertTrue(utils.is_valid_cib_value("integer", "0"))
- self.assertTrue(utils.is_valid_cib_value("integer", "42"))
- self.assertTrue(utils.is_valid_cib_value("integer", "-90"))
- self.assertTrue(utils.is_valid_cib_value("integer", "+90"))
- self.assertTrue(utils.is_valid_cib_value("integer", "INFINITY"))
- self.assertTrue(utils.is_valid_cib_value("integer", "-INFINITY"))
- self.assertTrue(utils.is_valid_cib_value("integer", "+INFINITY"))
- self.assertFalse(utils.is_valid_cib_value("integer", "0.0"))
- self.assertFalse(utils.is_valid_cib_value("integer", "-10.9"))
- self.assertFalse(utils.is_valid_cib_value("integer", "string"))
+Votequorum information
+----------------------
+Expected votes: 3
+Highest expected: 3
+Total votes: 3
+Quorum:
+Flags: Quorate
- def test_is_valid_cib_value_enum(self):
- self.assertTrue(
- utils.is_valid_cib_value("enum", "this", ["another", "this", "1"])
- )
- self.assertFalse(
- utils.is_valid_cib_value("enum", "this", ["another", "this_not"])
- )
- self.assertFalse(utils.is_valid_cib_value("enum", "this", []))
- self.assertFalse(utils.is_valid_cib_value("enum", "this"))
+Membership information
+----------------------
+ Nodeid Votes Qdevice Name
+ 1 1 NR rh70-node1 (local)
+ 2 1 NR rh70-node2
+ 3 1 NR rh70-node3
+""")
+ self.assertEqual(None, parsed)
- def test_is_valid_cib_value_boolean(self):
- self.assertTrue(utils.is_valid_cib_value("boolean", "true"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "TrUe"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "TRUE"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "yes"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "on"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "y"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "Y"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "1"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "false"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "FaLse"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "FALSE"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "off"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "no"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "N"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "n"))
- self.assertTrue(utils.is_valid_cib_value("boolean", "0"))
- self.assertFalse(utils.is_valid_cib_value("boolean", "-1"))
- self.assertFalse(utils.is_valid_cib_value("boolean", "not"))
- self.assertFalse(utils.is_valid_cib_value("boolean", "random_string"))
- self.assertFalse(utils.is_valid_cib_value("boolean", "truth"))
+ def test_error_quorum_garbage(self):
+ parsed = utils.parse_quorumtool_output("""\
+Quorum information
+------------------
+Date: Fri Jan 16 13:03:28 2015
+Quorum provider: corosync_votequorum
+Nodes: 3
+Node ID: 1
+Ring ID: 19860
+Quorate: Yes
- def test_is_valid_cib_value_time(self):
- self.assertTrue(utils.is_valid_cib_value("time", "10"))
- self.assertTrue(utils.is_valid_cib_value("time", "0"))
- self.assertTrue(utils.is_valid_cib_value("time", "9s"))
- self.assertTrue(utils.is_valid_cib_value("time", "10sec"))
- self.assertTrue(utils.is_valid_cib_value("time", "10min"))
- self.assertTrue(utils.is_valid_cib_value("time", "10m"))
- self.assertTrue(utils.is_valid_cib_value("time", "10h"))
- self.assertTrue(utils.is_valid_cib_value("time", "10hr"))
- self.assertFalse(utils.is_valid_cib_value("time", "5.2"))
- self.assertFalse(utils.is_valid_cib_value("time", "-10"))
- self.assertFalse(utils.is_valid_cib_value("time", "10m 2s"))
- self.assertFalse(utils.is_valid_cib_value("time", "hour"))
- self.assertFalse(utils.is_valid_cib_value("time", "day"))
+Votequorum information
+----------------------
+Expected votes: 3
+Highest expected: 3
+Total votes: 3
+Quorum: Foo
+Flags: Quorate
- def test_validate_cluster_property(self):
- definition = {
- "default-resource-stickiness": {
- "name": "default-resource-stickiness",
- "shortdesc": "",
- "longdesc": "",
- "type": "integer",
- "default": "0",
- "source": "pengine"
- },
- "no-quorum-policy": {
- "name": "no-quorum-policy",
- "shortdesc": "What to do when the cluster does not have quorum",
- "longdesc": "What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, suicide",
- "type": "enum",
- "default": "stop",
- "enum": ["stop", "freeze", "ignore", "suicide"],
- "source": "pengine"
- },
- "enable-acl": {
- "name": "enable-acl",
- "shortdesc": "Enable CIB ACL",
- "longdesc": "Enable CIB ACL",
- "type": "boolean",
- "default": "false",
- "source": "cib"
- }
- }
- self.assertTrue(utils.is_valid_cluster_property(
- definition, "default-resource-stickiness", "10"
- ))
- self.assertTrue(utils.is_valid_cluster_property(
- definition, "default-resource-stickiness", "-1"
- ))
- self.assertTrue(utils.is_valid_cluster_property(
- definition, "no-quorum-policy", "freeze"
- ))
- self.assertTrue(utils.is_valid_cluster_property(
- definition, "no-quorum-policy", "suicide"
- ))
- self.assertTrue(utils.is_valid_cluster_property(
- definition, "enable-acl", "true"
- ))
- self.assertTrue(utils.is_valid_cluster_property(
- definition, "enable-acl", "false"
- ))
- self.assertTrue(utils.is_valid_cluster_property(
- definition, "enable-acl", "on"
- ))
- self.assertTrue(utils.is_valid_cluster_property(
- definition, "enable-acl", "OFF"
- ))
- self.assertFalse(utils.is_valid_cluster_property(
- definition, "default-resource-stickiness", "test"
- ))
- self.assertFalse(utils.is_valid_cluster_property(
- definition, "default-resource-stickiness", "1.2"
- ))
- self.assertFalse(utils.is_valid_cluster_property(
- definition, "no-quorum-policy", "invalid"
- ))
- self.assertFalse(utils.is_valid_cluster_property(
- definition, "enable-acl", "not"
- ))
- self.assertRaises(
- utils.UnknownPropertyException,
- utils.is_valid_cluster_property, definition, "unknown", "value"
- )
+Membership information
+----------------------
+ Nodeid Votes Qdevice Name
+ 1 1 NR rh70-node1 (local)
+ 2 1 NR rh70-node2
+ 3 1 NR rh70-node3
+""")
+ self.assertEqual(None, parsed)
+
+ def test_error_node_votes_garbage(self):
+ parsed = utils.parse_quorumtool_output("""\
+Quorum information
+------------------
+Date: Fri Jan 16 13:03:28 2015
+Quorum provider: corosync_votequorum
+Nodes: 3
+Node ID: 1
+Ring ID: 19860
+Quorate: Yes
+
+Votequorum information
+----------------------
+Expected votes: 3
+Highest expected: 3
+Total votes: 3
+Quorum: 2
+Flags: Quorate
- def assert_element_id(self, node, node_id):
- self.assertTrue(
- isinstance(node, xml.dom.minidom.Element),
- "element with id '%s' not found" % node_id
+Membership information
+----------------------
+ Nodeid Votes Qdevice Name
+ 1 1 NR rh70-node1 (local)
+ 2 foo NR rh70-node2
+ 3 1 NR rh70-node3
+""")
+ self.assertEqual(None, parsed)
+
+
+class IsNodeStopCauseQuorumLossTest(unittest.TestCase):
+ def test_not_quorate(self):
+ quorum_info = {
+ "quorate": False,
+ }
+ self.assertEqual(
+ False,
+ utils.is_node_stop_cause_quorum_loss(quorum_info, True)
)
- self.assertEqual(node.getAttribute("id"), node_id)
-class RunParallelTest(unittest.TestCase):
- def fixture_create_worker(self, log, name, sleepSeconds=0):
- def worker():
- sleep(sleepSeconds)
- log.append(name)
- return worker
+ def test_local_node_not_in_list(self):
+ quorum_info = {
+ "quorate": True,
+ "quorum": 1,
+ "node_list": [
+ {"name": "rh70-node3", "votes": 1, "local": False},
+ ],
+ "qdevice_list": [],
+ }
+ self.assertEqual(
+ False,
+ utils.is_node_stop_cause_quorum_loss(quorum_info, True)
+ )
- def test_run_all_workers(self):
- log = []
- utils.run_parallel(
- [
- self.fixture_create_worker(log, 'first'),
- self.fixture_create_worker(log, 'second'),
+ def test_local_node_alone_in_list(self):
+ quorum_info = {
+ "quorate": True,
+ "quorum": 1,
+ "node_list": [
+ {"name": "rh70-node3", "votes": 1, "local": True},
],
- wait_seconds=.1
+ "qdevice_list": [],
+ }
+ self.assertEqual(
+ True,
+ utils.is_node_stop_cause_quorum_loss(quorum_info, True)
)
- self.assertEqual(log, ['first', 'second'])
+ def test_local_node_still_quorate(self):
+ quorum_info = {
+ "quorate": True,
+ "quorum": 4,
+ "node_list": [
+ {"name": "rh70-node1", "votes": 3, "local": False},
+ {"name": "rh70-node2", "votes": 2, "local": False},
+ {"name": "rh70-node3", "votes": 1, "local": True},
+ ],
+ "qdevice_list": [],
+ }
+ self.assertEqual(
+ False,
+ utils.is_node_stop_cause_quorum_loss(quorum_info, True)
+ )
- def test_wait_for_slower_workers(self):
- log = []
- utils.run_parallel(
- [
- self.fixture_create_worker(log, 'first', .03),
- self.fixture_create_worker(log, 'second'),
+ quorum_info = {
+ "quorate": True,
+ "quorum": 4,
+ "node_list": [
+ {"name": "rh70-node1", "votes": 3, "local": False},
+ {"name": "rh70-node2", "votes": 2, "local": True},
+ {"name": "rh70-node3", "votes": 1, "local": False},
],
- wait_seconds=.01
+ "qdevice_list": [],
+ }
+ self.assertEqual(
+ False,
+ utils.is_node_stop_cause_quorum_loss(quorum_info, True)
)
- self.assertEqual(log, ['second', 'first'])
+ def test_local_node_quorum_loss(self):
+ quorum_info = {
+ "quorate": True,
+ "quorum": 4,
+ "node_list": [
+ {"name": "rh70-node1", "votes": 3, "local": True},
+ {"name": "rh70-node2", "votes": 2, "local": False},
+ {"name": "rh70-node3", "votes": 1, "local": False},
+ ],
+ "qdevice_list": [],
+ }
+ self.assertEqual(
+ True,
+ utils.is_node_stop_cause_quorum_loss(quorum_info, True)
+ )
-class PrepareNodeNamesTest(unittest.TestCase):
- def test_return_original_when_is_in_pacemaker_nodes(self):
- node = 'test'
+ def test_one_node_still_quorate(self):
+ quorum_info = {
+ "quorate": True,
+ "quorum": 4,
+ "node_list": [
+ {"name": "rh70-node1", "votes": 3, "local": True},
+ {"name": "rh70-node2", "votes": 2, "local": False},
+ {"name": "rh70-node3", "votes": 1, "local": False},
+ ],
+ "qdevice_list": [],
+ }
self.assertEqual(
- node,
- utils.prepare_node_name(node, {1: node}, {})
+ False,
+ utils.is_node_stop_cause_quorum_loss(
+ quorum_info, False, ["rh70-node3"]
+ )
)
- def test_return_original_when_is_not_in_corosync_nodes(self):
- node = 'test'
+ quorum_info = {
+ "quorate": True,
+ "quorum": 4,
+ "node_list": [
+ {"name": "rh70-node1", "votes": 3, "local": True},
+ {"name": "rh70-node2", "votes": 2, "local": False},
+ {"name": "rh70-node3", "votes": 1, "local": False},
+ ],
+ "qdevice_list": [],
+ }
self.assertEqual(
- node,
- utils.prepare_node_name(node, {}, {})
+ False,
+ utils.is_node_stop_cause_quorum_loss(
+ quorum_info, False, ["rh70-node2"]
+ )
)
- def test_return_original_when_corosync_id_not_in_pacemaker(self):
- node = 'test'
+ def test_one_node_quorum_loss(self):
+ quorum_info = {
+ "quorate": True,
+ "quorum": 4,
+ "node_list": [
+ {"name": "rh70-node1", "votes": 3, "local": True},
+ {"name": "rh70-node2", "votes": 2, "local": False},
+ {"name": "rh70-node3", "votes": 1, "local": False},
+ ],
+ "qdevice_list": [],
+ }
self.assertEqual(
- node,
- utils.prepare_node_name(node, {}, {1: node})
+ True,
+ utils.is_node_stop_cause_quorum_loss(
+ quorum_info, False, ["rh70-node1"]
+ )
)
- def test_return_modified_name(self):
- node = 'test'
+ def test_more_nodes_still_quorate(self):
+ quorum_info = {
+ "quorate": True,
+ "quorum": 4,
+ "node_list": [
+ {"name": "rh70-node1", "votes": 4, "local": True},
+ {"name": "rh70-node2", "votes": 1, "local": False},
+ {"name": "rh70-node3", "votes": 1, "local": False},
+ ],
+ "qdevice_list": [],
+ }
self.assertEqual(
- 'another (test)',
- utils.prepare_node_name(node, {1: 'another'}, {1: node})
+ False,
+ utils.is_node_stop_cause_quorum_loss(
+ quorum_info, False, ["rh70-node2", "rh70-node3"]
+ )
)
- def test_return_modified_name_with_pm_null_case(self):
- node = 'test'
+ def test_more_nodes_quorum_loss(self):
+ quorum_info = {
+ "quorate": True,
+ "quorum": 4,
+ "node_list": [
+ {"name": "rh70-node1", "votes": 3, "local": True},
+ {"name": "rh70-node2", "votes": 2, "local": False},
+ {"name": "rh70-node3", "votes": 1, "local": False},
+ ],
+ "qdevice_list": [],
+ }
self.assertEqual(
- '*Unknown* (test)',
- utils.prepare_node_name(node, {1: '(null)'}, {1: node})
+ True,
+ utils.is_node_stop_cause_quorum_loss(
+ quorum_info, False, ["rh70-node2", "rh70-node3"]
+ )
)
-class NodeActionTaskTest(unittest.TestCase):
- def test_can_run_action(self):
- def action(node, arg, kwarg=None):
- return (0, ':'.join([node, arg, kwarg]))
+ def test_qdevice_still_quorate(self):
+ quorum_info = {
+ "quorate": True,
+ "quorum": 3,
+ "node_list": [
+ {"name": "rh70-node1", "votes": 1, "local": True},
+ {"name": "rh70-node2", "votes": 1, "local": False},
+ {"name": "rh70-node3", "votes": 1, "local": False},
+ ],
+ "qdevice_list": [
+ {"name": "Qdevice", "votes": 1, "local": False},
+ ],
+ }
+ self.assertEqual(
+ False,
+ utils.is_node_stop_cause_quorum_loss(
+ quorum_info, False, ["rh70-node2"]
+ )
+ )
- report_list = []
- def report(node, returncode, output):
- report_list.append('|'.join([node, str(returncode), output]))
+ def test_qdevice_quorum_lost(self):
+ quorum_info = {
+ "quorate": True,
+ "quorum": 3,
+ "node_list": [
+ {"name": "rh70-node1", "votes": 1, "local": True},
+ {"name": "rh70-node2", "votes": 1, "local": False},
+ {"name": "rh70-node3", "votes": 1, "local": False},
+ ],
+ "qdevice_list": [
+ {"name": "Qdevice", "votes": 1, "local": False},
+ ],
+ }
+ self.assertEqual(
+ True,
+ utils.is_node_stop_cause_quorum_loss(
+ quorum_info, False, ["rh70-node2", "rh70-node3"]
+ )
+ )
- task = utils.create_task(report, action, 'node', 'arg', kwarg='kwarg')
- task()
+ def test_qdevice_lost_still_quorate(self):
+ quorum_info = {
+ "quorate": True,
+ "quorum": 4, # expect qdevice votes == 1
+ "node_list": [
+ {"name": "rh70-node1", "votes": 2, "local": True},
+ {"name": "rh70-node2", "votes": 2, "local": False},
+ {"name": "rh70-node3", "votes": 2, "local": False},
+ ],
+ "qdevice_list": [
+ {"name": "Qdevice", "votes": 0, "local": False},
+ ],
+ }
+ self.assertEqual(
+ False,
+ utils.is_node_stop_cause_quorum_loss(
+ quorum_info, False, ["rh70-node2"]
+ )
+ )
- self.assertEqual(['node|0|node:arg:kwarg'], report_list)
+ def test_qdevice_lost_quorum_lost(self):
+ quorum_info = {
+ "quorate": True,
+ "quorum": 4, # expect qdevice votes == 1
+ "node_list": [
+ {"name": "rh70-node1", "votes": 2, "local": True},
+ {"name": "rh70-node2", "votes": 2, "local": False},
+ {"name": "rh70-node3", "votes": 2, "local": False},
+ ],
+ "qdevice_list": [
+ {"name": "Qdevice", "votes": 0, "local": False},
+ ],
+ }
+ self.assertEqual(
+ True,
+ utils.is_node_stop_cause_quorum_loss(
+ quorum_info, False, ["rh70-node2", "rh70-node3"]
+ )
+ )
diff --git a/pcs/test/tools/color_text_runner.py b/pcs/test/tools/color_text_runner.py
index 305fe32..78a0787 100644
--- a/pcs/test/tools/color_text_runner.py
+++ b/pcs/test/tools/color_text_runner.py
@@ -64,6 +64,16 @@ class ColorTextTestResult(TextTestResult):
self.stream.write(apply(["lightred", "bold"], 'F'))
self.stream.flush()
+ def addSkip(self, test, reason):
+ super(TextTestResult, self).addSkip(test, reason)
+ if self.showAll:
+ self.stream.writeln(
+ apply(["blue", "bold"], "skipped {0!r}".format(reason))
+ )
+ elif self.dots:
+ self.stream.write(apply(["blue", "bold"], 's'))
+ self.stream.flush()
+
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
diff --git a/pcs/usage.py b/pcs/usage.py
index 9d24b78..ee53a2f 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -24,6 +24,7 @@ def full_usage():
out += strip_extras(status([],False))
out += strip_extras(config([],False))
out += strip_extras(pcsd([],False))
+ out += strip_extras(alert([], False))
print(out.strip())
print("Examples:\n" + examples.replace(" \ ",""))
@@ -115,6 +116,7 @@ def generate_completion_tree_from_usage():
tree["config"] = generate_tree(config([],False))
tree["pcsd"] = generate_tree(pcsd([],False))
tree["node"] = generate_tree(node([], False))
+ tree["alert"] = generate_tree(alert([], False))
return tree
def generate_tree(usage_txt):
@@ -169,6 +171,7 @@ Commands:
config View and manage cluster configuration.
pcsd Manage pcs daemon.
node Manage cluster nodes.
+ alert Set pacemaker alerts.
"""
# Advanced usage to possibly add later
# --corosync_conf=<corosync file> Specify alternative corosync.conf file
@@ -650,7 +653,7 @@ Commands:
scope=configuration. Do not specify a scope if you want to edit
the saved CIB using pcs (pcs -f <command>).
- cib-push <filename> [scope=<scope> | --config]
+ cib-push <filename> [scope=<scope> | --config] [--wait[=<n>]]
Push the raw xml from <filename> to the CIB (Cluster Information Base).
You can obtain the CIB by running the 'pcs cluster cib' command, which
is recommended first step when you want to perform desired
@@ -660,7 +663,8 @@ Commands:
crm_config, rsc_defaults, op_defaults. --config is the same as
scope=configuration. Use of --config is recommended. Do not specify
a scope if you need to push the whole CIB or be warned in the case
- of outdated CIB.
+ of outdated CIB. If --wait is specified wait up to 'n' seconds for
+ changes to be applied.
WARNING: the selected scope of the CIB will be overwritten by the
current content of the specified file.
@@ -998,8 +1002,8 @@ Commands:
Create a ticket constraint with a resource set.
Available options are sequential=true/false, require-all=true/false,
action=start/promote/demote/stop and role=Stopped/Started/Master/Slave.
- Required constraint option is ticket.
- Optional constraint option is loss-policy=fence/stop/freeze/demote.
+ Required constraint option is ticket=<ticket>. Optional constraint
+ options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote.
remove [constraint id]...
Remove constraint(s) or constraint rules with the specified id(s).
@@ -1269,14 +1273,20 @@ Commands:
def qdevice(args=[], pout=True):
output = """
Usage: pcs qdevice <command>
-Manage quorum device provider on the local host
+Manage quorum device provider on the local host, currently only 'net' model is
+supported.
Commands:
+ status <device model> [--full] [<cluster name>]
+ Show runtime status of specified model of quorum device provider. Using
+ --full will give more detailed output. If <cluster name> is specified,
+ only information about the specified cluster will be displayed.
+
setup model <device model> [--enable] [--start]
Configure specified model of quorum device provider. Quorum device then
- may be added to clusters by "pcs quorum device add" command.
- --start will also start the provider. --enable will configure
- the provider to start on boot.
+ can be added to clusters by running "pcs quorum device add" command
+ in a cluster. --start will also start the provider. --enable will
+ configure the provider to start on boot.
destroy <device model>
Disable and stop specified model of quorum device provider and delete
@@ -1289,8 +1299,10 @@ Commands:
Stop specified model of quorum device provider.
kill <device model>
- Force specified model of quorum device provider to stop (performs
- kill -9).
+ Force specified model of quorum device provider to stop (performs kill
+ -9). Note that the init system (e.g. systemd) can detect that the qdevice
+ is not running and start it again. If you want to stop the qdevice, run
+ "pcs qdevice stop" command.
enable <device model>
Configure specified model of quorum device provider to start on boot.
@@ -1307,21 +1319,42 @@ Commands:
def quorum(args=[], pout=True):
output = """
Usage: pcs quorum <command>
-Manage cluster quorum settings
+Manage cluster quorum settings.
Commands:
config
Show quorum configuration.
- device add [generic options] model <device model> [model options]
- Add quorum device to cluster. Quorum device needs to be created first
- by "pcs qdevice setup" command.
+ status
+ Show quorum runtime status.
+
+ device add [<generic options>] model <device model> [<model options>]
+ Add a quorum device to the cluster. Quorum device needs to be created
+ first by "pcs qdevice setup" command. It is not possible to use more
+ than one quorum device in a cluster simultaneously. Generic options,
+ model and model options are all documented in corosync's
+ corosync-qdevice(8) man page.
device remove
- Remove quorum device from cluster.
+ Remove a quorum device from the cluster.
+
+ device status [--full]
+ Show quorum device runtime status. Using --full will give more detailed
+ output.
+
+ device update [<generic options>] [model <model options>]
+ Add/Change quorum device options. Generic options and model options are
+ all documented in corosync's corosync-qdevice(8) man page. Requires
+ the cluster to be stopped.
+
+ WARNING: If you want to change "host" option of qdevice model net, use
+ "pcs quorum device remove" and "pcs quorum device add" commands
+ "pcs quorum device remove" and "pcs quorum device add" commands
+ to set up configuration properly unless the old and new hosts are the same
- device update [generic options] [model <model options>]
- Add/Change quorum device options. Requires cluster to be stopped.
+ expected-votes <votes>
+ Set expected votes in the live cluster to specified value. This only
+ affects the live cluster and does not change any configuration files.
unblock [--force]
Cancel waiting for all nodes when establishing quorum. Useful in
@@ -1340,16 +1373,56 @@ Commands:
[last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]]
Add/Change quorum options. At least one option must be specified.
Options are documented in corosync's votequorum(5) man page. Requires
- cluster to be stopped.
+ the cluster to be stopped.
"""
if pout:
print(sub_usage(args, output))
else:
return output
+
+def alert(args=[], pout=True):
+ output = """
+Usage: pcs alert <command>
+Set pacemaker alerts.
+
+Commands:
+ [config|show]
+ Show all configured alerts.
+
+ create path=<path> [id=<alert-id>] [description=<description>]
+ [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
+ Create new alert with specified path. Id will be automatically
+ generated if it is not specified.
+
+ update <alert-id> [path=<path>] [description=<description>]
+ [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
+ Update existing alert with specified id.
+
+ remove <alert-id>
+ Remove alert with specified id.
+
+ recipient add <alert-id> <recipient-value> [description=<description>]
+ [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
+ Add new recipient to specified alert.
+
+ recipient update <alert-id> <recipient-value> [description=<description>]
+ [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
+ Update existing recipient identified by alert and its value.
+
+ recipient remove <alert-id> <recipient-value>
+ Remove specified recipient.
+"""
+ if pout:
+ print(sub_usage(args, output))
+ else:
+ return output
+
+
def show(main_usage_name, rest_usage_names):
usage_map = {
"acl": acl,
+ "alert": alert,
"cluster": cluster,
"config": config,
"constraint": constraint,
diff --git a/pcs/utils.py b/pcs/utils.py
index 11bd4cf..171fbdd 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -56,7 +56,6 @@ except ImportError:
from pcs import settings, usage
-from pcs.common import report_codes
from pcs.cli.common.reports import (
process_library_reports,
LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
@@ -64,18 +63,21 @@ from pcs.cli.common.reports import (
from pcs.common.tools import simple_cache
from pcs.lib import reports
from pcs.lib.env import LibraryEnvironment
-from pcs.lib.errors import LibraryError, ReportItemSeverity
-import pcs.lib.corosync.config_parser as corosync_conf_parser
+from pcs.lib.errors import LibraryError
from pcs.lib.external import (
- is_cman_cluster,
CommandRunner,
- is_service_running,
- is_service_enabled,
+ is_cman_cluster,
is_systemctl,
+ is_service_enabled,
+ is_service_running,
+ disable_service,
+ DisableServiceError,
+ enable_service,
+ EnableServiceError,
)
import pcs.lib.resource_agent as lib_ra
+import pcs.lib.corosync.config_parser as corosync_conf_parser
from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade
-from pcs.lib.nodes_task import check_corosync_offline_on_nodes
from pcs.lib.pacemaker import has_resource_wait_support
from pcs.lib.pacemaker_state import ClusterState
from pcs.lib.pacemaker_values import(
@@ -686,50 +688,18 @@ def autoset_2node_corosync(corosync_conf):
facade._ConfigFacade__update_two_node()
return facade.config
-# when adding or removing a node, changing number of nodes to or from two,
-# we need to change qdevice algorith lms <-> 2nodelms, which cannot be done when
-# the cluster is running
-def check_qdevice_algorithm_and_running_cluster(corosync_conf, add=True):
+# is it needed to handle corosync-qdevice service when managing cluster services
+def need_to_handle_qdevice_service():
if is_rhel6():
- return
- facade = corosync_conf_facade.from_string(corosync_conf)
- if not facade.has_quorum_device():
- return
- node_list = facade.get_nodes()
- node_count_target = len(node_list) + (1 if add else -1)
- model, model_opts, dummy_generic_opts = facade.get_quorum_device_settings()
- if model != "net":
- return
- algorithm = model_opts.get("algorithm", "")
- need_stopped = (
- (algorithm == "lms" and node_count_target == 2)
- or
- (algorithm == "2nodelms" and node_count_target != 2)
- )
- if not need_stopped:
- return
-
+ return False
try:
- lib_env = get_lib_env()
- check_corosync_offline_on_nodes(
- lib_env.node_communicator(),
- lib_env.report_processor,
- node_list,
- get_modificators()["skip_offline_nodes"]
+ cfg = corosync_conf_facade.from_string(
+ open(settings.corosync_conf_file).read()
)
- except LibraryError as e:
- report_item_list = list(e.args)
- for report_item in report_item_list:
- if (
- report_item.code == report_codes.COROSYNC_RUNNING_ON_NODE
- and
- report_item.severity == ReportItemSeverity.ERROR
- ):
- report_item_list.append(
- reports.qdevice_remove_or_cluster_stop_needed()
- )
- break
- process_library_reports(report_item_list)
+ return cfg.has_quorum_device()
+ except (EnvironmentError, corosync_conf_parser.CorosyncConfParserException):
+ # corosync.conf not present or not valid => no qdevice specified
+ return False
def getNextNodeID(corosync_conf):
currentNodes = []
@@ -1592,7 +1562,7 @@ def is_etree(var):
)
# Replace only configuration section of cib with dom passed
-def replace_cib_configuration(dom):
+def replace_cib_configuration(dom, cib_upgraded=False):
if is_etree(dom):
#etree returns string in bytes: b'xml'
#python 3 removed .encode() from byte strings
@@ -1603,7 +1573,12 @@ def replace_cib_configuration(dom):
new_dom = dom.toxml()
else:
new_dom = dom
- output, retval = run(["cibadmin", "--replace", "-o", "configuration", "-V", "--xml-pipe"],False,new_dom)
+ cmd = ["cibadmin", "--replace", "-V", "--xml-pipe"]
+ if cib_upgraded:
+ print("CIB has been upgraded to the latest schema version.")
+ else:
+ cmd += ["-o", "configuration"]
+ output, retval = run(cmd, False, new_dom)
if retval != 0:
err("Unable to update cib\n"+output)
@@ -2065,28 +2040,43 @@ def serviceStatus(prefix):
pass
def enableServices():
+ # do NOT handle SBD in here, it is started by pacemaker not systemd or init
if is_rhel6():
- run(["chkconfig", "pacemaker", "on"])
+ service_list = ["pacemaker"]
else:
- if is_systemctl():
- run(["systemctl", "enable", "corosync.service"])
- run(["systemctl", "enable", "pacemaker.service"])
- else:
- run(["chkconfig", "corosync", "on"])
- run(["chkconfig", "pacemaker", "on"])
+ service_list = ["corosync", "pacemaker"]
+ if need_to_handle_qdevice_service():
+ service_list.append("corosync-qdevice")
+
+ report_item_list = []
+ for service in service_list:
+ try:
+ enable_service(cmd_runner(), service)
+ except EnableServiceError as e:
+ report_item_list.append(
+ reports.service_enable_error(e.service, e.message)
+ )
+ if report_item_list:
+ raise LibraryError(*report_item_list)
def disableServices():
- if is_rhel6():
- run(["chkconfig", "pacemaker", "off"])
- run(["chkconfig", "corosync", "off"]) # Left here for users of old pcs
- # which enabled corosync
- else:
- if is_systemctl():
- run(["systemctl", "disable", "corosync.service"])
- run(["systemctl", "disable", "pacemaker.service"])
- else:
- run(["chkconfig", "corosync", "off"])
- run(["chkconfig", "pacemaker", "off"])
+ # Disable corosync on RHEL6 as well - left here for users of old pcs which
+ # enabled corosync.
+ # do NOT handle SBD in here, it is started by pacemaker not systemd or init
+ service_list = ["corosync", "pacemaker"]
+ if need_to_handle_qdevice_service():
+ service_list.append("corosync-qdevice")
+
+ report_item_list = []
+ for service in service_list:
+ try:
+ disable_service(cmd_runner(), service)
+ except DisableServiceError as e:
+ report_item_list.append(
+ reports.service_disable_error(e.service, e.message)
+ )
+ if report_item_list:
+ raise LibraryError(*report_item_list)
def write_file(path, data, permissions=0o644, binary=False):
if os.path.exists(path):
@@ -2243,7 +2233,7 @@ def parse_cman_quorum_info(cman_info):
in_node_list = False
local_node_id = ""
try:
- for line in cman_info.split("\n"):
+ for line in cman_info.splitlines():
line = line.strip()
if not line:
continue
@@ -2255,12 +2245,13 @@ def parse_cman_quorum_info(cman_info):
parsed["node_list"].append({
"name": parts[3],
"votes": int(parts[2]),
- "local": local_node_id == parts[0]
+ "local": local_node_id == parts[0],
})
else:
if line == "---Votes---":
in_node_list = True
parsed["node_list"] = []
+ parsed["qdevice_list"] = []
continue
if not ":" in line:
continue
@@ -2285,7 +2276,7 @@ def parse_quorumtool_output(quorumtool_output):
parsed = {}
in_node_list = False
try:
- for line in quorumtool_output.split("\n"):
+ for line in quorumtool_output.splitlines():
line = line.strip()
if not line:
continue
@@ -2294,15 +2285,25 @@ def parse_quorumtool_output(quorumtool_output):
# skip headers
continue
parts = line.split()
- parsed["node_list"].append({
- "name": parts[3],
- "votes": int(parts[1]),
- "local": len(parts) > 4 and parts[4] == "(local)"
- })
+ if parts[0] == "0":
+ # this line has nodeid == 0, this is a qdevice line
+ parsed["qdevice_list"].append({
+ "name": parts[2],
+ "votes": int(parts[1]),
+ "local": False,
+ })
+ else:
+ # this line has non-zero nodeid, this is a node line
+ parsed["node_list"].append({
+ "name": parts[3],
+ "votes": int(parts[1]),
+ "local": len(parts) > 4 and parts[4] == "(local)",
+ })
else:
if line == "Membership information":
in_node_list = True
parsed["node_list"] = []
+ parsed["qdevice_list"] = []
continue
if not ":" in line:
continue
@@ -2335,6 +2336,8 @@ def is_node_stop_cause_quorum_loss(quorum_info, local=True, node_list=None):
if node_list and node_info["name"] in node_list:
continue
votes_after_stop += node_info["votes"]
+ for qdevice_info in quorum_info.get("qdevice_list", []):
+ votes_after_stop += qdevice_info["votes"]
return votes_after_stop < quorum_info["quorum"]
def dom_prepare_child_element(dom_element, tag_name, id):
@@ -2656,6 +2659,7 @@ def get_modificators():
"enable": "--enable" in pcs_options,
"force": "--force" in pcs_options,
"full": "--full" in pcs_options,
+ "name": pcs_options.get("--name", None),
"skip_offline_nodes": "--skip-offline" in pcs_options,
"start": "--start" in pcs_options,
"watchdog": pcs_options.get("--watchdog", []),
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
index 2fa34af..276880c 100644
--- a/pcsd/bootstrap.rb
+++ b/pcsd/bootstrap.rb
@@ -43,7 +43,7 @@ def get_pcs_path(pcsd_path)
end
end
-PCS_VERSION = '0.9.152'
+PCS_VERSION = '0.9.153'
COROSYNC = COROSYNC_BINARIES + "corosync"
ISRHEL6 = is_rhel6
ISSYSTEMCTL = is_systemctl
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 415e02a..7c25e10 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -1965,6 +1965,23 @@ def disable_service(service)
return (retcode == 0)
end
+def start_service(service)
+ _, _, retcode = run_cmd(
+ PCSAuth.getSuperuserAuth(), "service", service, "start"
+ )
+ return (retcode == 0)
+end
+
+def stop_service(service)
+ if not is_service_installed?(service)
+ return true
+ end
+ _, _, retcode = run_cmd(
+ PCSAuth.getSuperuserAuth(), "service", service, "stop"
+ )
+ return (retcode == 0)
+end
+
def set_cluster_prop_force(auth_user, prop, val)
cmd = [PCS, 'property', 'set', "#{prop}=#{val}", '--force']
if pacemaker_running?
diff --git a/pcsd/pcsd.service b/pcsd/pcsd.service
index 075a3a6..e506f1b 100644
--- a/pcsd/pcsd.service
+++ b/pcsd/pcsd.service
@@ -4,7 +4,7 @@ Description=PCS GUI and remote configuration interface
[Service]
EnvironmentFile=/etc/sysconfig/pcsd
Environment=GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby
-ExecStart=/usr/bin/ruby -C/var/lib/pcsd -I/usr/lib/pcsd -- /usr/lib/pcsd/ssl.rb > /dev/null &
+ExecStart=/usr/lib/pcsd/pcsd > /dev/null &
[Install]
WantedBy=multi-user.target
diff --git a/pcsd/pcsd.service-runner b/pcsd/pcsd.service-runner
new file mode 100644
index 0000000..1949a68
--- /dev/null
+++ b/pcsd/pcsd.service-runner
@@ -0,0 +1,13 @@
+#!/usr/bin/ruby
+# this file is a pcsd runner callable from a systemd unit
+# it also serves as a holder of a selinux context
+
+# add pcsd to the load path (ruby -I)
+libdir = File.dirname(__FILE__)
+$LOAD_PATH.unshift(libdir) unless $LOAD_PATH.include?(libdir)
+
+# change current directory (ruby -C)
+Dir.chdir('/var/lib/pcsd')
+
+# import and run pcsd
+require 'ssl'
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index f002d5b..b1e00fa 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -4,6 +4,8 @@ require 'open4'
require 'set'
require 'timeout'
require 'rexml/document'
+require 'base64'
+require 'tempfile'
require 'pcs.rb'
require 'resource.rb'
@@ -71,7 +73,16 @@ def remote(params, request, auth_user)
:remove_stonith_watchdog_timeout=> method(:remove_stonith_watchdog_timeout),
:set_stonith_watchdog_timeout_to_zero => method(:set_stonith_watchdog_timeout_to_zero),
:remote_enable_sbd => method(:remote_enable_sbd),
- :remote_disable_sbd => method(:remote_disable_sbd)
+ :remote_disable_sbd => method(:remote_disable_sbd),
+ :qdevice_net_get_ca_certificate => method(:qdevice_net_get_ca_certificate),
+ :qdevice_net_sign_node_certificate => method(:qdevice_net_sign_node_certificate),
+ :qdevice_net_client_init_certificate_storage => method(:qdevice_net_client_init_certificate_storage),
+ :qdevice_net_client_import_certificate => method(:qdevice_net_client_import_certificate),
+ :qdevice_net_client_destroy => method(:qdevice_net_client_destroy),
+ :qdevice_client_enable => method(:qdevice_client_enable),
+ :qdevice_client_disable => method(:qdevice_client_disable),
+ :qdevice_client_start => method(:qdevice_client_start),
+ :qdevice_client_stop => method(:qdevice_client_stop),
}
remote_cmd_with_pacemaker = {
:pacemaker_node_status => method(:remote_pacemaker_node_status),
@@ -1513,23 +1524,73 @@ def remove_resource(params, request, auth_user)
return 403, 'Permission denied'
end
force = params['force']
+ user = PCSAuth.getSuperuserAuth()
no_error_if_not_exists = params.include?('no_error_if_not_exists')
- errors = ""
- params.each { |k,v|
- if k.index("resid-") == 0
- resid = k.gsub('resid-', '')
- command = [PCS, 'resource', 'delete', resid]
- command << '--force' if force
- out, errout, retval = run_cmd(auth_user, *command)
+ resource_list = []
+ errors = ''
+ resource_to_remove = []
+ params.each { |param,_|
+ if param.start_with?('resid-')
+ resource_list << param.split('resid-', 2)[1]
+ end
+ }
+ tmp_file = nil
+ if force
+ resource_to_remove = resource_list
+ else
+ begin
+ tmp_file = Tempfile.new('temp_cib')
+ _, err, retval = run_cmd(user, PCS, 'cluster', 'cib', tmp_file.path)
if retval != 0
- unless out.index(" does not exist.") != -1 and no_error_if_not_exists
- errors += errout.join(' ').strip + "\n"
+ return [400, 'Unable to stop resource(s).']
+ end
+ cmd = [PCS, '-f', tmp_file.path, 'resource', 'disable']
+ resource_list.each { |resource|
+ _, err, retval = run_cmd(user, *cmd, resource)
+ if retval != 0
+ unless (
+ err.join('').index('unable to find a resource') != -1 and
+ no_error_if_not_exists
+ )
+ errors += "Unable to stop resource '#{resource}': #{err.join('')}"
+ end
+ else
+ resource_to_remove << resource
end
+ }
+ _, _, retval = run_cmd(
+ user, PCS, 'cluster', 'cib-push', tmp_file.path, '--config', '--wait'
+ )
+ if retval != 0
+ return [400, 'Unable to stop resource(s).']
+ end
+ errors.strip!
+ unless errors.empty?
+ $logger.info("Stopping resource(s) errors:\n#{errors}")
+ return [400, errors]
+ end
+ rescue IOError
+ return [400, 'Unable to stop resource(s).']
+ ensure
+ if tmp_file
+ tmp_file.close!
+ end
+ end
+ end
+ resource_to_remove.each { |resource|
+ cmd = [PCS, 'resource', 'delete', resource]
+ if force
+ cmd << '--force'
+ end
+ out, err, retval = run_cmd(auth_user, *cmd)
+ if retval != 0
+ unless out.index(' does not exist.') != -1 and no_error_if_not_exists
+ errors += err.join(' ').strip + "\n"
end
end
}
errors.strip!
- if errors == ""
+ if errors.empty?
return 200
else
$logger.info("Remove resource errors:\n"+errors)
@@ -2377,3 +2438,154 @@ def remote_disable_sbd(params, request, auth_user)
return [200, 'Sbd has been disabled.']
end
+
+def qdevice_net_get_ca_certificate(params, request, auth_user)
+ unless allowed_for_local_cluster(auth_user, Permissions::READ)
+ return 403, 'Permission denied'
+ end
+ begin
+ return [
+ 200,
+ Base64.encode64(File.read(COROSYNC_QDEVICE_NET_SERVER_CA_FILE))
+ ]
+ rescue => e
+ return [400, "Unable to read certificate: #{e}"]
+ end
+end
+
+def qdevice_net_sign_node_certificate(params, request, auth_user)
+ unless allowed_for_local_cluster(auth_user, Permissions::READ)
+ return 403, 'Permission denied'
+ end
+ stdout, stderr, retval = run_cmd_options(
+ auth_user,
+ {'stdin' => params[:certificate_request]},
+ PCS, 'qdevice', 'sign-net-cert-request', '--name', params[:cluster_name]
+ )
+ if retval != 0
+ return [400, stderr.join('')]
+ end
+ return [200, stdout.join('')]
+end
+
+def qdevice_net_client_init_certificate_storage(params, request, auth_user)
+ # Last step of adding qdevice into a cluster is distribution of corosync.conf
+ # file with qdevice settings. This requires FULL permissions currently.
+ # If that gets relaxed, we can require lower permissions in here as well.
+ unless allowed_for_local_cluster(auth_user, Permissions::FULL)
+ return 403, 'Permission denied'
+ end
+ stdout, stderr, retval = run_cmd_options(
+ auth_user,
+ {'stdin' => params[:ca_certificate]},
+ PCS, 'qdevice', 'net-client', 'setup'
+ )
+ if retval != 0
+ return [400, stderr.join('')]
+ end
+ return [200, stdout.join('')]
+end
+
+def qdevice_net_client_import_certificate(params, request, auth_user)
+ # Last step of adding qdevice into a cluster is distribution of corosync.conf
+ # file with qdevice settings. This requires FULL permissions currently.
+ # If that gets relaxed, we can require lower permissions in here as well.
+ unless allowed_for_local_cluster(auth_user, Permissions::FULL)
+ return 403, 'Permission denied'
+ end
+ stdout, stderr, retval = run_cmd_options(
+ auth_user,
+ {'stdin' => params[:certificate]},
+ PCS, 'qdevice', 'net-client', 'import-certificate'
+ )
+ if retval != 0
+ return [400, stderr.join('')]
+ end
+ return [200, stdout.join('')]
+end
+
+def qdevice_net_client_destroy(param, request, auth_user)
+ # When removing a qdevice from a cluster, an updated corosync.conf file
+ # with removed qdevice settings is distributed. This requires FULL permissions
+ # currently. If that gets relaxed, we can require lower permissions in here
+ # as well.
+ unless allowed_for_local_cluster(auth_user, Permissions::FULL)
+ return 403, 'Permission denied'
+ end
+ stdout, stderr, retval = run_cmd(
+ auth_user,
+ PCS, 'qdevice', 'net-client', 'destroy'
+ )
+ if retval != 0
+ return [400, stderr.join('')]
+ end
+ return [200, stdout.join('')]
+end
+
+def qdevice_client_disable(param, request, auth_user)
+ unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+ return 403, 'Permission denied'
+ end
+ if disable_service('corosync-qdevice')
+ msg = 'corosync-qdevice disabled'
+ $logger.info(msg)
+ return [200, msg]
+ else
+ msg = 'Disabling corosync-qdevice failed'
+ $logger.error(msg)
+ return [400, msg]
+ end
+end
+
+def qdevice_client_enable(param, request, auth_user)
+ unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+ return 403, 'Permission denied'
+ end
+ if not is_service_enabled?('corosync')
+ msg = 'corosync is not enabled, skipping'
+ $logger.info(msg)
+ return [200, msg]
+ elsif enable_service('corosync-qdevice')
+ msg = 'corosync-qdevice enabled'
+ $logger.info(msg)
+ return [200, msg]
+ else
+ msg = 'Enabling corosync-qdevice failed'
+ $logger.error(msg)
+ return [400, msg]
+ end
+end
+
+def qdevice_client_stop(param, request, auth_user)
+ unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+ return 403, 'Permission denied'
+ end
+ if stop_service('corosync-qdevice')
+ msg = 'corosync-qdevice stopped'
+ $logger.info(msg)
+ return [200, msg]
+ else
+ msg = 'Stopping corosync-qdevice failed'
+ $logger.error(msg)
+ return [400, msg]
+ end
+end
+
+def qdevice_client_start(param, request, auth_user)
+ unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+ return 403, 'Permission denied'
+ end
+ if not is_service_running?('corosync')
+ msg = 'corosync is not running, skipping'
+ $logger.info(msg)
+ return [200, msg]
+ elsif start_service('corosync-qdevice')
+ msg = 'corosync-qdevice started'
+ $logger.info(msg)
+ return [200, msg]
+ else
+ msg = 'Starting corosync-qdevice failed'
+ $logger.error(msg)
+ return [400, msg]
+ end
+end
diff --git a/pcsd/settings.rb b/pcsd/settings.rb
index 6229161..51f00ac 100644
--- a/pcsd/settings.rb
+++ b/pcsd/settings.rb
@@ -21,6 +21,12 @@ CIBADMIN = "/usr/sbin/cibadmin"
SBD_CONFIG = '/etc/sysconfig/sbd'
CIB_PATH='/var/lib/pacemaker/cib/cib.xml'
+COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb"
+COROSYNC_QDEVICE_NET_SERVER_CA_FILE = (
+ COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR + "/qnetd-cacert.crt"
+)
+COROSYNC_QDEVICE_NET_CLIENT_CERTS_DIR = "/etc/corosync/qdevice/net/nssdb"
+
SUPERUSER = 'hacluster'
ADMIN_GROUP = 'haclient'
$user_pass_file = "pcs_users.conf"
diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian
index 7bc92a9..aae1b11 100644
--- a/pcsd/settings.rb.debian
+++ b/pcsd/settings.rb.debian
@@ -18,8 +18,14 @@ COROSYNC_BINARIES = "/usr/sbin/"
CMAN_TOOL = "/usr/sbin/cman_tool"
PACEMAKERD = "/usr/sbin/pacemakerd"
CIBADMIN = "/usr/sbin/cibadmin"
-SBD_CONFIG = '/etc/sysconfig/sbd'
-CIB_PATH='/var/lib/pacemaker/cib/cib.xml'
+SBD_CONFIG = "/etc/sysconfig/sbd"
+CIB_PATH = "/var/lib/pacemaker/cib/cib.xml"
+
+COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb"
+COROSYNC_QDEVICE_NET_SERVER_CA_FILE = (
+ COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR + "/qnetd-cacert.crt"
+)
+COROSYNC_QDEVICE_NET_CLIENT_CERTS_DIR = "/etc/corosync/qdevice/net/nssdb"
SUPERUSER = 'hacluster'
ADMIN_GROUP = 'haclient'
diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
index b14c327..5461515 100644
--- a/pcsd/views/main.erb
+++ b/pcsd/views/main.erb
@@ -298,14 +298,19 @@
{{meta_attributes-table resource=resource}}
{{#if utilization_support}}
{{#if resource.is_primitive}}
- {{utilization-table entity=resource utilization=resource.utilization type="resource"}}
+ {{utilization-table
+ entity=resource
+ utilization=resource.utilization
+ type="resource"
+ table_id="resource_utilization_attributes"
+ }}
{{/if}}
{{/if}}
<br style="clear:left;">
{{/unless}}
</div>
{{#if stonith}}
- <div style="clear:left; margin-top: 2em;" id="stonith_info_div">
+ <div style="clear:left; margin-top: 2em;" id="stonith_agent_form">
{{fence-form
resource=resource
agent=resource.resource_agent
@@ -314,7 +319,7 @@
</div>
{{else}}
{{#if resource.is_primitive}}
- <div style="clear:left; margin-top: 2em;" id="resource_info_div">
+ <div style="clear:left; margin-top: 2em;" id="resource_agent_form">
{{resource-form
resource=resource
agent=resource.resource_agent
@@ -725,7 +730,7 @@ Use the 'Add' button to submit the form.">
<tr>
<td
{{action toggleBody}}
- id="utilization_attributes"
+ {{bind-attr id=table_id}}
class="datatable_header hover-pointer"
>
{{#if show_content}}
diff --git a/pcsd/views/nodes.erb b/pcsd/views/nodes.erb
index 478e0f6..8fccd25 100644
--- a/pcsd/views/nodes.erb
+++ b/pcsd/views/nodes.erb
@@ -247,9 +247,8 @@
</tr>
</table>
<table style="clear:left;float:left;margin-top:25px;">
- <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Node Attributes ({{#if Pcs.nodesController.cur_node_attr.length}}{{Pcs.nodesController.cur_node_attr.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr>
+ <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Node Attributes ({{#if Pcs.nodesController.cur_node_attr.length}}{{Pcs.nodesController.cur_node_attr.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites" id="node_attributes"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr>
<tr><td>
- <div id="node_attributes">
<table class="datatable">
<tr><th>Attribute</th><th>Value</th><th>Remove</th></tr>
{{#each attr in Pcs.nodesController.cur_node_attr}}
@@ -268,14 +267,12 @@
<td><button type="button" onclick="add_node_attr('#new_node_attr_col');" name="add">Add</button></td>
</tr>
</table>
- </div>
</td>
</tr>
</table>
<table style="clear:left;float:left;margin-top:25px;">
- <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Fence Levels ({{#if Pcs.nodesController.cur_node_fence_levels.length}}{{Pcs.nodesController.cur_node_fence_levels.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr>
+ <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Fence Levels ({{#if Pcs.nodesController.cur_node_fence_levels.length}}{{Pcs.nodesController.cur_node_fence_levels.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites" id="fence_levels"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr>
<tr><td>
- <div id="fencelevels">
<table class="datatable">
<tr><th>Level</th><th>Fence Devices</th><th>Remove</th></tr>
{{#each Pcs.nodesController.cur_node_fence_levels}}
@@ -301,13 +298,16 @@
<td><button type="button" onclick="add_remove_fence_level($(this).parent());" name="add">Add</button></td>
</tr>
</table>
- </div>
</td>
</tr>
</table>
{{#if Pcs.nodesController.utilization_support}}
<table style="clear:left; float:left; margin-top: 25px;"><tr><td>
- {{utilization-table entity=Pcs.nodesController.cur_node utilization=Pcs.nodesController.cur_node.utilization}}
+ {{utilization-table
+ entity=Pcs.nodesController.cur_node
+ utilization=Pcs.nodesController.cur_node.utilization
+ table_id="node_utilization_attributes"
+ }}
</td></tr></table>
{{/if}}
</div>
diff --git a/setup.py b/setup.py
index 0e267e0..0e8a45c 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@ class CleanCommand(Command):
setup(
name='pcs',
- version='0.9.152',
+ version='0.9.153',
description='Pacemaker Configuration System',
author='Chris Feist',
author_email='cfeist at redhat.com',
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/pcs.git
More information about the Debian-HA-Commits
mailing list