[Debian-ha-commits] [pcs] 01/04: New upstream version 0.9.163
Valentin Vidic
vvidic-guest at moszumanska.debian.org
Thu Feb 22 07:06:24 UTC 2018
This is an automated email from the git hooks/post-receive script.
vvidic-guest pushed a commit to branch master
in repository pcs.
commit c4d9c61a5010c08934e7e679a11dab96dc1e03c0
Author: Valentin Vidic <Valentin.Vidic at CARNet.hr>
Date: Thu Feb 22 06:03:57 2018 +0100
New upstream version 0.9.163
---
CHANGELOG.md | 45 +-
Makefile | 1 -
README.md | 14 +-
pcs/app.py | 18 +-
pcs/cli/common/console_report.py | 58 +-
pcs/cli/common/parse_args.py | 2 +-
pcs/cli/common/reports.py | 2 +-
pcs/cli/common/test/test_console_report.py | 98 +-
pcs/cli/common/test/test_middleware.py | 17 +-
pcs/cli/constraint/command.py | 16 +-
pcs/cli/constraint/test/test_command.py | 2 +-
pcs/cli/constraint_colocation/command.py | 12 +-
pcs/cli/constraint_order/command.py | 12 +-
pcs/cli/constraint_ticket/command.py | 24 +-
pcs/cluster.py | 14 +-
pcs/common/report_codes.py | 4 +-
pcs/common/test/test_tools.py | 118 +-
pcs/common/tools.py | 41 +-
pcs/config.py | 11 +-
pcs/constraint.py | 18 +-
pcs/lib/booth/config_structure.py | 4 +-
pcs/lib/booth/env.py | 2 +-
pcs/lib/booth/test/test_config_structure.py | 10 +-
pcs/lib/booth/test/test_sync.py | 147 --
pcs/lib/cib/acl.py | 20 +-
pcs/lib/cib/constraint/constraint.py | 3 +-
pcs/lib/cib/constraint/resource_set.py | 2 +-
pcs/lib/cib/fencing_topology.py | 8 +-
pcs/lib/cib/resource/bundle.py | 2 +-
pcs/lib/cib/resource/group.py | 1 -
pcs/lib/cib/resource/remote_node.py | 4 +-
pcs/lib/cib/test/test_acl.py | 41 +-
pcs/lib/cib/test/test_alert.py | 31 +-
pcs/lib/cib/test/test_constraint.py | 2 +-
pcs/lib/cib/test/test_constraint_colocation.py | 2 +-
pcs/lib/cib/test/test_constraint_order.py | 2 +-
pcs/lib/cib/test/test_constraint_ticket.py | 2 +-
pcs/lib/cib/test/test_resource_group.py | 3 +-
pcs/lib/cib/test/test_resource_guest_node.py | 2 +-
pcs/lib/cib/test/test_resource_operations.py | 2 +-
pcs/lib/cib/test/test_resource_remote_node.py | 2 +-
pcs/lib/cib/test/test_resource_set.py | 2 +-
pcs/lib/cib/test/test_tools.py | 71 +-
pcs/lib/cib/tools.py | 77 +-
pcs/lib/commands/acl.py | 3 +-
pcs/lib/commands/alert.py | 3 +-
pcs/lib/commands/fencing_topology.py | 23 +-
pcs/lib/commands/quorum.py | 2 +-
pcs/lib/commands/resource.py | 16 +-
pcs/lib/commands/sbd.py | 31 +-
pcs/lib/commands/stonith.py | 19 +-
pcs/lib/commands/test/remote_node/__init__.py | 0
pcs/lib/commands/test/remote_node/fixtures_add.py | 222 +++
.../commands/test/remote_node/fixtures_remove.py | 174 ++
.../test/remote_node/test_node_add_guest.py | 457 +++++
.../test/remote_node/test_node_add_remote.py | 518 ++++++
.../test/remote_node/test_node_remove_guest.py | 474 ++++++
.../test/remote_node/test_node_remove_remote.py | 447 +++++
.../commands/test/resource/test_bundle_create.py | 16 +-
.../commands/test/resource/test_bundle_update.py | 14 +-
.../commands/test/resource/test_resource_create.py | 128 +-
pcs/lib/commands/test/sbd/test_disable_sbd.py | 257 ++-
pcs/lib/commands/test/sbd/test_enable_sbd.py | 1782 ++++++++++++++++++--
pcs/lib/commands/test/test_acl.py | 3 +-
pcs/lib/commands/test/test_alert.py | 13 +-
pcs/lib/commands/test/test_booth.py | 999 +++++++++--
pcs/lib/commands/test/test_fencing_topology.py | 5 +-
.../commands/test/test_quorum.py} | 1151 ++++++++-----
pcs/lib/commands/test/test_stonith.py | 188 +++
pcs/lib/commands/test/test_ticket.py | 5 +-
pcs/lib/communication/booth.py | 5 +-
pcs/lib/communication/qdevice_net.py | 1 +
pcs/lib/communication/sbd.py | 5 +-
pcs/lib/communication/test/test_booth.py | 29 +
pcs/lib/communication/test/test_corosync.py | 21 +
pcs/lib/communication/test/test_nodes.py | 7 +
pcs/lib/communication/test/test_qdevice.py | 34 +
pcs/lib/communication/test/test_qdevice_net.py | 38 +
pcs/lib/communication/test/test_sbd.py | 56 +
pcs/lib/communication/tools.py | 10 +
pcs/lib/corosync/config_facade.py | 6 +-
pcs/lib/env.py | 58 +-
pcs/lib/external.py | 2 +-
pcs/lib/pacemaker/live.py | 6 +-
pcs/lib/pacemaker/test/test_live.py | 13 +-
pcs/lib/reports.py | 63 +-
pcs/lib/resource_agent.py | 6 +-
pcs/lib/test/test_env.py | 480 +++---
pcs/lib/test/test_env_cib.py | 603 ++++---
pcs/lib/test/test_nodes_task.py | 691 --------
pcs/lib/test/test_resource_agent.py | 4 +-
pcs/lib/test/test_validate.py | 10 +-
pcs/lib/validate.py | 22 +-
pcs/pcs.8 | 13 +-
pcs/pcsd.py | 11 +-
pcs/quorum.py | 58 +-
pcs/resource.py | 14 +-
pcs/settings_default.py | 3 +-
pcs/snmp/agentx/updater.py | 4 +-
pcs/snmp/pcs_snmp_agent.8 | 2 +-
pcs/snmp/pcs_snmp_agent.logrotate | 10 -
pcs/snmp/pcs_snmp_agent.service | 2 +-
pcs/snmp/settings.py | 2 +-
pcs/status.py | 98 +-
pcs/stonith.py | 2 +-
pcs/test/cib_resource/common.py | 2 +-
pcs/test/cib_resource/test_create.py | 2 +-
pcs/test/cib_resource/test_manage_unmanage.py | 4 +-
pcs/test/resources/cib-empty-1.2.xml | 2 +-
.../{cib-empty-1.2.xml => cib-empty-2.0.xml} | 0
pcs/test/resources/cib-empty-2.6.xml | 2 +-
pcs/test/resources/cib-empty-2.8.xml | 2 +-
pcs/test/resources/cib-empty-with3nodes.xml | 2 +-
pcs/test/resources/cib-empty-withnodes.xml | 2 +-
pcs/test/resources/cib-empty.xml | 2 +-
pcs/test/resources/cib-large.xml | 2 +-
pcs/test/resources/cib-largefile.xml | 2 +-
.../resource_agent_ocf_pacemaker_remote.xml | 43 +
pcs/test/resources/stonith_agent_fence_simple.xml | 33 +
pcs/test/resources/stonithd_metadata.xml | 156 ++
pcs/test/suite.py | 15 +-
pcs/test/test_acl.py | 28 +-
pcs/test/test_cluster.py | 47 +-
pcs/test/test_cluster_pcmk_remote.py | 4 +-
pcs/test/test_constraints.py | 4 +-
pcs/test/test_lib_commands_sbd.py | 51 +-
pcs/test/test_lib_corosync_config_facade.py | 26 +-
pcs/test/test_lib_corosync_live.py | 22 +-
pcs/test/test_lib_corosync_qdevice_net.py | 320 +---
pcs/test/test_lib_sbd.py | 679 +-------
pcs/test/test_misc.py | 38 +
pcs/test/test_resource.py | 27 +-
pcs/test/test_status.py | 47 +-
pcs/test/tools/assertions.py | 101 +-
pcs/test/tools/case_analysis.py | 29 +
pcs/test/tools/command_env/assistant.py | 73 +-
pcs/test/tools/command_env/config.py | 27 +-
pcs/test/tools/command_env/config_env.py | 8 +
pcs/test/tools/command_env/config_fs.py | 65 +
pcs/test/tools/command_env/config_http.py | 270 +--
pcs/test/tools/command_env/config_http_booth.py | 73 +
pcs/test/tools/command_env/config_http_corosync.py | 135 ++
pcs/test/tools/command_env/config_http_host.py | 34 +
pcs/test/tools/command_env/config_http_pcmk.py | 44 +
pcs/test/tools/command_env/config_http_sbd.py | 84 +
pcs/test/tools/command_env/config_runner_cib.py | 27 +
pcs/test/tools/command_env/config_runner_pcmk.py | 108 +-
.../tools/command_env/config_runner_systemctl.py | 79 +
pcs/test/tools/command_env/mock_fs.py | 135 ++
.../tools/command_env/mock_node_communicator.py | 191 ++-
pcs/test/tools/command_env/mock_runner.py | 1 +
pcs/test/tools/command_env/tools.py | 13 +
pcs/test/tools/custom_mock.py | 9 +-
pcs/test/tools/fixture.py | 98 +-
pcs/test/tools/fixture_cib.py | 13 +
pcs/test/tools/pcs_unittest.py | 18 +-
pcs/usage.py | 28 +-
pcs/utils.py | 14 +-
pcsd/bootstrap.rb | 2 +-
pcsd/cfgsync.rb | 6 +-
pcsd/cluster_entity.rb | 22 +
pcsd/pcs.rb | 7 +-
pcsd/pcsd.8 | 4 +-
pcsd/pcsd.logrotate | 2 +-
pcsd/pcsd.rb | 12 +-
pcsd/pcsd.service | 2 +-
pcsd/pcsd.service.debian | 2 +-
pcsd/public/js/pcsd.js | 47 +-
pcsd/remote.rb | 2 +-
pcsd/test/cib1.xml | 2 +
pcsd/test/test_cluster_entity.rb | 53 +
pylintrc | 2 +-
setup.py | 2 +-
173 files changed, 9749 insertions(+), 3967 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8c9db4f..f550f42 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,46 @@
# Change Log
+## [0.9.163] - 2018-02-20
+
+### Added
+- Added `pcs status booth` as an alias to `pcs booth status`
+- A warning is displayed in `pcs status` and a stonith device detail in web UI
+ when a stonith device has its `method` option set to `cycle` ([rhbz#1523378])
+
+### Fixed
+- `--skip-offline` is no longer ignored in the `pcs quorum device remove`
+ command
+- pcs now waits up to 5 minutes (previously 10 seconds) for pcsd restart when
+ synchronizing pcsd certificates
+- Usage and man page now correctly state it is possible to enable or disable
+ several stonith devices at once
+- It is now possible to set the `action` option of stonith devices in web UI by
+ using force ([rhbz#1421702])
+- Do not crash when `--wait` is used in `pcs stonith create` ([rhbz#1522813])
+- Nodes are now authenticated after running `pcs cluster auth` even if
+ an existing corosync.conf defines no nodes ([ghissue#153], [rhbz#1517333])
+- Pcs now properly exits with code 1 when an error occurs in `pcs cluster node
+ add-remote` and `pcs cluster node add-guest` commands ([rhbz#1464781])
+- Fixed a crash in the `pcs booth sync` command ([rhbz#1527530])
+- Always replace the whole CIB instead of applying a diff when
+ crm\_feature\_set <= 3.0.8 ([rhbz#1488044])
+- Fixed `pcs cluster auth` in a cluster when not authenticated and using
+ a non-default port ([rhbz#1415197])
+- Fixed `pcs cluster auth` in a cluster when previously authenticated using a
+ non-default port and reauthenticating using an implicit default port
+ ([rhbz#1415197])
+
+[ghissue#153]: https://github.com/ClusterLabs/pcs/issues/153
+[rhbz#1415197]: https://bugzilla.redhat.com/show_bug.cgi?id=1415197
+[rhbz#1421702]: https://bugzilla.redhat.com/show_bug.cgi?id=1421702
+[rhbz#1464781]: https://bugzilla.redhat.com/show_bug.cgi?id=1464781
+[rhbz#1488044]: https://bugzilla.redhat.com/show_bug.cgi?id=1488044
+[rhbz#1517333]: https://bugzilla.redhat.com/show_bug.cgi?id=1517333
+[rhbz#1522813]: https://bugzilla.redhat.com/show_bug.cgi?id=1522813
+[rhbz#1523378]: https://bugzilla.redhat.com/show_bug.cgi?id=1523378
+[rhbz#1527530]: https://bugzilla.redhat.com/show_bug.cgi?id=1527530
+
+
## [0.9.162] - 2017-11-15
### Added
@@ -10,7 +51,7 @@
### Fixed
- Fixed crash when loading a huge xml ([rhbz#1506864])
-- Fixed adding an existing cluster into the GUI ([rhbz#1415197])
+- Fixed adding an existing cluster into the web UI ([rhbz#1415197])
- False warnings about failed actions when resource is master/unmaster from the
web UI ([rhbz#1506220])
@@ -283,7 +324,7 @@
more than one node is specified ([rhbz#1315992])
- Restarting pcsd initiated from pcs is now a synchronous operation
([rhbz#1284404])
-- Stopped bundling fonts used in pcsd GUI ([ghissue#125])
+- Stopped bundling fonts used in pcsd web UI ([ghissue#125])
- In `pcs resource create` flags `--master` and `--clone` changed to keywords
`master` and `clone`
- libcurl is now used for node to node communication
diff --git a/Makefile b/Makefile
index 04cd62a..5d4aed8 100644
--- a/Makefile
+++ b/Makefile
@@ -129,7 +129,6 @@ install: install_bundled_libs
install -d ${SNMP_MIB_DIR_FULL}
install -m 644 pcs/snmp/mibs/PCMK-PCS*-MIB.txt ${SNMP_MIB_DIR_FULL}
install -m 644 -D pcs/snmp/pcs_snmp_agent.conf ${DESTDIR}/etc/sysconfig/pcs_snmp_agent
- install -m 644 -D pcs/snmp/pcs_snmp_agent.logrotate ${DESTDIR}/etc/logrotate.d/pcs_snmp_agent
install -m 644 -D pcs/snmp/pcs_snmp_agent.8 ${DESTDIR}/${MANDIR}/man8/pcs_snmp_agent.8
ifeq ($(IS_SYSTEMCTL),true)
install -d ${DESTDIR}/${systemddir}/system/
diff --git a/README.md b/README.md
index b723e0d..f719c96 100644
--- a/README.md
+++ b/README.md
@@ -77,8 +77,8 @@ Start pcsd and make it start on boot:
Currently this is built into Fedora, RHEL and its clones and Debian and its
derivates.
-* [Fedora package git repositories](http://pkgs.fedoraproject.org/cgit/rpms/pcs.git/)
-* [Current Fedora .spec](http://pkgs.fedoraproject.org/cgit/rpms/pcs.git/tree/pcs.spec)
+* [Fedora package git repositories](https://src.fedoraproject.org/rpms/pcs)
+* [Current Fedora .spec](https://src.fedoraproject.org/rpms/pcs/blob/master/f/pcs.spec)
* [Debian-HA project home page](https://wiki.debian.org/Debian-HA)
---
@@ -145,11 +145,11 @@ Login as the `hacluster` user.
### Further Documentation
-[ClusterLabs website](http://clusterlabs.org) is an excellent place to learn
+[ClusterLabs website](https://clusterlabs.org) is an excellent place to learn
more about Pacemaker clusters.
-* [ClusterLabs quick start](http://clusterlabs.org/quickstart.html)
-* [Clusters from Scratch](http://clusterlabs.org/doc/en-US/Pacemaker/1.1-pcs/html/Clusters_from_Scratch/index.html)
-* [ClusterLabs documentation page](http://clusterlabs.org/doc/)
+* [ClusterLabs quick start](https://clusterlabs.org/quickstart.html)
+* [Clusters from Scratch](https://clusterlabs.org/pacemaker/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/index.html)
+* [ClusterLabs documentation page](https://clusterlabs.org/pacemaker/doc/)
---
@@ -158,5 +158,5 @@ If you have any bug reports or feature requests please feel free to open a
github issue on the pcs project.
Alternatively you can use ClusterLabs
-[users mailinglist](http://oss.clusterlabs.org/mailman/listinfo/users)
+[users mailinglist](https://oss.clusterlabs.org/mailman/listinfo/users)
which is also a great place to ask Pacemaker clusters related questions.
diff --git a/pcs/app.py b/pcs/app.py
index f60a81c..742f655 100644
--- a/pcs/app.py
+++ b/pcs/app.py
@@ -168,35 +168,39 @@ def main(argv=None):
"acl": lambda argv: acl.acl_cmd(
utils.get_library_wrapper(),
argv,
- utils.get_modificators()
+ utils.get_modifiers()
+ ),
+ "status": lambda argv: status.status_cmd(
+ utils.get_library_wrapper(),
+ argv,
+ utils.get_modifiers()
),
- "status": status.status_cmd,
"config": config.config_cmd,
"pcsd": pcsd.pcsd_cmd,
"node": lambda argv: node.node_cmd(
utils.get_library_wrapper(),
argv,
- utils.get_modificators()
+ utils.get_modifiers()
),
"quorum": lambda argv: quorum.quorum_cmd(
utils.get_library_wrapper(),
argv,
- utils.get_modificators()
+ utils.get_modifiers()
),
"qdevice": lambda argv: qdevice.qdevice_cmd(
utils.get_library_wrapper(),
argv,
- utils.get_modificators()
+ utils.get_modifiers()
),
"alert": lambda args: alert.alert_cmd(
utils.get_library_wrapper(),
args,
- utils.get_modificators()
+ utils.get_modifiers()
),
"booth": lambda argv: booth.booth_cmd(
utils.get_library_wrapper(),
argv,
- utils.get_modificators()
+ utils.get_modifiers()
),
}
if command not in cmd_map:
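Every entry above follows the same dispatch shape: a top-level command name
maps to a callable that takes the remaining argv and forwards the library
wrapper plus the parsed modifiers. A stripped-down sketch with stand-in
callables (not the pcs internals):

    def status_cmd(lib, argv, modifiers):
        # stand-in for pcs.status.status_cmd
        print("status", argv, modifiers)

    cmd_map = {
        "status": lambda argv: status_cmd(object(), argv, {"full": False}),
    }

    command, argv = "status", ["corosync"]
    if command not in cmd_map:
        raise SystemExit("unknown command")
    cmd_map[command](argv)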
diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
index 406532d..8d0cc7e 100644
--- a/pcs/cli/common/console_report.py
+++ b/pcs/cli/common/console_report.py
@@ -15,6 +15,20 @@ from pcs.common.tools import is_string
INSTANCE_SUFFIX = "@{0}"
NODE_PREFIX = "{0}: "
+_type_translation = {
+ "acl_group": "ACL group",
+ "acl_permission": "ACL permission",
+ "acl_role": "ACL role",
+ "acl_target": "ACL user",
+ "primitive": "resource",
+}
+_type_articles = {
+ "ACL group": "an",
+ "ACL user": "an",
+ "ACL role": "an",
+ "ACL permission": "an",
+}
+
def warn(message):
sys.stdout.write(format_message(message, "Warning: "))
@@ -80,22 +94,30 @@ def service_operation_skipped(operation, info):
**info
)
+def typelist_to_string(type_list, article=False):
+ if not type_list:
+ return ""
+ new_list = sorted([
+ # get a translation or make a type_name a string
+ _type_translation.get(type_name, "{0}".format(type_name))
+ for type_name in type_list
+ ])
+ types = "/".join(new_list)
+ if not article:
+ return types
+ return "{article} {types}".format(
+ article=_type_articles.get(new_list[0], "a"),
+ types=types
+ )
+
def id_belongs_to_unexpected_type(info):
- translate_expected = {
- "acl_group": "an acl group",
- "acl_target": "an acl user",
- "group": "a group",
- }
return "'{id}' is not {expected_type}".format(
id=info["id"],
- expected_type="/".join([
- translate_expected.get(tag, "{0}".format(tag))
- for tag in info["expected_types"]
- ]),
+ expected_type=typelist_to_string(info["expected_types"], article=True)
)
def id_not_found(info):
- desc = format_optional(info["id_description"], "{0} ")
+ desc = format_optional(typelist_to_string(info["expected_types"]), "{0} ")
if not info["context_type"] or not info["context_id"]:
return "{desc}'{id}' does not exist".format(desc=desc, id=info["id"])
@@ -131,7 +153,7 @@ def resource_running_on_nodes(info):
]))
)
-def invalid_option(info):
+def invalid_options(info):
template = "invalid {desc}option{plural_options} {option_names_list},"
if not info["allowed"] and not info["allowed_patterns"]:
template += " there are no options allowed"
@@ -227,7 +249,7 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
)
,
- codes.INVALID_OPTION: invalid_option,
+ codes.INVALID_OPTIONS: invalid_options,
codes.INVALID_OPTION_VALUE: lambda info:
#value on key "allowed_values" is overloaded:
@@ -735,6 +757,15 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
.format(**info)
,
+ codes.CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET: lambda info:
+ (
+ "Replacing the whole CIB instead of applying a diff, a race "
+ "condition may happen if the CIB is pushed more than once "
+ "simultaneously. To fix this, upgrade pacemaker to get "
+ "crm_feature_set at least {required_set}, current is {current_set}."
+ ).format(**info)
+ ,
+
codes.CIB_SAVE_TMP_ERROR: lambda info:
"Unable to save CIB to a temporary file: {reason}"
.format(**info)
@@ -1355,4 +1386,7 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
"--Debug Content Start--\n{content}\n--Debug Content End--\n"
).format(**info)
,
+ codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE:
+ "Unable to perform operation on any available node/host, therefore it "
+ "is not possible to continue."
}
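A condensed, standalone sketch of the `typelist_to_string` helper added
above (the abbreviated translation table here is illustrative; the full
tables are in the diff, and the expected outputs match the tests added
later in this commit):

    _type_translation = {
        "acl_group": "ACL group",
        "acl_target": "ACL user",
        "primitive": "resource",
    }
    _type_articles = {"ACL group": "an", "ACL user": "an"}

    def typelist_to_string(type_list, article=False):
        # Translate internal tags to user-facing labels, sort them,
        # join with "/" and optionally prepend the article that fits
        # the alphabetically first label.
        new_list = sorted(
            _type_translation.get(t, "{0}".format(t)) for t in type_list
        )
        types = "/".join(new_list)
        if not article or not new_list:
            return types
        return "{0} {1}".format(_type_articles.get(new_list[0], "a"), types)

    typelist_to_string(["primitive", "master", "clone"])
    # -> 'clone/master/resource'
    typelist_to_string(["acl_target", "acl_group"], article=True)
    # -> 'an ACL group/ACL user'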
diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py
index 70b926c..0ee6c4e 100644
--- a/pcs/cli/common/parse_args.py
+++ b/pcs/cli/common/parse_args.py
@@ -79,7 +79,7 @@ def group_by_keywords(
group_repeated_keywords=None, only_found_keywords=False
):
"""
- Return dictionary with keywords as keys and following argumets as value.
+ Return dictionary with keywords as keys and following arguments as value.
For example when keywords are "first" and "seconds" then for arg_list
["first", 1, 2, "second", 3] it returns {"first": [1, 2], "second": [3]}
diff --git a/pcs/cli/common/reports.py b/pcs/cli/common/reports.py
index 5fd39cb..f96db73 100644
--- a/pcs/cli/common/reports.py
+++ b/pcs/cli/common/reports.py
@@ -130,7 +130,7 @@ def process_library_reports(report_item_list):
report_item_list list of ReportItem
"""
if not report_item_list:
- error("Errors have occurred, therefore pcs is unable to continue")
+ raise error("Errors have occurred, therefore pcs is unable to continue")
critical_error = False
for report_item in report_item_list:
diff --git a/pcs/cli/common/test/test_console_report.py b/pcs/cli/common/test/test_console_report.py
index 2798912..a5bfed1 100644
--- a/pcs/cli/common/test/test_console_report.py
+++ b/pcs/cli/common/test/test_console_report.py
@@ -45,8 +45,8 @@ class NameBuildTest(TestCase):
)
-class BuildInvalidOptionMessageTest(NameBuildTest):
- code = codes.INVALID_OPTION
+class BuildInvalidOptionsMessageTest(NameBuildTest):
+ code = codes.INVALID_OPTIONS
def test_build_message_with_type(self):
self.assert_message_from_info(
"invalid TYPE option 'NAME', allowed options are: FIRST, SECOND",
@@ -678,18 +678,24 @@ class ResourceOperationIntevalAdaptedTest(NameBuildTest):
class IdBelongsToUnexpectedType(NameBuildTest):
code = codes.ID_BELONGS_TO_UNEXPECTED_TYPE
def test_build_message_with_data(self):
- self.assert_message_from_info("'ID' is not primitive/master/clone", {
- "id": "ID",
- "expected_types": ["primitive", "master", "clone"],
- "current_type": "op",
- })
-
- def test_build_message_with_transformation(self):
- self.assert_message_from_info("'ID' is not a group", {
- "id": "ID",
- "expected_types": ["group"],
- "current_type": "op",
- })
+ self.assert_message_from_info(
+ "'ID' is not a clone/master/resource",
+ {
+ "id": "ID",
+ "expected_types": ["primitive", "master", "clone"],
+ "current_type": "op",
+ }
+ )
+
+ def test_build_message_with_transformation_and_article(self):
+ self.assert_message_from_info(
+ "'ID' is not an ACL group/ACL user",
+ {
+ "id": "ID",
+ "expected_types": ["acl_target", "acl_group"],
+ "current_type": "op",
+ }
+ )
class ResourceRunOnNodes(NameBuildTest):
code = codes.RESOURCE_RUNNING_ON_NODES
@@ -2011,3 +2017,67 @@ class ResourceRefreshTooTimeConsuming(NameBuildTest):
"threshold": 25,
}
)
+
+
+class IdNotFound(NameBuildTest):
+ code = codes.ID_NOT_FOUND
+ def test_id(self):
+ self.assert_message_from_info(
+ "'ID' does not exist",
+ {
+ "id": "ID",
+ "expected_types": [],
+ "context_type": "",
+ "context_id": "",
+ }
+ )
+
+ def test_id_and_type(self):
+ self.assert_message_from_info(
+ "clone/master/resource 'ID' does not exist",
+ {
+ "id": "ID",
+ "expected_types": ["primitive", "master", "clone"],
+ "context_type": "",
+ "context_id": "",
+ }
+ )
+
+ def test_context(self):
+ self.assert_message_from_info(
+ "there is no 'ID' in the C_TYPE 'C_ID'",
+ {
+ "id": "ID",
+ "expected_types": [],
+ "context_type": "C_TYPE",
+ "context_id": "C_ID",
+ }
+ )
+
+ def test_type_and_context(self):
+ self.assert_message_from_info(
+ "there is no ACL user 'ID' in the C_TYPE 'C_ID'",
+ {
+ "id": "ID",
+ "expected_types": ["acl_target"],
+ "context_type": "C_TYPE",
+ "context_id": "C_ID",
+ }
+ )
+
+
+class CibPushForcedFullDueToCrmFeatureSet(NameBuildTest):
+ code = codes.CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET
+ def test_success(self):
+ self.assert_message_from_info(
+ (
+ "Replacing the whole CIB instead of applying a diff, a race "
+ "condition may happen if the CIB is pushed more than once "
+ "simultaneously. To fix this, upgrade pacemaker to get "
+ "crm_feature_set at least 3.0.9, current is 3.0.6."
+ ),
+ {
+ "required_set": "3.0.9",
+ "current_set": "3.0.6",
+ }
+ )
diff --git a/pcs/cli/common/test/test_middleware.py b/pcs/cli/common/test/test_middleware.py
index 37b855f..5e51782 100644
--- a/pcs/cli/common/test/test_middleware.py
+++ b/pcs/cli/common/test/test_middleware.py
@@ -12,21 +12,21 @@ from pcs.cli.common import middleware
class MiddlewareBuildTest(TestCase):
def test_run_middleware_correctly_chained(self):
log = []
- def command(lib, argv, modificators):
- log.append('command: {0}, {1}, {2}'.format(lib, argv, modificators))
+ def command(lib, argv, modifiers):
+ log.append('command: {0}, {1}, {2}'.format(lib, argv, modifiers))
- def m1(next, lib, argv, modificators):
+ def m1(next, lib, argv, modifiers):
log.append(
- 'm1 start: {0}, {1}, {2}'.format(lib, argv, modificators)
+ 'm1 start: {0}, {1}, {2}'.format(lib, argv, modifiers)
)
- next(lib, argv, modificators)
+ next(lib, argv, modifiers)
log.append('m1 done')
- def m2(next, lib, argv, modificators):
+ def m2(next, lib, argv, modifiers):
log.append(
- 'm2 start: {0}, {1}, {2}'.format(lib, argv, modificators)
+ 'm2 start: {0}, {1}, {2}'.format(lib, argv, modifiers)
)
- next(lib, argv, modificators)
+ next(lib, argv, modifiers)
log.append('m2 done')
run_with_middleware = middleware.build(m1, m2)
@@ -38,4 +38,3 @@ class MiddlewareBuildTest(TestCase):
'm2 done',
'm1 done',
])
-
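A sketch of the chaining this test asserts: `middleware.build` composes the
middlewares so each receives the next callable as its first argument, with
the command at the end of the chain. Illustrative only, not the actual pcs
implementation:

    from functools import reduce

    def build(*middleware_list):
        def run(command, *args):
            # Fold right-to-left: m1 wraps m2, which wraps the command.
            chain = reduce(
                lambda next_fn, mw: lambda *a: mw(next_fn, *a),
                reversed(middleware_list),
                command,
            )
            return chain(*args)
        return run

Calling `build(m1, m2)(command, lib, argv, modifiers)` then produces the
order asserted above: m1 start, m2 start, command, m2 done, m1 done.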
diff --git a/pcs/cli/constraint/command.py b/pcs/cli/constraint/command.py
index 7d9f8cf..ff4e86f 100644
--- a/pcs/cli/constraint/command.py
+++ b/pcs/cli/constraint/command.py
@@ -7,21 +7,21 @@ from __future__ import (
from pcs.cli.constraint import parse_args, console_report
from pcs.cli.common.console_report import indent
-def create_with_set(create_with_set_library_call, argv, modificators):
+def create_with_set(create_with_set_library_call, argv, modifiers):
"""
callable create_with_set_library_call create constraint with set
list argv part of comandline args
see usage for "constraint (colocation|resource|ticket) set"
- dict like object modificators can contain
+ dict like object modifiers can contain
"force" allows resource in clone/master and constraint duplicity
"autocorrect" allows correct resource to its clone/master parent
"""
resource_set_list, constraint_options = parse_args.prepare_set_args(argv)
create_with_set_library_call(
resource_set_list, constraint_options,
- can_repair_to_clone=modificators["autocorrect"],
- resource_in_clone_alowed=modificators["force"],
- duplication_alowed=modificators["force"],
+ can_repair_to_clone=modifiers["autocorrect"],
+ resource_in_clone_alowed=modifiers["force"],
+ duplication_alowed=modifiers["force"],
)
def show_constraints_with_set(constraint_list, show_detail, indent_step=2):
@@ -39,7 +39,7 @@ def show_constraints_with_set(constraint_list, show_detail, indent_step=2):
indent_step=indent_step
)
-def show(caption, load_constraints, format_options, modificators):
+def show(caption, load_constraints, format_options, modifiers):
"""
load constraints and return console lines list with info about constraints
string caption for example "Ticket Constraints:"
@@ -47,9 +47,9 @@ def show(caption, load_constraints, format_options, modificators):
like {"plain": [], "with_resource_sets": []}
callable format_options takes dict of options and show_detail flag (bool)
and returns string with constraint formated for commandline
- modificators dict like object with command modificators
+ modifiers dict like object with command modifiers
"""
- show_detail = modificators["full"]
+ show_detail = modifiers["full"]
constraints = load_constraints()
line_list = [caption]
diff --git a/pcs/cli/constraint/test/test_command.py b/pcs/cli/constraint/test/test_command.py
index 0d79b10..98c7992 100644
--- a/pcs/cli/constraint/test/test_command.py
+++ b/pcs/cli/constraint/test/test_command.py
@@ -53,7 +53,7 @@ class ShowTest(TestCase):
"caption",
load_constraints=lambda: {"plain": [], "with_resource_sets": []},
format_options=lambda: None,
- modificators={"full": False}
+ modifiers={"full": False}
))
def test_show_constraints_full(self):
diff --git a/pcs/cli/constraint_colocation/command.py b/pcs/cli/constraint_colocation/command.py
index 6182506..58e551d 100644
--- a/pcs/cli/constraint_colocation/command.py
+++ b/pcs/cli/constraint_colocation/command.py
@@ -8,31 +8,31 @@ from pcs.cli.constraint import command
from pcs.cli.constraint_colocation import console_report
-def create_with_set(lib, argv, modificators):
+def create_with_set(lib, argv, modifiers):
"""
create colocation constraint with resource set
object lib exposes library
list argv see usage for "constraint colocation set"
- dict like object modificators can contain
+ dict like object modifiers can contain
"force" allows resource in clone/master and constraint duplicity
"autocorrect" allows correct resource to its clone/master parent
"""
command.create_with_set(
lib.constraint_colocation.set,
argv,
- modificators,
+ modifiers,
)
-def show(lib, argv, modificators):
+def show(lib, argv, modifiers):
"""
show all colocation constraints
object lib exposes library
list argv see usage for "constraint colocation show"
- dict like object modificators can contain "full"
+ dict like object modifiers can contain "full"
"""
print("\n".join(command.show(
"Colocation Constraints:",
lib.constraint_colocation.show,
console_report.constraint_plain,
- modificators,
+ modifiers,
)))
diff --git a/pcs/cli/constraint_order/command.py b/pcs/cli/constraint_order/command.py
index b0d4fc9..5890c7a 100644
--- a/pcs/cli/constraint_order/command.py
+++ b/pcs/cli/constraint_order/command.py
@@ -8,31 +8,31 @@ from pcs.cli.constraint import command
from pcs.cli.constraint_order import console_report
-def create_with_set(lib, argv, modificators):
+def create_with_set(lib, argv, modifiers):
"""
create order constraint with resource set
object lib exposes library
list argv see usage for "constraint colocation set"
- dict like object modificators can contain
+ dict like object modifiers can contain
"force" allows resource in clone/master and constraint duplicity
"autocorrect" allows correct resource to its clone/master parent
"""
command.create_with_set(
lib.constraint_order.set,
argv,
- modificators
+ modifiers
)
-def show(lib, argv, modificators):
+def show(lib, argv, modifiers):
"""
show all order constraints
object lib exposes library
list argv see usage for "constraint colocation show"
- dict like object modificators can contain "full"
+ dict like object modifiers can contain "full"
"""
print("\n".join(command.show(
"Ordering Constraints:",
lib.constraint_order.show,
console_report.constraint_plain,
- modificators,
+ modifiers,
)))
diff --git a/pcs/cli/constraint_ticket/command.py b/pcs/cli/constraint_ticket/command.py
index df761ae..ecc3405 100644
--- a/pcs/cli/constraint_ticket/command.py
+++ b/pcs/cli/constraint_ticket/command.py
@@ -9,27 +9,27 @@ from pcs.cli.constraint import command
from pcs.cli.constraint_ticket import parse_args, console_report
from pcs.cli.common.console_report import error
-def create_with_set(lib, argv, modificators):
+def create_with_set(lib, argv, modifiers):
"""
create ticket constraint with resource set
object lib exposes library
list argv see usage for "constraint colocation set"
- dict like object modificators can contain
+ dict like object modifiers can contain
"force" allows resource in clone/master and constraint duplicity
"autocorrect" allows correct resource to its clone/master parent
"""
command.create_with_set(
lib.constraint_ticket.set,
argv,
- modificators,
+ modifiers,
)
-def add(lib, argv, modificators):
+def add(lib, argv, modifiers):
"""
create ticket constraint
object lib exposes library
list argv see usage for "constraint colocation add"
- dict like object modificators can contain
+ dict like object modifiers can contain
"force" allows resource in clone/master and constraint duplicity
"autocorrect" allows correct resource to its clone/master parent
"""
@@ -47,28 +47,28 @@ def add(lib, argv, modificators):
ticket,
resource_id,
options,
- autocorrection_allowed=modificators["autocorrect"],
- resource_in_clone_alowed=modificators["force"],
- duplication_alowed=modificators["force"],
+ autocorrection_allowed=modifiers["autocorrect"],
+ resource_in_clone_alowed=modifiers["force"],
+ duplication_alowed=modifiers["force"],
)
-def remove(lib, argv, modificators):
+def remove(lib, argv, modifiers):
if len(argv) != 2:
raise CmdLineInputError()
ticket, resource_id = argv
if not lib.constraint_ticket.remove(ticket, resource_id):
raise error("no matching ticket constraint found")
-def show(lib, argv, modificators):
+def show(lib, argv, modifiers):
"""
show all ticket constraints
object lib exposes library
list argv see usage for "constraint colocation show"
- dict like object modificators can contain "full"
+ dict like object modifiers can contain "full"
"""
print("\n".join(command.show(
"Ticket Constraints:",
lib.constraint_ticket.show,
console_report.constraint_plain,
- modificators,
+ modifiers,
)))
diff --git a/pcs/cluster.py b/pcs/cluster.py
index a330164..50f05f7 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -141,7 +141,7 @@ def cluster_cmd(argv):
node.node_standby_cmd(
utils.get_library_wrapper(),
argv,
- utils.get_modificators(),
+ utils.get_modifiers(),
True
)
except LibraryError as e:
@@ -153,7 +153,7 @@ def cluster_cmd(argv):
node.node_standby_cmd(
utils.get_library_wrapper(),
argv,
- utils.get_modificators(),
+ utils.get_modifiers(),
False
)
except LibraryError as e:
@@ -206,10 +206,10 @@ def cluster_cmd(argv):
remote_node_command_map[argv[0]](
utils.get_library_wrapper(),
argv[1:],
- utils.get_modificators()
+ utils.get_modifiers()
)
except LibraryError as e:
- utils.process_library_reports(e.args)
+ process_library_reports(e.args)
except CmdLineInputError as e:
utils.exit_on_cmdline_input_errror(
e, "cluster", "node " + argv[0]
@@ -338,7 +338,7 @@ def cluster_certkey(argv):
def cluster_setup(argv):
- modifiers = utils.get_modificators()
+ modifiers = utils.get_modifiers()
allowed_encryption_values = ["0", "1"]
if modifiers["encryption"] not in allowed_encryption_values:
process_library_reports([
@@ -1632,7 +1632,7 @@ def cluster_node(argv):
node_add_outside_cluster(
utils.get_library_wrapper(),
argv[1:],
- utils.get_modificators(),
+ utils.get_modifiers(),
)
except CmdLineInputError as e:
utils.exit_on_cmdline_input_errror(e, "cluster", "node")
@@ -1668,7 +1668,7 @@ def cluster_node(argv):
utils.err(msg)
lib_env = utils.get_lib_env()
- modifiers = utils.get_modificators()
+ modifiers = utils.get_modifiers()
if add_node == True:
node_add(lib_env, node0, node1, modifiers)
diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
index 6ab619f..caf2fe5 100644
--- a/pcs/common/report_codes.py
+++ b/pcs/common/report_codes.py
@@ -71,6 +71,7 @@ CIB_FENCING_LEVEL_DOES_NOT_EXIST = "CIB_FENCING_LEVEL_DOES_NOT_EXIST"
CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT"
CIB_LOAD_ERROR = "CIB_LOAD_ERROR"
CIB_LOAD_ERROR_SCOPE_MISSING = "CIB_LOAD_ERROR_SCOPE_MISSING"
+CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET = "CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET"
CIB_PUSH_ERROR = "CIB_PUSH_ERROR"
CIB_SAVE_TMP_ERROR = "CIB_SAVE_TMP_ERROR"
CIB_UPGRADE_FAILED = "CIB_UPGRADE_FAILED"
@@ -121,7 +122,7 @@ ID_NOT_FOUND = 'ID_NOT_FOUND'
IGNORED_CMAN_UNSUPPORTED_OPTION = 'IGNORED_CMAN_UNSUPPORTED_OPTION'
INVALID_CIB_CONTENT = "INVALID_CIB_CONTENT"
INVALID_ID = "INVALID_ID"
-INVALID_OPTION = "INVALID_OPTION"
+INVALID_OPTIONS = "INVALID_OPTIONS"
INVALID_USERDEFINED_OPTIONS = "INVALID_USERDEFINED_OPTIONS"
INVALID_OPTION_TYPE = "INVALID_OPTION_TYPE"
INVALID_OPTION_VALUE = "INVALID_OPTION_VALUE"
@@ -249,6 +250,7 @@ UNABLE_TO_GET_AGENT_METADATA = 'UNABLE_TO_GET_AGENT_METADATA'
UNABLE_TO_READ_COROSYNC_CONFIG = "UNABLE_TO_READ_COROSYNC_CONFIG"
UNABLE_TO_GET_SBD_CONFIG = "UNABLE_TO_GET_SBD_CONFIG"
UNABLE_TO_GET_SBD_STATUS = "UNABLE_TO_GET_SBD_STATUS"
+UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE = "UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE"
UNKNOWN_COMMAND = 'UNKNOWN_COMMAND'
WATCHDOG_INVALID = "WATCHDOG_INVALID"
UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS = "UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS"
diff --git a/pcs/common/test/test_tools.py b/pcs/common/test/test_tools.py
index 0b28f84..389fd8f 100644
--- a/pcs/common/test/test_tools.py
+++ b/pcs/common/test/test_tools.py
@@ -5,7 +5,10 @@ from __future__ import (
)
from pcs.test.tools.pcs_unittest import TestCase
-from pcs.common.tools import is_string
+from pcs.common.tools import (
+ is_string,
+ Version
+)
class IsString(TestCase):
def test_recognize_plain_string(self):
@@ -21,3 +24,116 @@ class IsString(TestCase):
def test_list_of_string_is_not_string(self):
self.assertFalse(is_string(["a", "b"]))
+
+
+class VersionTest(TestCase):
+ def assert_asterisk(self, expected, major, minor=None, revision=None):
+ self.assertEqual(expected, (major, minor, revision))
+
+ def assert_eq_tuple(self, a, b):
+ self.assert_eq(Version(*a), Version(*b))
+
+ def assert_lt_tuple(self, a, b):
+ self.assert_lt(Version(*a), Version(*b))
+
+ def assert_eq(self, a, b):
+ self.assertTrue(a == b)
+ self.assertFalse(a != b)
+ self.assertFalse(a < b)
+ self.assertTrue(a <= b)
+ self.assertFalse(a > b)
+ self.assertTrue(a >= b)
+
+ def assert_lt(self, a, b):
+ self.assertFalse(a == b)
+ self.assertTrue(a != b)
+ self.assertTrue(a < b)
+ self.assertTrue(a <= b)
+ self.assertFalse(a > b)
+ self.assertFalse(a >= b)
+
+ def test_major(self):
+ ver = Version(2)
+ self.assert_asterisk((2, None, None), *ver)
+ self.assertEqual(ver.major, 2)
+ self.assertEqual(ver[0], 2)
+ self.assertEqual(ver.minor, None)
+ self.assertEqual(ver[1], None)
+ self.assertEqual(ver.revision, None)
+ self.assertEqual(ver[2], None)
+ self.assertEqual(ver.as_full_tuple, (2, 0, 0))
+ self.assertEqual(str(ver), "2")
+ self.assertEqual(str(ver.normalize()), "2.0.0")
+
+ def test_major_minor(self):
+ ver = Version(2, 3)
+ self.assert_asterisk((2, 3, None), *ver)
+ self.assertEqual(ver.major, 2)
+ self.assertEqual(ver[0], 2)
+ self.assertEqual(ver.minor, 3)
+ self.assertEqual(ver[1], 3)
+ self.assertEqual(ver.revision, None)
+ self.assertEqual(ver[2], None)
+ self.assertEqual(ver.as_full_tuple, (2, 3, 0))
+ self.assertEqual(str(ver), "2.3")
+ self.assertEqual(str(ver.normalize()), "2.3.0")
+
+ def test_major_minor_revision(self):
+ ver = Version(2, 3, 4)
+ self.assert_asterisk((2, 3, 4), *ver)
+ self.assertEqual(ver.major, 2)
+ self.assertEqual(ver[0], 2)
+ self.assertEqual(ver.minor, 3)
+ self.assertEqual(ver[1], 3)
+ self.assertEqual(ver.revision, 4)
+ self.assertEqual(ver[2], 4)
+ self.assertEqual(ver.as_full_tuple, (2, 3, 4))
+ self.assertEqual(str(ver), "2.3.4")
+ self.assertEqual(str(ver.normalize()), "2.3.4")
+
+ def test_compare(self):
+ self.assert_eq_tuple((2, ), (2, ))
+ self.assert_lt_tuple((2, ), (3, ))
+
+
+ self.assert_eq_tuple((2, 0), (2, 0))
+ self.assert_lt_tuple((2, 0), (2, 5))
+ self.assert_lt_tuple((2, 0), (3, 5))
+
+ self.assert_eq_tuple((2, 0), (2, ))
+ self.assert_lt_tuple((2, 0), (3, ))
+ self.assert_lt_tuple((2, 5), (3, ))
+ self.assert_lt_tuple((3, ), (3, 5))
+
+
+ self.assert_eq_tuple((2, 0, 0), (2, 0, 0))
+ self.assert_lt_tuple((2, 0, 0), (2, 0, 1))
+ self.assert_lt_tuple((2, 0, 0), (2, 5, 0))
+ self.assert_lt_tuple((2, 0, 0), (2, 5, 1))
+ self.assert_lt_tuple((2, 0, 0), (3, 0, 0))
+ self.assert_lt_tuple((2, 0, 0), (3, 0, 1))
+ self.assert_lt_tuple((2, 0, 0), (3, 5, 0))
+ self.assert_lt_tuple((2, 0, 0), (3, 5, 1))
+
+ self.assert_eq_tuple((2, 0, 0), (2, 0))
+ self.assert_eq_tuple((2, 0, 0), (2, ))
+ self.assert_lt_tuple((2, 0, 0), (2, 5))
+ self.assert_lt_tuple((2, 0, 0), (3, ))
+
+ self.assert_lt_tuple((2, 5, 0), (3, ))
+ self.assert_lt_tuple((2, ), (2, 5, 0))
+ self.assert_eq_tuple((2, 5, 0), (2, 5))
+ self.assert_lt_tuple((2, 5, 0), (3, 5))
+
+ self.assert_lt_tuple((2, 0), (2, 5, 1))
+ self.assert_lt_tuple((2, 5), (2, 5, 1))
+ self.assert_lt_tuple((2, 5, 1), (3, 5))
+ self.assert_lt_tuple((2, 5, 1), (3, ))
+ self.assert_lt_tuple((2, ), (2, 5, 1))
+ self.assert_lt_tuple((2, 5, 1), (3, ))
+
+ self.assert_lt_tuple((2, ), (3, 5, 1))
+ self.assert_lt_tuple((3, ), (3, 5, 1))
+ self.assert_lt_tuple((2, 0), (3, 5, 1))
+ self.assert_lt_tuple((2, 5), (3, 5, 1))
+ self.assert_lt_tuple((3, 5), (3, 5, 1))
diff --git a/pcs/common/tools.py b/pcs/common/tools.py
index 382057e..2a66b97 100644
--- a/pcs/common/tools.py
+++ b/pcs/common/tools.py
@@ -4,11 +4,12 @@ from __future__ import (
print_function,
)
-import sys
+from collections import namedtuple
from lxml import etree
import threading
+import sys
-_PYTHON2 = sys.version[0] == "2"
+_PYTHON2 = (sys.version_info.major == 2)
def simple_cache(func):
cache = {
@@ -78,3 +79,39 @@ def xml_fromstring(xml):
#see https://bugzilla.redhat.com/show_bug.cgi?id=1506864
etree.XMLParser(huge_tree=True)
)
+
+class Version(namedtuple("Version", ["major", "minor", "revision"])):
+ def __new__(cls, major, minor=None, revision=None):
+ return super(Version, cls).__new__(cls, major, minor, revision)
+
+ @property
+ def as_full_tuple(self):
+ return (
+ self.major,
+ self.minor if self.minor is not None else 0,
+ self.revision if self.revision is not None else 0,
+ )
+
+ def normalize(self):
+ return self.__class__(*self.as_full_tuple)
+
+ def __str__(self):
+ return ".".join([str(x) for x in self if x is not None])
+
+ def __lt__(self, other):
+ return self.as_full_tuple < other.as_full_tuple
+
+ def __le__(self, other):
+ return self.as_full_tuple <= other.as_full_tuple
+
+ def __eq__(self, other):
+ return self.as_full_tuple == other.as_full_tuple
+
+ def __ne__(self, other):
+ return self.as_full_tuple != other.as_full_tuple
+
+ def __gt__(self, other):
+ return self.as_full_tuple > other.as_full_tuple
+
+ def __ge__(self, other):
+ return self.as_full_tuple >= other.as_full_tuple
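Usage of the new `Version` class, consistent with the tests added earlier
in this commit:

    from pcs.common.tools import Version

    v = Version(2, 3)
    str(v)                  # '2.3'   (revision omitted when unset)
    v.as_full_tuple         # (2, 3, 0)
    str(v.normalize())      # '2.3.0'

    # Comparisons work on the normalized tuple, so partial and full
    # versions compare naturally:
    Version(2) == Version(2, 0, 0)    # True
    Version(2, 5) < Version(2, 5, 1)  # True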
diff --git a/pcs/config.py b/pcs/config.py
index ac32b66..11ddb90 100644
--- a/pcs/config.py
+++ b/pcs/config.py
@@ -124,7 +124,7 @@ def config_show(argv):
def config_show_cib():
lib = utils.get_library_wrapper()
- modificators = utils.get_modificators()
+ modifiers = utils.get_modifiers()
print("Resources:")
utils.pcs_options["--all"] = 1
@@ -143,12 +143,12 @@ def config_show_cib():
print()
constraint.location_show([])
- order_command.show(lib, [], modificators)
- colocation_command.show(lib, [], modificators)
- ticket_command.show(lib, [], modificators)
+ order_command.show(lib, [], modifiers)
+ colocation_command.show(lib, [], modifiers)
+ ticket_command.show(lib, [], modifiers)
print()
- alert.print_alert_config(lib, [], modificators)
+ alert.print_alert_config(lib, [], modifiers)
print()
del utils.pcs_options["--all"]
@@ -926,4 +926,3 @@ def run_clufter(cmd_name, cmd_args, debug, force, err_prefix):
+ "\n"
)
sys.exit(1 if result is None else result)
-
diff --git a/pcs/constraint.py b/pcs/constraint.py
index 3705c07..704ead3 100644
--- a/pcs/constraint.py
+++ b/pcs/constraint.py
@@ -42,7 +42,7 @@ RESOURCE_TYPE_REGEXP = "regexp"
def constraint_cmd(argv):
lib = utils.get_library_wrapper()
- modificators = utils.get_modificators()
+ modifiers = utils.get_modifiers()
if len(argv) == 0:
argv = ["list"]
@@ -79,7 +79,7 @@ def constraint_cmd(argv):
if (sub_cmd2 == "set"):
try:
- order_command.create_with_set(lib, argv, modificators)
+ order_command.create_with_set(lib, argv, modifiers)
except CmdLineInputError as e:
utils.exit_on_cmdline_input_errror(e, "constraint", 'order set')
except LibraryError as e:
@@ -87,7 +87,7 @@ def constraint_cmd(argv):
elif (sub_cmd2 in ["remove","delete"]):
order_rm(argv)
elif (sub_cmd2 == "show"):
- order_command.show(lib, argv, modificators)
+ order_command.show(lib, argv, modifiers)
else:
order_start([sub_cmd2] + argv)
elif sub_cmd == "ticket":
@@ -104,7 +104,7 @@ def constraint_cmd(argv):
raise CmdLineInputError()
usage_name = "ticket "+sub_command
- command_map[sub_command](lib, argv[1:], modificators)
+ command_map[sub_command](lib, argv[1:], modifiers)
except LibraryError as e:
utils.process_library_reports(e.args)
except CmdLineInputError as e:
@@ -123,13 +123,13 @@ def constraint_cmd(argv):
elif (sub_cmd2 == "set"):
try:
- colocation_command.create_with_set(lib, argv, modificators)
+ colocation_command.create_with_set(lib, argv, modifiers)
except LibraryError as e:
utils.process_library_reports(e.args)
except CmdLineInputError as e:
utils.exit_on_cmdline_input_errror(e, "constraint", "colocation set")
elif (sub_cmd2 == "show"):
- colocation_command.show(lib, argv, modificators)
+ colocation_command.show(lib, argv, modifiers)
else:
usage.constraint()
sys.exit(1)
@@ -137,9 +137,9 @@ def constraint_cmd(argv):
constraint_rm(argv)
elif (sub_cmd == "show" or sub_cmd == "list"):
location_show(argv)
- order_command.show(lib, argv, modificators)
- colocation_command.show(lib, argv, modificators)
- ticket_command.show(lib, argv, modificators)
+ order_command.show(lib, argv, modifiers)
+ colocation_command.show(lib, argv, modifiers)
+ ticket_command.show(lib, argv, modifiers)
elif (sub_cmd == "ref"):
constraint_ref(argv)
elif (sub_cmd == "rule"):
diff --git a/pcs/lib/booth/config_structure.py b/pcs/lib/booth/config_structure.py
index f418e48..c2c6449 100644
--- a/pcs/lib/booth/config_structure.py
+++ b/pcs/lib/booth/config_structure.py
@@ -108,7 +108,7 @@ def validate_ticket_options(report_processor, options, allow_unknown_options):
reports = []
for key in sorted(options):
if key in GLOBAL_KEYS:
- reports.append(common_reports.invalid_option(
+ reports.append(common_reports.invalid_options(
[key],
TICKET_KEYS,
"booth ticket",
@@ -116,7 +116,7 @@ def validate_ticket_options(report_processor, options, allow_unknown_options):
elif key not in TICKET_KEYS:
reports.append(
- common_reports.invalid_option(
+ common_reports.invalid_options(
[key],
TICKET_KEYS,
"booth ticket",
diff --git a/pcs/lib/booth/env.py b/pcs/lib/booth/env.py
index a91404a..c5a69b7 100644
--- a/pcs/lib/booth/env.py
+++ b/pcs/lib/booth/env.py
@@ -61,7 +61,7 @@ def set_keyfile_access(file_path):
except EnvironmentError as e:
raise report_keyfile_io_error(file_path, "chown", e)
try:
- os.chmod(file_path, 0o600)
+ os.chmod(file_path, settings.pacemaker_authkey_file_mode)
except EnvironmentError as e:
raise report_keyfile_io_error(file_path, "chmod", e)
diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py
index 83de549..9db7dd7 100644
--- a/pcs/lib/booth/test/test_config_structure.py
+++ b/pcs/lib/booth/test/test_config_structure.py
@@ -56,7 +56,7 @@ class ValidateTicketOptionsTest(TestCase):
expected_errors = [
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["site"],
"option_type": "booth ticket",
@@ -66,7 +66,7 @@ class ValidateTicketOptionsTest(TestCase):
),
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["port"],
"option_type": "booth ticket",
@@ -85,7 +85,7 @@ class ValidateTicketOptionsTest(TestCase):
),
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["unknown"],
"option_type": "booth ticket",
@@ -118,7 +118,7 @@ class ValidateTicketOptionsTest(TestCase):
expected_errors = [
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["site"],
"option_type": "booth ticket",
@@ -142,7 +142,7 @@ class ValidateTicketOptionsTest(TestCase):
expected_errors + [
(
severities.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["unknown"],
"option_type": "booth ticket",
diff --git a/pcs/lib/booth/test/test_sync.py b/pcs/lib/booth/test/test_sync.py
index 8a8d97a..fbc5bd1 100644
--- a/pcs/lib/booth/test/test_sync.py
+++ b/pcs/lib/booth/test/test_sync.py
@@ -33,104 +33,6 @@ import pcs.lib.booth.sync as lib
def to_b64(string):
return base64.b64encode(string.encode("utf-8")).decode("utf-8")
-@skip("TODO: rewrite for pcs.lib.communication.booth.BoothSendConfig")
-class SetConfigOnNodeTest(TestCase):
- def setUp(self):
- self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- self.mock_rep = MockLibraryReportProcessor()
- self.node = NodeAddresses("node")
-
- def test_with_authfile(self):
- lib._set_config_on_node(
- self.mock_com,
- self.mock_rep,
- self.node,
- "cfg_name",
- "cfg",
- authfile="/abs/path/my-key.key",
- authfile_data="test key".encode("utf-8")
- )
- self.assertEqual(1, self.mock_com.call_node.call_count)
- self.assertEqual(self.node, self.mock_com.call_node.call_args[0][0])
- self.assertEqual(
- "remote/booth_set_config", self.mock_com.call_node.call_args[0][1]
- )
- data = url_decode(self.mock_com.call_node.call_args[0][2])
- self.assertTrue("data_json" in data)
- self.assertEqual(
- {
- "config": {
- "name": "cfg_name.conf",
- "data": "cfg"
- },
- "authfile": {
- "name": "my-key.key",
- "data": to_b64("test key")
- }
- },
- json.loads(data["data_json"][0])
- )
- assert_report_item_list_equal(
- self.mock_rep.report_item_list,
- [(
- Severities.INFO,
- report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
- {
- "node": self.node.label,
- "name_list": ["cfg_name"]
- }
- )]
- )
-
- def _assert(self):
- self.assertEqual(1, self.mock_com.call_node.call_count)
- self.assertEqual(self.node, self.mock_com.call_node.call_args[0][0])
- self.assertEqual(
- "remote/booth_set_config", self.mock_com.call_node.call_args[0][1]
- )
- data = url_decode(self.mock_com.call_node.call_args[0][2])
- self.assertTrue("data_json" in data)
- self.assertEqual(
- {
- "config": {
- "name": "cfg_name.conf",
- "data": "cfg"
- }
- },
- json.loads(data["data_json"][0])
- )
- assert_report_item_list_equal(
- self.mock_rep.report_item_list,
- [(
- Severities.INFO,
- report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
- {
- "node": self.node.label,
- "name_list": ["cfg_name"]
- }
- )]
- )
-
- def test_authfile_data_None(self):
- lib._set_config_on_node(
- self.mock_com, self.mock_rep, self.node, "cfg_name", "cfg",
- authfile="key.key"
- )
- self._assert()
-
- def test_authfile_only_data(self):
- lib._set_config_on_node(
- self.mock_com, self.mock_rep, self.node, "cfg_name", "cfg",
- authfile_data="key".encode("utf-8")
- )
- self._assert()
-
- def test_without_authfile(self):
- lib._set_config_on_node(
- self.mock_com, self.mock_rep, self.node, "cfg_name", "cfg"
- )
- self._assert()
-
@skip("TODO: rewrite for pcs.lib.communication.booth.BoothSaveFiles")
@mock.patch("pcs.lib.booth.sync.parallel_nodes_communication_helper")
class SyncConfigInCluster(TestCase):
@@ -1046,52 +948,3 @@ class SendAllConfigToNodeTest(TestCase):
)
]
)
-
-@skip("TODO: rewrite for pcs.lib.communication.booth.BoothGetConfig")
-class PullConfigFromNodeTest(TestCase):
- def setUp(self):
- self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- self.node = NodeAddresses("node")
-
- def test_success(self):
- self.mock_communicator.call_node.return_value = "{}"
- self.assertEqual(
- {}, lib.pull_config_from_node(
- self.mock_communicator, self.node, "booth"
- )
- )
- self.mock_communicator.call_node.assert_called_once_with(
- self.node, "remote/booth_get_config", "name=booth"
- )
-
- def test_not_json(self):
- self.mock_communicator.call_node.return_value = "not json"
- assert_raise_library_error(
- lambda: lib.pull_config_from_node(
- self.mock_communicator, self.node, "booth"
- ),
- (
- Severities.ERROR,
- report_codes.INVALID_RESPONSE_FORMAT,
- {"node": self.node.label}
- )
- )
-
- def test_communication_failure(self):
- self.mock_communicator.call_node.side_effect = NodeConnectionException(
- self.node.label, "command", "reason"
- )
- assert_raise_library_error(
- lambda: lib.pull_config_from_node(
- self.mock_communicator, self.node, "booth"
- ),
- (
- Severities.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
- {
- "node": self.node.label,
- "command": "command",
- "reason": "reason"
- }
- )
- )
diff --git a/pcs/lib/cib/acl.py b/pcs/lib/cib/acl.py
index 59fdf13..fde49ee 100644
--- a/pcs/lib/cib/acl.py
+++ b/pcs/lib/cib/acl.py
@@ -24,13 +24,6 @@ TAG_ROLE = "acl_role"
TAG_TARGET = "acl_target"
TAG_PERMISSION = "acl_permission"
-TAG_DESCRIPTION_MAP = {
- TAG_GROUP: "group",
- TAG_ROLE: "role",
- TAG_TARGET: "user",
- TAG_PERMISSION: "permission"
-}
-
def validate_permissions(tree, permission_info_list):
"""
Validate given permission list.
@@ -59,25 +52,20 @@ def validate_permissions(tree, permission_info_list):
))
if scope_type == 'id' and not does_id_exist(tree, scope):
- report_items.append(reports.id_not_found(scope, "id"))
+ report_items.append(reports.id_not_found(scope, ["id"]))
if report_items:
raise LibraryError(*report_items)
def _find(
- tag, acl_section, element_id, none_if_id_unused=False, id_description=None
+ tag, acl_section, element_id, none_if_id_unused=False, id_types=None
):
- if tag not in TAG_DESCRIPTION_MAP.keys():
- raise AssertionError("Unknown acl tag '{0}'".format(tag))
-
return find_element_by_tag_and_id(
tag,
acl_section,
element_id,
- id_description=id_description if id_description
- else TAG_DESCRIPTION_MAP[tag]
- ,
+ id_types=id_types,
none_if_id_unused=none_if_id_unused,
)
@@ -112,7 +100,7 @@ def find_target_or_group(acl_section, target_or_group_id):
return find_group(
acl_section,
target_or_group_id,
- id_description="user/group"
+ id_types=[TAG_GROUP, TAG_TARGET]
)
def create_role(acl_section, role_id, description=None):
diff --git a/pcs/lib/cib/constraint/constraint.py b/pcs/lib/cib/constraint/constraint.py
index f5075f8..02af9d0 100644
--- a/pcs/lib/cib/constraint/constraint.py
+++ b/pcs/lib/cib/constraint/constraint.py
@@ -29,7 +29,7 @@ def _validate_attrib_names(attrib_names, options):
]
if invalid_names:
raise LibraryError(
- reports.invalid_option(invalid_names, attrib_names, None)
+ reports.invalid_options(invalid_names, attrib_names, None)
)
def find_valid_resource_id(
@@ -40,7 +40,6 @@ def find_valid_resource_id(
parent_tags + [resource.primitive.TAG, resource.group.TAG],
cib,
id,
- id_description="resource"
)
if resource_element.tag in parent_tags:
diff --git a/pcs/lib/cib/constraint/resource_set.py b/pcs/lib/cib/constraint/resource_set.py
index 8eba876..6ec1820 100644
--- a/pcs/lib/cib/constraint/resource_set.py
+++ b/pcs/lib/cib/constraint/resource_set.py
@@ -32,7 +32,7 @@ def validate_options(options):
for name, value in options.items():
if name not in ATTRIB:
raise LibraryError(
- reports.invalid_option([name], list(ATTRIB.keys()), None)
+ reports.invalid_options([name], list(ATTRIB.keys()), None)
)
if value not in ATTRIB[name]:
raise LibraryError(
diff --git a/pcs/lib/cib/fencing_topology.py b/pcs/lib/cib/fencing_topology.py
index 003178b..91ae77a 100644
--- a/pcs/lib/cib/fencing_topology.py
+++ b/pcs/lib/cib/fencing_topology.py
@@ -67,10 +67,10 @@ def remove_levels_by_params(
object reporter -- report processor
etree topology_el -- etree element to remove the levels from
- int|string level -- level (index) of the new fencing level
- constant target_type -- the new fencing level target value type
- mixed target_value -- the new fencing level target value
- Iterable devices -- list of stonith devices for the new fencing level
+ int|string level -- level (index) of the fencing level to remove
+ constant target_type -- the removed fencing level target value type
+ mixed target_value -- the removed fencing level target value
+ Iterable devices -- list of stonith devices of the removed fencing level
bool ignore_if_missing -- when True, do not raise if level not found
"""
if target_type:
diff --git a/pcs/lib/cib/resource/bundle.py b/pcs/lib/cib/resource/bundle.py
index a159c1d..d99fbed 100644
--- a/pcs/lib/cib/resource/bundle.py
+++ b/pcs/lib/cib/resource/bundle.py
@@ -464,7 +464,7 @@ def _validate_map_ids_exist(bundle_el, map_type, map_label, id_list):
for id in id_list:
try:
find_element_by_tag_and_id(
- map_type, bundle_el, id, id_description=map_label
+ map_type, bundle_el, id, id_types=[map_label]
)
except LibraryError as e:
report_list.extend(e.args)
diff --git a/pcs/lib/cib/resource/group.py b/pcs/lib/cib/resource/group.py
index 37c9df0..f865b30 100644
--- a/pcs/lib/cib/resource/group.py
+++ b/pcs/lib/cib/resource/group.py
@@ -65,7 +65,6 @@ def place_resource(
"primitive",
group_element,
adjacent_resource_id,
- id_description="resource",
)
if put_after_adjacent and adjacent_resource.getnext() is None:
diff --git a/pcs/lib/cib/resource/remote_node.py b/pcs/lib/cib/resource/remote_node.py
index 423ce83..2fe1096 100644
--- a/pcs/lib/cib/resource/remote_node.py
+++ b/pcs/lib/cib/resource/remote_node.py
@@ -123,7 +123,7 @@ def get_host(resource_element):
def _validate_server_not_used(agent, option_dict):
if "server" in option_dict:
- return [reports.invalid_option(
+ return [reports.invalid_options(
["server"],
sorted([
attr["name"] for attr in agent.get_parameters()
@@ -210,7 +210,7 @@ def create(
)
except LibraryError as e:
for report in e.args:
- if report.code == report_codes.INVALID_OPTION:
+ if report.code == report_codes.INVALID_OPTIONS:
report.info["allowed"] = [
value for value in report.info["allowed"]
if value != "server"
diff --git a/pcs/lib/cib/test/test_acl.py b/pcs/lib/cib/test/test_acl.py
index cda52bb..c4c55c9 100644
--- a/pcs/lib/cib/test/test_acl.py
+++ b/pcs/lib/cib/test/test_acl.py
@@ -144,16 +144,22 @@ class ValidatePermissionsTest(LibraryAclTest):
report_codes.ID_NOT_FOUND,
{
"id": "id",
- "id_description": "id",
- }
+ "expected_types": ["id"],
+ "context_type": "",
+ "context_id": "",
+ },
+ None
),
(
severities.ERROR,
report_codes.ID_NOT_FOUND,
{
"id": "last",
- "id_description": "id",
- }
+ "expected_types": ["id"],
+ "context_type": "",
+ "context_id": "",
+ },
+ None
)
)
@@ -973,31 +979,30 @@ class FindTargetOrGroup(TestCase):
find_group.assert_called_once_with(
"acl_section",
"group_id",
- id_description="user/group"
+ id_types=["acl_group", "acl_target"]
)
class Find(TestCase):
- def test_refuses_bad_tag(self):
- self.assertRaises(
- AssertionError,
- lambda: lib._find("bad_tag", "acl_section", "id")
- )
-
@mock.patch("pcs.lib.cib.acl.find_element_by_tag_and_id")
def test_map_well_to_common_finder(self, common_finder):
common_finder.return_value = "element"
- self.assertEqual("element", lib._find(
- lib.TAG_GROUP, "acl_section", "group_id",
- none_if_id_unused=True,
- id_description="some description"
- ))
+ self.assertEqual(
+ "element",
+ lib._find(
+ lib.TAG_GROUP,
+ "acl_section",
+ "group_id",
+ none_if_id_unused=True,
+ id_types=["some", "types"]
+ )
+ )
common_finder.assert_called_once_with(
lib.TAG_GROUP,
"acl_section",
"group_id",
none_if_id_unused=True,
- id_description="some description"
+ id_types=["some", "types"]
)
@mock.patch("pcs.lib.cib.acl.find_element_by_tag_and_id")
@@ -1011,5 +1016,5 @@ class Find(TestCase):
"acl_section",
"group_id",
none_if_id_unused=True,
- id_description=lib.TAG_DESCRIPTION_MAP[lib.TAG_GROUP]
+ id_types=None
)
diff --git a/pcs/lib/cib/test/test_alert.py b/pcs/lib/cib/test/test_alert.py
index 37eea67..71b5ce7 100644
--- a/pcs/lib/cib/test/test_alert.py
+++ b/pcs/lib/cib/test/test_alert.py
@@ -344,10 +344,11 @@ class UpdateAlertTest(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "alert0",
+ "expected_types": ["alert"],
"context_type": "alerts",
"context_id": "",
- "id_description": "alert"
- }
+ },
+ None
)
)
@@ -390,10 +391,11 @@ class RemoveAlertTest(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "not-existing-id",
+ "expected_types": ["alert"],
"context_type": "alerts",
"context_id": "",
- "id_description": "alert"
- }
+ },
+ None
)
)
@@ -550,10 +552,11 @@ class AddRecipientTest(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "alert1",
+ "expected_types": ["alert"],
"context_type": "alerts",
"context_id": "",
- "id_description": "alert"
- }
+ },
+ None
)
)
@@ -868,14 +871,17 @@ class UpdateRecipientTest(TestCase):
def test_recipient_not_exists(self):
assert_raise_library_error(
lambda: alert.update_recipient(
- self.mock_reporter, self.tree, "recipient"),
+ self.mock_reporter, self.tree, "missing-recipient"),
(
severities.ERROR,
report_codes.ID_NOT_FOUND,
{
- "id": "recipient",
- "id_description": "recipient"
- }
+ "id": "missing-recipient",
+ "expected_types": ["recipient"],
+ "context_type": "alerts",
+ "context_id": "",
+ },
+ None
)
)
@@ -922,10 +928,11 @@ class RemoveRecipientTest(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "recipient",
+ "expected_types": ["recipient"],
"context_type": "alerts",
"context_id": "",
- "id_description": "recipient",
- }
+ },
+ None
)
)
diff --git a/pcs/lib/cib/test/test_constraint.py b/pcs/lib/cib/test/test_constraint.py
index 9774414..aa0f21c 100644
--- a/pcs/lib/cib/test/test_constraint.py
+++ b/pcs/lib/cib/test/test_constraint.py
@@ -217,7 +217,7 @@ class PrepareOptionsTest(TestCase):
),
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["b"],
"option_type": None,
diff --git a/pcs/lib/cib/test/test_constraint_colocation.py b/pcs/lib/cib/test/test_constraint_colocation.py
index 5c4713d..25923bb 100644
--- a/pcs/lib/cib/test/test_constraint_colocation.py
+++ b/pcs/lib/cib/test/test_constraint_colocation.py
@@ -83,7 +83,7 @@ class PrepareOptionsWithSetTest(TestCase):
}),
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["unknown"],
"option_type": None,
diff --git a/pcs/lib/cib/test/test_constraint_order.py b/pcs/lib/cib/test/test_constraint_order.py
index 57bbff5..c43fa2c 100644
--- a/pcs/lib/cib/test/test_constraint_order.py
+++ b/pcs/lib/cib/test/test_constraint_order.py
@@ -92,7 +92,7 @@ class PrepareOptionsWithSetTest(TestCase):
}),
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["unknown"],
"option_type": None,
diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py
index 6c76f43..ff91f25 100644
--- a/pcs/lib/cib/test/test_constraint_ticket.py
+++ b/pcs/lib/cib/test/test_constraint_ticket.py
@@ -69,7 +69,7 @@ class PrepareOptionsPlainTest(TestCase):
),
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["unknown"],
"option_type": None,
diff --git a/pcs/lib/cib/test/test_resource_group.py b/pcs/lib/cib/test/test_resource_group.py
index ffba240..e3f7b34 100644
--- a/pcs/lib/cib/test/test_resource_group.py
+++ b/pcs/lib/cib/test/test_resource_group.py
@@ -117,10 +117,11 @@ class PlaceResource(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "r",
- "id_description": "resource",
+ "expected_types": ["primitive"],
"context_type": "group",
"context_id": "g",
},
+ None
),
)
diff --git a/pcs/lib/cib/test/test_resource_guest_node.py b/pcs/lib/cib/test/test_resource_guest_node.py
index 0e33420..4f4228d 100644
--- a/pcs/lib/cib/test/test_resource_guest_node.py
+++ b/pcs/lib/cib/test/test_resource_guest_node.py
@@ -140,7 +140,7 @@ class ValidateOptions(TestCase):
[
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_type": "guest",
"option_names": ["invalid"],
diff --git a/pcs/lib/cib/test/test_resource_operations.py b/pcs/lib/cib/test/test_resource_operations.py
index 6cb9000..87055a1 100644
--- a/pcs/lib/cib/test/test_resource_operations.py
+++ b/pcs/lib/cib/test/test_resource_operations.py
@@ -268,7 +268,7 @@ class ValidateOperation(TestCase):
[
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["unknown"],
"option_type": "resource operation",
diff --git a/pcs/lib/cib/test/test_resource_remote_node.py b/pcs/lib/cib/test/test_resource_remote_node.py
index 2fea22e..7f01339 100644
--- a/pcs/lib/cib/test/test_resource_remote_node.py
+++ b/pcs/lib/cib/test/test_resource_remote_node.py
@@ -274,7 +274,7 @@ class Validate(TestCase):
[
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
'option_type': 'resource',
'option_names': ['server'],
diff --git a/pcs/lib/cib/test/test_resource_set.py b/pcs/lib/cib/test/test_resource_set.py
index 72ba337..5e0d75c 100644
--- a/pcs/lib/cib/test/test_resource_set.py
+++ b/pcs/lib/cib/test/test_resource_set.py
@@ -38,7 +38,7 @@ class PrepareSetTest(TestCase):
}),
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["invalid_name"],
"option_type": None,
diff --git a/pcs/lib/cib/test/test_tools.py b/pcs/lib/cib/test/test_tools.py
index 62d1d45..fab39ce 100644
--- a/pcs/lib/cib/test/test_tools.py
+++ b/pcs/lib/cib/test/test_tools.py
@@ -13,11 +13,13 @@ from pcs.test.tools.assertions import (
assert_raise_library_error,
assert_report_item_list_equal,
)
+from pcs.test.tools import fixture
from pcs.test.tools.misc import get_test_resource as rc
from pcs.test.tools.pcs_unittest import mock
from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
from pcs.common import report_codes
+from pcs.common.tools import Version
from pcs.lib.errors import ReportItemSeverity as severities
from pcs.lib.cib import tools as lib
@@ -436,7 +438,7 @@ class GetPacemakerVersionByWhichCibWasValidatedTest(TestCase):
def test_no_revision(self):
self.assertEqual(
- (1, 2, 0),
+ Version(1, 2),
lib.get_pacemaker_version_by_which_cib_was_validated(
etree.XML('<cib validate-with="pacemaker-1.2"/>')
)
@@ -444,13 +446,68 @@ class GetPacemakerVersionByWhichCibWasValidatedTest(TestCase):
def test_with_revision(self):
self.assertEqual(
- (1, 2, 3),
+ Version(1, 2, 3),
lib.get_pacemaker_version_by_which_cib_was_validated(
etree.XML('<cib validate-with="pacemaker-1.2.3"/>')
)
)
+class GetCibCrmFeatureSet(TestCase):
+ def test_success(self):
+ self.assertEqual(
+ Version(3, 0, 9),
+ lib.get_cib_crm_feature_set(
+ etree.XML('<cib crm_feature_set="3.0.9" />')
+ )
+ )
+
+ def test_success_no_revision(self):
+ self.assertEqual(
+ Version(3, 1),
+ lib.get_cib_crm_feature_set(
+ etree.XML('<cib crm_feature_set="3.1" />')
+ )
+ )
+
+ def test_missing_attribute(self):
+ assert_raise_library_error(
+ lambda: lib.get_cib_crm_feature_set(
+ etree.XML("<cib />")
+ ),
+ fixture.error(
+ report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
+ reason=(
+ "the attribute 'crm_feature_set' of the element 'cib' is "
+ "missing"
+ )
+ )
+ )
+
+ def test_missing_attribute_none(self):
+ self.assertEqual(
+ None,
+ lib.get_cib_crm_feature_set(
+ etree.XML('<cib />'),
+ none_if_missing=True
+ )
+ )
+
+ def test_invalid_version(self):
+ assert_raise_library_error(
+ lambda: lib.get_cib_crm_feature_set(
+ etree.XML('<cib crm_feature_set="3" />')
+ ),
+ fixture.error(
+ report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
+ reason=(
+ "the attribute 'crm_feature_set' of the element 'cib' has "
+ "an invalid value: '3'"
+ )
+ )
+ )
+
+
find_group = partial(lib.find_element_by_tag_and_id, "group")
class FindTagWithId(TestCase):
def test_returns_element_when_exists(self):
@@ -532,25 +589,29 @@ class FindTagWithId(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "a",
- "id_description": "group",
+ "expected_types": ["group"],
"context_type": "resources",
"context_id": "",
},
+ None
),
)
assert_raise_library_error(
lambda: find_group(
tree.find('.//resources'),
"a",
- id_description="resource group"
+ id_types=["resource group"]
),
(
severities.ERROR,
report_codes.ID_NOT_FOUND,
{
"id": "a",
- "id_description": "resource group",
+ "expected_types": ["resource group"],
+ "context_type": "resources",
+ "context_id": "",
},
+ None
),
)
diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
index 60173c1..59724d4 100644
--- a/pcs/lib/cib/tools.py
+++ b/pcs/lib/cib/tools.py
@@ -6,7 +6,7 @@ from __future__ import (
import re
-from pcs.common.tools import is_string
+from pcs.common.tools import is_string, Version
from pcs.lib import reports
from pcs.lib.cib import sections
from pcs.lib.errors import LibraryError
@@ -16,6 +16,8 @@ from pcs.lib.pacemaker.values import (
)
from pcs.lib.xml_tools import get_root
+_VERSION_FORMAT = r"(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<rev>\d+))?"
+
class IdProvider(object):
"""
Book ids for future use in the CIB and generate new ids accordingly
@@ -121,7 +123,7 @@ def find_unique_id(tree, check_id, reserved_ids=None):
return temp_id
def find_element_by_tag_and_id(
- tag, context_element, element_id, none_if_id_unused=False, id_description=""
+ tag, context_element, element_id, none_if_id_unused=False, id_types=None
):
"""
Return element with given tag and element_id under context_element. When
@@ -133,9 +135,16 @@ def find_element_by_tag_and_id(
string element_id is id of search element
bool none_if_id_unused if the element is not found then return None if True
or raise a LibraryError if False
- string id_description optional description for id
+ list id_types optional list of expected types of the id (used as its description in reports)
"""
tag_list = [tag] if is_string(tag) else tag
+ if id_types is None:
+ id_type_list = tag_list
+ elif is_string(id_types):
+ id_type_list = [id_types]
+ else:
+ id_type_list = id_types
+
element_list = context_element.xpath(
'.//*[({0}) and @id="{1}"]'.format(
" or ".join(["self::{0}".format(one_tag) for one_tag in tag_list]),
@@ -171,7 +180,7 @@ def find_element_by_tag_and_id(
raise LibraryError(
reports.id_not_found(
element_id,
- id_description if id_description else "/".join(tag_list),
+ id_type_list,
context_element.tag,
context_element.attrib.get("id", "")
)
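The new id_types parameter accepts None, a single string, or a list, and
the branch above collapses all three shapes into one list before it
reaches reports.id_not_found. Sketched stand-alone below; is_string is
simplified to the Python 3 case (the real helper is
pcs.common.tools.is_string) and normalize_id_types is an illustrative
name only.

    def is_string(candidate):
        # Simplified, Python 3-only stand-in for pcs.common.tools.is_string.
        return isinstance(candidate, str)

    def normalize_id_types(tag_list, id_types):
        # None falls back to the searched tags, a bare string becomes a
        # one-element list, any other iterable is used as-is.
        if id_types is None:
            return tag_list
        if is_string(id_types):
            return [id_types]
        return id_types

    print(normalize_id_types(["group"], None))               # ['group']
    print(normalize_id_types(["group"], "resource group"))   # ['resource group']
    print(normalize_id_types(["group"], ["some", "types"]))  # ['some', 'types']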
@@ -238,31 +247,55 @@ def get_resources(tree):
"""
return sections.get(tree, sections.RESOURCES)
-def get_pacemaker_version_by_which_cib_was_validated(cib):
- """
- Return version of pacemaker which validated specified cib as tree.
- Version is returned as tuple of integers: (<major>, <minor>, <revision>).
- Raises LibraryError on any failure.
-
- cib -- cib etree
- """
- version = cib.get("validate-with")
+def _get_cib_version(cib, attribute, regexp, none_if_missing=False):
+ version = cib.get(attribute)
if version is None:
+ if none_if_missing:
+ return None
raise LibraryError(reports.cib_load_error_invalid_format(
- "the attribute 'validate-with' of the element 'cib' is missing"
+ "the attribute '{0}' of the element 'cib' is missing".format(
+ attribute
+ )
))
-
- regexp = re.compile(
- r"pacemaker-(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<rev>\d+))?"
- )
match = regexp.match(version)
if not match:
raise LibraryError(reports.cib_load_error_invalid_format(
- "the attribute 'validate-with' of the element 'cib' has an invalid"
- " value: '{0}'".format(version)
+ (
+ "the attribute '{0}' of the element 'cib' has an invalid"
+ " value: '{1}'"
+ ).format(attribute, version)
))
- return (
+ return Version(
int(match.group("major")),
int(match.group("minor")),
- int(match.group("rev") or 0)
+ int(match.group("rev")) if match.group("rev") else None
+ )
+
+def get_pacemaker_version_by_which_cib_was_validated(cib):
+ """
+ Return version of pacemaker which validated specified cib as tree.
+ Version is returned as an instance of pcs.common.tools.Version.
+ Raises LibraryError on any failure.
+
+ cib -- cib etree
+ """
+ return _get_cib_version(
+ cib,
+ "validate-with",
+ re.compile(r"pacemaker-{0}".format(_VERSION_FORMAT))
+ )
+
+def get_cib_crm_feature_set(cib, none_if_missing=False):
+ """
+ Return crm_feature_set as pcs.common.tools.Version or raise LibraryError
+
+ etree cib -- cib etree
+ bool none_if_missing -- return None instead of raising when crm_feature_set
+ is missing
+ """
+ return _get_cib_version(
+ cib,
+ "crm_feature_set",
+ re.compile(_VERSION_FORMAT),
+ none_if_missing=none_if_missing
)
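Both getters now funnel through _get_cib_version, differing only in the
attribute name and an optional "pacemaker-" prefix on the regular
expression. The parsing itself can be sketched self-contained as below;
the namedtuple is an assumed stand-in for pcs.common.tools.Version and
parse_cib_version is an illustrative name, not pcs API.

    import re
    from collections import namedtuple

    # Assumed stand-in for pcs.common.tools.Version.
    Version = namedtuple("Version", ["major", "minor", "revision"])

    _VERSION_FORMAT = r"(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<rev>\d+))?"

    def parse_cib_version(value, prefix=""):
        # "pacemaker-" prefix for validate-with, no prefix for crm_feature_set.
        match = re.compile(prefix + _VERSION_FORMAT).match(value)
        if not match:
            raise ValueError("invalid version value: '{0}'".format(value))
        return Version(
            int(match.group("major")),
            int(match.group("minor")),
            int(match.group("rev")) if match.group("rev") else None,
        )

    print(parse_cib_version("pacemaker-1.2.3", prefix="pacemaker-"))
    # Version(major=1, minor=2, revision=3)
    print(parse_cib_version("3.1"))
    # Version(major=3, minor=1, revision=None)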
diff --git a/pcs/lib/commands/acl.py b/pcs/lib/commands/acl.py
index 37e3f14..7b1905c 100644
--- a/pcs/lib/commands/acl.py
+++ b/pcs/lib/commands/acl.py
@@ -6,11 +6,12 @@ from __future__ import (
from contextlib import contextmanager
+from pcs.common.tools import Version
from pcs.lib.cib import acl
from pcs.lib.cib.tools import get_acls
-REQUIRED_CIB_VERSION = (2, 0, 0)
+REQUIRED_CIB_VERSION = Version(2, 0, 0)
@contextmanager
def cib_acl_section(env):
diff --git a/pcs/lib/commands/alert.py b/pcs/lib/commands/alert.py
index 73f6f1e..fb403b9 100644
--- a/pcs/lib/commands/alert.py
+++ b/pcs/lib/commands/alert.py
@@ -4,12 +4,13 @@ from __future__ import (
print_function,
)
+from pcs.common.tools import Version
from pcs.lib import reports
from pcs.lib.cib import alert
from pcs.lib.errors import LibraryError
-REQUIRED_CIB_VERSION = (2, 5, 0)
+REQUIRED_CIB_VERSION = Version(2, 5, 0)
def create_alert(
diff --git a/pcs/lib/commands/fencing_topology.py b/pcs/lib/commands/fencing_topology.py
index 07c5e6b..ba894b7 100644
--- a/pcs/lib/commands/fencing_topology.py
+++ b/pcs/lib/commands/fencing_topology.py
@@ -8,6 +8,7 @@ from pcs.common.fencing_topology import (
TARGET_TYPE_REGEXP,
TARGET_TYPE_ATTRIBUTE,
)
+from pcs.common.tools import Version
from pcs.lib.cib import fencing_topology as cib_fencing_topology
from pcs.lib.cib.tools import (
get_fencing_topology,
@@ -23,7 +24,7 @@ def add_level(
"""
Validate and add a new fencing level
- LibraryError lib_env -- environment
+ LibraryEnvironment lib_env -- environment
int|string level -- level (index) of the new fencing level
constant target_type -- the new fencing level target value type
mixed target_value -- the new fencing level target value
@@ -33,9 +34,9 @@ def add_level(
"""
version_check = None
if target_type == TARGET_TYPE_REGEXP:
- version_check = (2, 3, 0)
+ version_check = Version(2, 3, 0)
elif target_type == TARGET_TYPE_ATTRIBUTE:
- version_check = (2, 4, 0)
+ version_check = Version(2, 4, 0)
cib = lib_env.get_cib(version_check)
cib_fencing_topology.add_level(
@@ -62,7 +63,7 @@ def get_config(lib_env):
Return a list of levels where each level is a dict with keys: target_type,
target_value, level and devices. Devices is a list of stonith device ids.
- LibraryError lib_env -- environment
+ LibraryEnvironment lib_env -- environment
"""
cib = lib_env.get_cib()
return cib_fencing_topology.export(get_fencing_topology(cib))
@@ -70,7 +71,7 @@ def get_config(lib_env):
def remove_all_levels(lib_env):
"""
Remove all fencing levels
- LibraryError lib_env -- environment
+ LibraryEnvironment lib_env -- environment
"""
cib_fencing_topology.remove_all_levels(
get_fencing_topology(lib_env.get_cib())
@@ -84,11 +85,11 @@ def remove_levels_by_params(
"""
Remove specified fencing level(s)
- LibraryError lib_env -- environment
- int|string level -- level (index) of the new fencing level
- constant target_type -- the new fencing level target value type
- mixed target_value -- the new fencing level target value
- Iterable devices -- list of stonith devices for the new fencing level
+ LibraryEnvironment lib_env -- environment
+ int|string level -- level (index) of the fencing level to remove
+ constant target_type -- the removed fencing level target value type
+ mixed target_value -- the removed fencing level target value
+ Iterable devices -- list of stonith devices of the removed fencing level
bool ignore_if_missing -- when True, do not report if level not found
"""
cib_fencing_topology.remove_levels_by_params(
@@ -107,7 +108,7 @@ def verify(lib_env):
"""
Check if all cluster nodes and stonith devices used in fencing levels exist
- LibraryError lib_env -- environment
+ LibraryEnvironment lib_env -- environment
"""
cib = lib_env.get_cib()
cib_fencing_topology.verify(
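The add_level change also illustrates the pattern for gating features on
the CIB schema: compute the minimum pacemaker version the target type
needs and let get_cib enforce it. The decision step alone, sketched with
a namedtuple standing in for pcs.common.tools.Version and plain strings
for the TARGET_TYPE_* constants (the real constants live in pcs.common):

    from collections import namedtuple

    Version = namedtuple("Version", ["major", "minor", "revision"])

    TARGET_TYPE_REGEXP = "regexp"        # assumed values,
    TARGET_TYPE_ATTRIBUTE = "attribute"  # for illustration only

    def required_cib_version(target_type):
        # Regexp targets need CIB schema 2.3, attribute targets 2.4;
        # plain node targets impose no extra requirement (None).
        if target_type == TARGET_TYPE_REGEXP:
            return Version(2, 3, 0)
        if target_type == TARGET_TYPE_ATTRIBUTE:
            return Version(2, 4, 0)
        return None

    print(required_cib_version(TARGET_TYPE_REGEXP))
    # Version(major=2, minor=3, revision=0)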
diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py
index 3e9db0e..6b869ed 100644
--- a/pcs/lib/commands/quorum.py
+++ b/pcs/lib/commands/quorum.py
@@ -325,7 +325,7 @@ def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes):
reporter.process(
reports.qdevice_certificate_removal_started()
)
- com_cmd = qdevice_net_com.ClientDestroy(reporter)
+ com_cmd = qdevice_net_com.ClientDestroy(reporter, skip_offline_nodes)
com_cmd.set_targets(
lib_env.get_node_target_factory().get_target_list(cluster_nodes)
)
diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
index 8cda310..0e2746f 100644
--- a/pcs/lib/commands/resource.py
+++ b/pcs/lib/commands/resource.py
@@ -8,6 +8,7 @@ from contextlib import contextmanager
from functools import partial
from pcs.common import report_codes
+from pcs.common.tools import Version
from pcs.lib import reports
from pcs.lib.cib import resource
from pcs.lib.cib.resource import operations, remote_node, guest_node
@@ -457,7 +458,7 @@ def create_into_bundle(
or
resource.common.are_meta_disabled(meta_attributes)
),
- required_cib_version=(2, 8, 0)
+ required_cib_version=Version(2, 8, 0)
) as resources_section:
_check_special_cases(
env,
@@ -481,7 +482,9 @@ def create_into_bundle(
resource.common.disable(primitive_element)
resource.bundle.add_resource(
find_element_by_tag_and_id(
- "bundle", resources_section, bundle_id
+ resource.bundle.TAG,
+ resources_section,
+ bundle_id
),
primitive_element
)
@@ -523,7 +526,7 @@ def bundle_create(
or
resource.common.are_meta_disabled(meta_attributes)
),
- required_cib_version=(2, 8, 0)
+ required_cib_version=Version(2, 8, 0)
) as resources_section:
# no need to run validations related to remote and guest nodes as those
# nodes can only be created from primitive resources
@@ -589,7 +592,7 @@ def bundle_update(
env,
wait,
[bundle_id],
- required_cib_version=(2, 8, 0)
+ required_cib_version=Version(2, 8, 0)
) as resources_section:
# no need to run validations related to remote and guest nodes as those
# nodes can only be created from primitive resources
@@ -682,7 +685,7 @@ def _resource_list_enable_disable(resource_el_list, func, cluster_state):
report_list.append(
reports.id_not_found(
res_id,
- id_description="resource/clone/master/group/bundle"
+ ["primitive", "clone", "master", "group", "bundle"]
)
)
return report_list
@@ -784,8 +787,7 @@ def _find_resources_or_raise(
find_element_by_tag_and_id(
resource_tags,
resources_section,
- res_id,
- id_description="resource/clone/master/group/bundle"
+ res_id
)
)
)
diff --git a/pcs/lib/commands/sbd.py b/pcs/lib/commands/sbd.py
index 8304942..61f7af1 100644
--- a/pcs/lib/commands/sbd.py
+++ b/pcs/lib/commands/sbd.py
@@ -41,6 +41,14 @@ from pcs.lib.validate import (
)
+UNSUPPORTED_SBD_OPTION_LIST = [
+ "SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER", "SBD_DEVICE"
+]
+ALLOWED_SBD_OPTION_LIST = [
+ "SBD_DELAY_START", "SBD_STARTMODE", "SBD_WATCHDOG_TIMEOUT"
+]
+
+
def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
"""
Validate user SBD configuration. Options 'SBD_WATCHDOG_DEV' and 'SBD_OPTS'
@@ -51,22 +59,16 @@ def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
"""
report_item_list = []
- unsupported_sbd_option_list = [
- "SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER", "SBD_DEVICE"
- ]
- allowed_sbd_options = [
- "SBD_DELAY_START", "SBD_STARTMODE", "SBD_WATCHDOG_TIMEOUT"
- ]
for sbd_opt in sbd_config:
- if sbd_opt in unsupported_sbd_option_list:
- report_item_list.append(reports.invalid_option(
- [sbd_opt], allowed_sbd_options, None
+ if sbd_opt in UNSUPPORTED_SBD_OPTION_LIST:
+ report_item_list.append(reports.invalid_options(
+ [sbd_opt], ALLOWED_SBD_OPTION_LIST, None
))
- elif sbd_opt not in allowed_sbd_options:
- report_item_list.append(reports.invalid_option(
+ elif sbd_opt not in ALLOWED_SBD_OPTION_LIST:
+ report_item_list.append(reports.invalid_options(
[sbd_opt],
- allowed_sbd_options,
+ ALLOWED_SBD_OPTION_LIST,
None,
severity=(
Severities.WARNING if allow_unknown_opts
@@ -213,12 +215,13 @@ def enable_sbd(
lib_env.report_processor.process_list(
_check_node_names_in_cluster(
- node_list, watchdog_dict.keys() + node_device_dict.keys()
+ node_list,
+ list(watchdog_dict.keys()) + list(node_device_dict.keys())
)
+
_validate_watchdog_dict(full_watchdog_dict)
+
- _validate_device_dict(full_device_dict) if using_devices else []
+ (_validate_device_dict(full_device_dict) if using_devices else [])
+
_validate_sbd_options(sbd_options, allow_unknown_opts)
)
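The enable_sbd hunk wraps both .keys() calls in list() because the report
lists are joined with +: under Python 2 dict.keys() returns a list, which
supports concatenation, while under Python 3 it returns a dict_keys view,
which does not. Demonstrated on Python 3:

    watchdog_dict = {"node-1": "/dev/watchdog"}
    node_device_dict = {"node-2": ["/dev/sda"]}

    # Python 3: dict_keys has no __add__, so this raises TypeError.
    try:
        watchdog_dict.keys() + node_device_dict.keys()
    except TypeError as e:
        print("unsupported: {0}".format(e))

    # The fix from the hunk above: materialize both views first.
    combined = list(watchdog_dict.keys()) + list(node_device_dict.keys())
    print(sorted(combined))  # ['node-1', 'node-2']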
diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py
index bb9fb98..584e1b2 100644
--- a/pcs/lib/commands/stonith.py
+++ b/pcs/lib/commands/stonith.py
@@ -4,11 +4,14 @@ from __future__ import (
print_function,
)
-from pcs.lib.resource_agent import find_valid_stonith_agent_by_name as get_agent
from pcs.lib.cib import resource
from pcs.lib.cib.resource.common import are_meta_disabled
+from pcs.lib.commands.resource import (
+ _ensure_disabled_after_wait,
+ resource_environment
+)
from pcs.lib.pacemaker.values import validate_id
-from pcs.lib.commands.resource import resource_environment
+from pcs.lib.resource_agent import find_valid_stonith_agent_by_name as get_agent
def create(
env, stonith_id, stonith_agent_name,
@@ -55,8 +58,10 @@ def create(
with resource_environment(
env,
wait,
- stonith_id,
- ensure_disabled or are_meta_disabled(meta_attributes),
+ [stonith_id],
+ _ensure_disabled_after_wait(
+ ensure_disabled or are_meta_disabled(meta_attributes),
+ )
) as resources_section:
stonith_element = resource.primitive.create(
env.report_processor,
@@ -125,8 +130,10 @@ def create_in_group(
with resource_environment(
env,
wait,
- stonith_id,
- ensure_disabled or are_meta_disabled(meta_attributes),
+ [stonith_id],
+ _ensure_disabled_after_wait(
+ ensure_disabled or are_meta_disabled(meta_attributes),
+ )
) as resources_section:
stonith_element = resource.primitive.create(
env.report_processor, resources_section,
diff --git a/pcs/lib/commands/test/remote_node/__init__.py b/pcs/lib/commands/test/remote_node/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/commands/test/remote_node/fixtures_add.py b/pcs/lib/commands/test/remote_node/fixtures_add.py
new file mode 100644
index 0000000..2b674b8
--- /dev/null
+++ b/pcs/lib/commands/test/remote_node/fixtures_add.py
@@ -0,0 +1,222 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import base64
+import json
+
+from pcs.common import report_codes
+from pcs.test.tools import fixture
+from pcs.test.tools.pcs_unittest import mock
+
+
+OFFLINE_ERROR_MSG = "Could not resolve host"
+FAIL_HTTP_KWARGS = dict(
+ output="",
+ was_connected=False,
+ errno='6',
+ error_msg_template=OFFLINE_ERROR_MSG,
+)
+
+class EnvConfigMixin(object):
+ PCMK_AUTHKEY_PATH = "/etc/pacemaker/authkey"
+ def __init__(self, call_collection, wrap_helper, config):
+ self.__calls = call_collection
+ self.config = config
+
+ def distribute_authkey(
+ self, communication_list, pcmk_authkey_content, result=None, **kwargs
+ ):
+ if kwargs.get("was_connected", True):
+ result = result if result is not None else {
+ "code": "written",
+ "message": "",
+ }
+
+ kwargs["results"] = {
+ "pacemaker_remote authkey": result
+ }
+ elif result is not None:
+ raise AssertionError(
+ "Keyword 'result' makes no sense with 'was_connected=False'"
+ )
+ self.config.http.put_file(
+ communication_list=communication_list,
+ files={
+ "pacemaker_remote authkey": {
+ "type": "pcmk_remote_authkey",
+ "data": base64
+ .b64encode(pcmk_authkey_content)
+ .decode("utf-8")
+ ,
+ "rewrite_existing": True
+ }
+ },
+ **kwargs
+ )
+
+ def check_node_availability(self, label, result=True, **kwargs):
+ if "output" not in kwargs:
+ kwargs["output"] = json.dumps({"node_available": result})
+
+ self.config.http.place_multinode_call(
+ "node_available",
+ communication_list=[dict(label=label)],
+ action="remote/node_available",
+ **kwargs
+ )
+
+ def authkey_exists(self, return_value):
+ self.config.fs.exists(self.PCMK_AUTHKEY_PATH, return_value=return_value)
+
+ def open_authkey(self, pcmk_authkey_content="", fail=False):
+ kwargs = {}
+ if fail:
+ kwargs["side_effect"] = EnvironmentError("open failed")
+ else:
+ kwargs["return_value"] = mock.mock_open(
+ read_data=pcmk_authkey_content
+ )()
+
+ self.config.fs.open(
+ self.PCMK_AUTHKEY_PATH,
+ **kwargs
+ )
+
+ def push_existing_authkey_to_remote(
+ self, remote_host, distribution_result=None
+ ):
+ pcmk_authkey_content = b"password"
+ (self.config
+ .local.authkey_exists(return_value=True)
+ .local.open_authkey(pcmk_authkey_content)
+ .local.distribute_authkey(
+ communication_list=[dict(label=remote_host)],
+ pcmk_authkey_content=pcmk_authkey_content,
+ result=distribution_result
+ )
+ )
+
+ def run_pacemaker_remote(self, label, result=None, **kwargs):
+ if kwargs.get("was_connected", True):
+ result = result if result is not None else {
+ "code": "success",
+ "message": "",
+ }
+
+ kwargs["results"] = {
+ "pacemaker_remote enable": result,
+ "pacemaker_remote start": result
+ }
+ elif result is not None:
+ raise AssertionError(
+ "Keyword 'result' makes no sense with 'was_connected=False'"
+ )
+
+ self.config.http.manage_services(
+ communication_list=[dict(label=label)],
+ action_map={
+ "pacemaker_remote enable": {
+ "type": "service_command",
+ "service": "pacemaker_remote",
+ "command": "enable",
+ },
+ "pacemaker_remote start": {
+ "type": "service_command",
+ "service": "pacemaker_remote",
+ "command": "start",
+ },
+ },
+ **kwargs
+ )
+
+REPORTS = (fixture.ReportStore()
+ .info(
+ "authkey_distribution_started" ,
+ report_codes.FILES_DISTRIBUTION_STARTED,
+ #python 3 has dict_keys so a list is not the right structure
+ file_list={"pacemaker_remote authkey": None}.keys(),
+ description="remote node configuration files",
+ )
+ .info(
+ "authkey_distribution_success",
+ report_codes.FILE_DISTRIBUTION_SUCCESS,
+ file_description="pacemaker_remote authkey",
+ )
+ .info(
+ "pcmk_remote_start_enable_started",
+ report_codes.SERVICE_COMMANDS_ON_NODES_STARTED,
+ #python 3 has dict_keys so a list is not the right structure
+ action_list={
+ "pacemaker_remote start": None,
+ "pacemaker_remote enable": None,
+ }.keys(),
+ description="start of service pacemaker_remote",
+ )
+ .info(
+ "pcmk_remote_enable_success",
+ report_codes.SERVICE_COMMAND_ON_NODE_SUCCESS,
+ service_command_description="pacemaker_remote enable",
+ )
+ .info(
+ "pcmk_remote_start_success",
+ report_codes.SERVICE_COMMAND_ON_NODE_SUCCESS,
+ service_command_description="pacemaker_remote start",
+ )
+)
+
+EXTRA_REPORTS = (fixture.ReportStore()
+ .error(
+ "manage_services_connection_failed",
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ command="remote/manage_services",
+ reason=OFFLINE_ERROR_MSG,
+ force_code=report_codes.SKIP_OFFLINE_NODES
+ )
+ .as_warn(
+ "manage_services_connection_failed",
+ "manage_services_connection_failed_warn",
+ )
+ .copy(
+ "manage_services_connection_failed",
+ "check_availability_connection_failed",
+ command="remote/node_available",
+ )
+ .as_warn(
+ "check_availability_connection_failed",
+ "check_availability_connection_failed_warn",
+ )
+ .copy(
+ "manage_services_connection_failed",
+ "put_file_connection_failed",
+ command="remote/put_file",
+ )
+ .as_warn(
+ "put_file_connection_failed",
+ "put_file_connection_failed_warn",
+ )
+ .error(
+ "pcmk_remote_enable_failed",
+ report_codes.SERVICE_COMMAND_ON_NODE_ERROR,
+ reason="Operation failed.",
+ service_command_description="pacemaker_remote enable",
+ force_code=report_codes.SKIP_ACTION_ON_NODES_ERRORS,
+ )
+ .as_warn("pcmk_remote_enable_failed", "pcmk_remote_enable_failed_warn")
+ .copy(
+ "pcmk_remote_enable_failed",
+ "pcmk_remote_start_failed",
+ service_command_description="pacemaker_remote start",
+ )
+ .as_warn("pcmk_remote_start_failed", "pcmk_remote_start_failed_warn")
+ .error(
+ "authkey_distribution_failed",
+ report_codes.FILE_DISTRIBUTION_ERROR,
+ reason="File already exists",
+ file_description="pacemaker_remote authkey",
+ force_code=report_codes.SKIP_FILE_DISTRIBUTION_ERRORS
+ )
+ .as_warn("authkey_distribution_failed", "authkey_distribution_failed_warn")
+)
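The distribute_authkey fixture base64-encodes the authkey bytes so they
survive the JSON payload of remote/put_file. The round trip on its own:

    import base64

    pcmk_authkey_content = b"password"

    # Bytes -> ASCII text for the JSON payload, as in distribute_authkey.
    data = base64.b64encode(pcmk_authkey_content).decode("utf-8")
    print(data)  # cGFzc3dvcmQ=

    # The receiving side reverses this to recover the key bytes.
    assert base64.b64decode(data) == pcmk_authkey_content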
diff --git a/pcs/lib/commands/test/remote_node/fixtures_remove.py b/pcs/lib/commands/test/remote_node/fixtures_remove.py
new file mode 100644
index 0000000..02b7cb9
--- /dev/null
+++ b/pcs/lib/commands/test/remote_node/fixtures_remove.py
@@ -0,0 +1,174 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.common import report_codes
+from pcs.test.tools import fixture
+
+OFFLINE_ERROR_MSG = "Could not resolve host"
+
+class EnvConfigMixin(object):
+ def __init__(self, call_collection, wrap_helper, config):
+ self.__calls = call_collection
+ self.config = config
+
+ def destroy_pacemaker_remote(
+ self, label=None, address_list=None, result=None, **kwargs
+ ):
+ if kwargs.get("was_connected", True):
+ result = result if result is not None else {
+ "code": "success",
+ "message": "",
+ }
+
+ kwargs["results"] = {
+ "pacemaker_remote stop": result,
+ "pacemaker_remote disable": result
+ }
+ elif result is not None:
+ raise AssertionError(
+ "Keyword 'result' makes no sense with 'was_connected=False'"
+ )
+
+ if label or address_list:
+ if kwargs.get("communication_list", None):
+ raise AssertionError(
+ "Keywords 'label' and 'address_list' makes no sense with"
+ " 'communication_list != None'"
+ )
+ kwargs["communication_list"] = [
+ dict(label=label, address_list=address_list)
+ ]
+
+ self.config.http.manage_services(
+ action_map={
+ "pacemaker_remote stop": {
+ "type": "service_command",
+ "service": "pacemaker_remote",
+ "command": "stop",
+ },
+ "pacemaker_remote disable": {
+ "type": "service_command",
+ "service": "pacemaker_remote",
+ "command": "disable",
+ },
+ },
+ **kwargs
+ )
+
+ def remove_authkey(
+ self, communication_list, result=None, **kwargs
+ ):
+ if kwargs.get("was_connected", True):
+ result = result if result is not None else {
+ "code": "deleted",
+ "message": "",
+ }
+
+ kwargs["results"] = {
+ "pacemaker_remote authkey": result
+ }
+ elif result is not None:
+ raise AssertionError(
+ "Keyword 'result' makes no sense with 'was_connected=False'"
+ )
+ self.config.http.remove_file(
+ communication_list=communication_list,
+ files={
+ "pacemaker_remote authkey": {
+ "type": "pcmk_remote_authkey",
+ }
+ },
+ **kwargs
+ )
+
+REPORTS = (fixture.ReportStore()
+ .info(
+ "pcmk_remote_disable_stop_started",
+ report_codes.SERVICE_COMMANDS_ON_NODES_STARTED,
+ #python 3 has dict_keys so a list is not the right structure
+ action_list={
+ "pacemaker_remote disable": None,
+ "pacemaker_remote stop": None,
+ }.keys(),
+ description="stop of service pacemaker_remote",
+ )
+ .info(
+ "pcmk_remote_disable_success",
+ report_codes.SERVICE_COMMAND_ON_NODE_SUCCESS,
+ service_command_description="pacemaker_remote disable",
+ )
+ .info(
+ "pcmk_remote_stop_success",
+ report_codes.SERVICE_COMMAND_ON_NODE_SUCCESS,
+ service_command_description="pacemaker_remote stop",
+ )
+ .info(
+ "authkey_remove_started" ,
+ report_codes.FILES_REMOVE_FROM_NODE_STARTED,
+ #python 3 has dict_keys so a list is not the right structure
+ file_list={"pacemaker_remote authkey": None}.keys(),
+ description="remote node files",
+ )
+ .info(
+ "authkey_remove_success",
+ report_codes.FILE_REMOVE_FROM_NODE_SUCCESS,
+ file_description="pacemaker_remote authkey",
+ )
+)
+
+EXTRA_REPORTS = (fixture.ReportStore()
+ .error(
+ "manage_services_connection_failed",
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ command="remote/manage_services",
+ reason=OFFLINE_ERROR_MSG,
+ force_code=report_codes.SKIP_OFFLINE_NODES
+ )
+ .as_warn(
+ "manage_services_connection_failed",
+ "manage_services_connection_failed_warn",
+ )
+ .copy(
+ "manage_services_connection_failed",
+ "remove_file_connection_failed",
+ command="remote/remove_file",
+ )
+ .as_warn(
+ "remove_file_connection_failed",
+ "remove_file_connection_failed_warn",
+ )
+ .error(
+ "authkey_remove_failed",
+ report_codes.FILE_REMOVE_FROM_NODE_ERROR,
+ reason="Access denied",
+ file_description="pacemaker_remote authkey",
+ force_code=report_codes.SKIP_FILE_DISTRIBUTION_ERRORS,
+ )
+ .as_warn(
+ "authkey_remove_failed",
+ "authkey_remove_failed_warn",
+ )
+ .error(
+ "pcmk_remote_disable_failed",
+ report_codes.SERVICE_COMMAND_ON_NODE_ERROR,
+ reason="Operation failed.",
+ service_command_description="pacemaker_remote disable",
+ force_code=report_codes.SKIP_ACTION_ON_NODES_ERRORS,
+ )
+ .as_warn(
+ "pcmk_remote_disable_failed",
+ "pcmk_remote_disable_failed_warn",
+ )
+ .copy(
+ "pcmk_remote_disable_failed",
+ "pcmk_remote_stop_failed",
+ service_command_description="pacemaker_remote stop",
+ )
+ .as_warn(
+ "pcmk_remote_stop_failed",
+ "pcmk_remote_stop_failed_warn",
+ )
+)
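Both fixture modules lean on the fluent ReportStore API from
pcs.test.tools.fixture: error() registers a named report, as_warn()
derives the warning a forcible error turns into, and copy() clones an
entry with tweaked fields. The real class is not part of this diff; the
following is only a plausible minimal stand-in to illustrate the
chaining, and its immutability and force_code handling are assumptions
of the sketch.

    class MiniReportStore(object):
        def __init__(self, reports=None):
            self._reports = dict(reports or {})

        def error(self, name, code, **info):
            return self._with(name, ("ERROR", code, info))

        def as_warn(self, source_name, new_name):
            # Assumption: the warning variant keeps everything but force_code.
            _severity, code, info = self._reports[source_name]
            info = {k: v for k, v in info.items() if k != "force_code"}
            return self._with(new_name, ("WARNING", code, info))

        def copy(self, source_name, new_name, **info_update):
            severity, code, info = self._reports[source_name]
            return self._with(
                new_name, (severity, code, dict(info, **info_update))
            )

        def _with(self, name, report):
            new = dict(self._reports)
            new[name] = report
            return MiniReportStore(new)

    store = (MiniReportStore()
        .error(
            "pcmk_remote_stop_failed", "SERVICE_COMMAND_ON_NODE_ERROR",
            reason="Operation failed.",
            force_code="SKIP_ACTION_ON_NODES_ERRORS",
        )
        .as_warn("pcmk_remote_stop_failed", "pcmk_remote_stop_failed_warn")
        .copy(
            "pcmk_remote_stop_failed", "pcmk_remote_disable_failed",
            service_command_description="pacemaker_remote disable",
        )
    )
    print(sorted(store._reports))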
diff --git a/pcs/lib/commands/test/remote_node/test_node_add_guest.py b/pcs/lib/commands/test/remote_node/test_node_add_guest.py
new file mode 100644
index 0000000..d28d6e0
--- /dev/null
+++ b/pcs/lib/commands/test/remote_node/test_node_add_guest.py
@@ -0,0 +1,457 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from functools import partial
+
+from pcs.common import report_codes
+from pcs.lib.commands.remote_node import node_add_guest as node_add_guest_orig
+from pcs.lib.commands.test.remote_node.fixtures_add import (
+ EnvConfigMixin,
+ REPORTS as FIXTURE_REPORTS,
+ EXTRA_REPORTS as FIXTURE_EXTRA_REPORTS,
+ FAIL_HTTP_KWARGS,
+)
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase, mock
+
+
+NODE_NAME = "node-name"
+REMOTE_HOST = "remote-host"
+VIRTUAL_MACHINE_ID = "virtual_machine_id"
+NODE_1 = "node-1"
+NODE_2 = "node-2"
+
+def node_add_guest(
+ env, node_name=NODE_NAME, resource_id=VIRTUAL_MACHINE_ID, options=None,
+ **kwargs
+):
+ options = options or {"remote-addr": REMOTE_HOST}
+ node_add_guest_orig(env, node_name, resource_id, options, **kwargs)
+
+FIXTURE_RESOURCES = """
+ <resources>
+ <primitive class="ocf" id="{0}"
+ provider="heartbeat" type="VirtualDomain"
+ />
+ </resources>
+""".format(VIRTUAL_MACHINE_ID)
+
+FIXTURE_META_ATTRIBUTES = """
+ <meta_attributes id="virtual_machine_id-meta_attributes">
+ <nvpair id="virtual_machine_id-meta_attributes-remote-addr"
+ name="remote-addr" value="remote-host"
+ />
+ <nvpair id="virtual_machine_id-meta_attributes-remote-node"
+ name="remote-node" value="node-name"
+ />
+ </meta_attributes>
+"""
+
+
+class LocalConfig(EnvConfigMixin):
+ def load_cib(self):
+ self.config.runner.cib.load(resources=FIXTURE_RESOURCES)
+
+ def push_cib(self, wait=False, meta_attributes=FIXTURE_META_ATTRIBUTES):
+ self.config.env.push_cib(
+ append={
+ './/resources/primitive[@id="{0}"]'.format(VIRTUAL_MACHINE_ID): meta_attributes,
+ },
+ wait=wait
+ )
+
+get_env_tools = partial(get_env_tools, local_extensions={"local": LocalConfig})
+
+def base_reports_for_host(host=REMOTE_HOST):
+ return (
+ FIXTURE_REPORTS
+ .adapt("authkey_distribution_started", node_list=[host])
+ .adapt("authkey_distribution_success", node=host)
+ .adapt("pcmk_remote_start_enable_started", node_list=[host])
+ .adapt("pcmk_remote_enable_success", node=host)
+ .adapt("pcmk_remote_start_success", node=host)
+ )
+
+REPORTS = base_reports_for_host()
+
+EXTRA_REPORTS = (FIXTURE_EXTRA_REPORTS.adapt_multi(
+ [
+ "manage_services_connection_failed",
+ "manage_services_connection_failed_warn",
+ "check_availability_connection_failed",
+ "check_availability_connection_failed_warn",
+ "put_file_connection_failed",
+ "put_file_connection_failed_warn",
+ "pcmk_remote_enable_failed",
+ "pcmk_remote_enable_failed_warn",
+ "pcmk_remote_start_failed",
+ "pcmk_remote_start_failed_warn",
+ "authkey_distribution_failed",
+ "authkey_distribution_failed_warn",
+ ],
+ node=REMOTE_HOST
+))
+
+class AddGuest(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+
+ def test_success_base(self):
+ (self.config
+ .local.load_cib()
+ .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST)
+ .local.push_existing_authkey_to_remote(REMOTE_HOST)
+ .local.run_pacemaker_remote(REMOTE_HOST)
+ .local.push_cib()
+ )
+ node_add_guest(self.env_assist.get_env())
+ self.env_assist.assert_reports(REPORTS)
+
+
+ @mock.patch("pcs.lib.commands.remote_node.generate_key")
+ def test_success_generated_authkey(self, generate_key):
+ generate_key.return_value = b"password"
+ (self.config
+ .local.load_cib()
+ .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST, result=True)
+ .local.authkey_exists(return_value=False)
+ .local.distribute_authkey(
+ communication_list=[
+ dict(label=NODE_1),
+ dict(label=NODE_2),
+ dict(label=REMOTE_HOST),
+ ],
+ pcmk_authkey_content=generate_key.return_value,
+ )
+ .local.run_pacemaker_remote(REMOTE_HOST)
+ .local.push_cib()
+ )
+ node_add_guest(self.env_assist.get_env())
+ generate_key.assert_called_once_with()
+ self.env_assist.assert_reports(
+ REPORTS
+ .adapt(
+ "authkey_distribution_started",
+ node_list=[NODE_1, NODE_2, REMOTE_HOST]
+ )
+ .copy(
+ "authkey_distribution_success",
+ "authkey_distribution_success_node1",
+ node=NODE_1,
+ )
+ .copy(
+ "authkey_distribution_success",
+ "authkey_distribution_success_node2",
+ node=NODE_2,
+ )
+ )
+
+ def test_can_skip_all_offline(self):
+ pcmk_authkey_content = b"password"
+ (self.config
+ .local.load_cib()
+ .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST, **FAIL_HTTP_KWARGS)
+ .local.authkey_exists(return_value=True)
+ .local.open_authkey(pcmk_authkey_content)
+ .local.distribute_authkey(
+ communication_list=[dict(label=REMOTE_HOST)],
+ pcmk_authkey_content=pcmk_authkey_content,
+ **FAIL_HTTP_KWARGS
+ )
+ .local.run_pacemaker_remote(REMOTE_HOST, **FAIL_HTTP_KWARGS)
+ .local.push_cib()
+ )
+ node_add_guest(self.env_assist.get_env(), skip_offline_nodes=True)
+ self.env_assist.assert_reports(
+ REPORTS.select(
+ "authkey_distribution_started",
+ "pcmk_remote_start_enable_started",
+ )
+ +
+ EXTRA_REPORTS.select(
+ "check_availability_connection_failed_warn",
+ "put_file_connection_failed_warn",
+ "manage_services_connection_failed_warn",
+ )
+ )
+
+ def test_changed_options(self):
+ meta_attributes = """
+ <meta_attributes id="virtual_machine_id-meta_attributes">
+ <nvpair
+ id="virtual_machine_id-meta_attributes-remote-connect-timeout"
+ name="remote-connect-timeout" value="20"
+ />
+ <nvpair
+ id="virtual_machine_id-meta_attributes-remote-node"
+ name="remote-node" value="node-name"
+ />
+ <nvpair
+ id="virtual_machine_id-meta_attributes-remote-port"
+ name="remote-port" value="1234"
+ />
+ </meta_attributes>
+ """
+ (self.config
+ .local.load_cib()
+ .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
+ .local.check_node_availability(NODE_NAME)
+ .local.push_existing_authkey_to_remote(NODE_NAME)
+ .local.run_pacemaker_remote(NODE_NAME)
+ .local.push_cib(meta_attributes=meta_attributes)
+ )
+ node_add_guest(self.env_assist.get_env(), options={
+ #remote-addr is omitted here
+ "remote-port": 1234,
+ "remote-connect-timeout": 20
+ })
+ self.env_assist.assert_reports(base_reports_for_host(NODE_NAME))
+
+ def test_nonexistent_resource(self):
+ (self.config
+ .local.load_cib()
+ .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_guest(
+ self.env_assist.get_env(),
+ resource_id="NOEXISTENT"
+ ),
+ [
+ fixture.error(
+ report_codes.ID_NOT_FOUND,
+ expected_types=["primitive"],
+ context_type="resources",
+ id="NOEXISTENT",
+ context_id=""
+ )
+ ],
+ )
+
+ def test_validate_values(self):
+ (self.config
+ .local.load_cib()
+ .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_guest(
+ self.env_assist.get_env(),
+ node_name="*name",
+ options={
+ "remote-addr": "*addr",
+ "remote-port": "abc",
+ "remote-connect-timeout": "def",
+ }
+ ),
+ [
+ fixture.error(
+ report_codes.INVALID_OPTION_VALUE,
+ option_name="remote-connect-timeout",
+ option_value="def",
+ allowed_values="time interval (e.g. 1, 2s, 3m, 4h, ...)"
+ ),
+ fixture.error(
+ report_codes.INVALID_OPTION_VALUE,
+ option_name="remote-port",
+ option_value="abc",
+ allowed_values="a port number (1-65535)"
+ )
+ ]
+ )
+
+class WithWait(TestCase):
+ def setUp(self):
+ self.wait = 1
+ self.env_assist, self.config = get_env_tools(self)
+ (self.config
+ .runner.pcmk.can_wait()
+ .local.load_cib()
+ .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST)
+ .local.push_existing_authkey_to_remote(REMOTE_HOST)
+ .local.run_pacemaker_remote(REMOTE_HOST)
+ .local.push_cib(wait=self.wait)
+ )
+
+ def test_success_when_resource_started(self):
+ (self.config
+ .runner.pcmk.load_state(raw_resources=dict(
+ resource_id=VIRTUAL_MACHINE_ID,
+ resource_agent="ocf::pacemaker:remote",
+ node_name=NODE_1,
+ ))
+ )
+ node_add_guest(self.env_assist.get_env(), wait=self.wait)
+ self.env_assist.assert_reports(
+ REPORTS
+ .info(
+ "resource_running",
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ roles_with_nodes={"Started": [NODE_1]},
+ resource_id=VIRTUAL_MACHINE_ID
+ )
+ )
+
+ def test_fail_when_resource_not_started(self):
+ (self.config
+ .runner.pcmk.load_state(raw_resources=dict(
+ resource_id=VIRTUAL_MACHINE_ID,
+ resource_agent="ocf::pacemaker:remote",
+ node_name=NODE_1,
+ failed="true",
+ ))
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_guest(self.env_assist.get_env(), wait=self.wait),
+ [
+ fixture.error(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id=VIRTUAL_MACHINE_ID,
+ )
+ ]
+ )
+ self.env_assist.assert_reports(REPORTS)
+
+class RemoteService(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ (self.config
+ .local.load_cib()
+ .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST)
+ .local.push_existing_authkey_to_remote(REMOTE_HOST)
+ )
+ def test_fails_when_offline(self):
+ (self.config
+ .local.run_pacemaker_remote(label=REMOTE_HOST, **FAIL_HTTP_KWARGS)
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_guest(self.env_assist.get_env()),
+ )
+
+ self.env_assist.assert_reports(
+ REPORTS[:"pcmk_remote_enable_success"]
+ +
+ EXTRA_REPORTS.select("manage_services_connection_failed")
+ )
+
+ def test_fail_when_remotely_fail(self):
+ (self.config
+ .local.run_pacemaker_remote(REMOTE_HOST, result={
+ "code": "fail",
+ "message": "Action failed",
+ })
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_guest(self.env_assist.get_env()),
+ )
+ self.env_assist.assert_reports(
+ REPORTS[:"pcmk_remote_enable_success"] + EXTRA_REPORTS.select(
+ "pcmk_remote_enable_failed",
+ "pcmk_remote_start_failed",
+ )
+ )
+
+ def test_forceable_when_remotely_fail(self):
+ (self.config
+ .local.run_pacemaker_remote(REMOTE_HOST, result={
+ "code": "fail",
+ "message": "Action failed",
+ })
+ .local.push_cib()
+ )
+ node_add_guest(
+ self.env_assist.get_env(),
+ allow_pacemaker_remote_service_fail=True
+ )
+
+ self.env_assist.assert_reports(
+ REPORTS[:"pcmk_remote_enable_success"] + EXTRA_REPORTS.select(
+ "pcmk_remote_enable_failed_warn",
+ "pcmk_remote_start_failed_warn",
+ )
+ )
+
+class AuthkeyDistribution(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ (self.config
+ .local.load_cib()
+ .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST)
+ )
+
+ def test_fails_when_offline(self):
+ pcmk_authkey_content = b"password"
+ (self.config
+ .local.authkey_exists(return_value=True)
+ .local.open_authkey(pcmk_authkey_content)
+ .local.distribute_authkey(
+ communication_list=[dict(label=REMOTE_HOST)],
+ pcmk_authkey_content=pcmk_authkey_content,
+ **FAIL_HTTP_KWARGS
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_guest(self.env_assist.get_env())
+ )
+ self.env_assist.assert_reports(
+ REPORTS[:"authkey_distribution_success"] + EXTRA_REPORTS.only(
+ "manage_services_connection_failed",
+ command="remote/put_file",
+ )
+ )
+
+ def test_fail_when_remotely_fail(self):
+ (self.config
+ .local.push_existing_authkey_to_remote(
+ REMOTE_HOST,
+ distribution_result={
+ "code": "conflict",
+ "message": "",
+ }
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_guest(self.env_assist.get_env())
+ )
+
+ self.env_assist.assert_reports(
+ REPORTS[:"authkey_distribution_success"]
+ +
+ EXTRA_REPORTS.select("authkey_distribution_failed")
+ )
+
+ def test_forceable_when_remotely_fail(self):
+ (self.config
+ .local.push_existing_authkey_to_remote(
+ REMOTE_HOST,
+ distribution_result={
+ "code": "conflict",
+ "message": "",
+ }
+ )
+ .local.run_pacemaker_remote(REMOTE_HOST)
+ .local.push_cib()
+ )
+
+ node_add_guest(
+ self.env_assist.get_env(),
+ allow_incomplete_distribution=True,
+ )
+
+ self.env_assist.assert_reports(
+ REPORTS.remove("authkey_distribution_success")
+ +
+ EXTRA_REPORTS.select("authkey_distribution_failed_warn")
+ )
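Several assertions above slice the store by name, e.g.
REPORTS[:"pcmk_remote_enable_success"], which reads as "every fixture up
to, but not including, the named one". One way such name-keyed slicing
might be implemented is sketched below; this is a guess at the
mechanism, not the pcs implementation.

    class NamedSeq(object):
        """Minimal sequence sliceable by element name (sketch only)."""
        def __init__(self, pairs):
            self._pairs = list(pairs)

        def __getitem__(self, key):
            if isinstance(key, slice) and isinstance(key.stop, str):
                names = [name for name, _ in self._pairs]
                key = slice(key.start, names.index(key.stop), key.step)
            return NamedSeq(self._pairs[key])

        def __repr__(self):
            return "NamedSeq({0})".format([name for name, _ in self._pairs])

    reports = NamedSeq([
        ("started", "..."), ("enable_success", "..."), ("start_success", "..."),
    ])
    print(reports[:"enable_success"])  # NamedSeq(['started'])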
diff --git a/pcs/lib/commands/test/remote_node/test_node_add_remote.py b/pcs/lib/commands/test/remote_node/test_node_add_remote.py
new file mode 100644
index 0000000..6e4a7ff
--- /dev/null
+++ b/pcs/lib/commands/test/remote_node/test_node_add_remote.py
@@ -0,0 +1,518 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from functools import partial
+
+from pcs.common import report_codes, env_file_role_codes
+from pcs.lib.commands.remote_node import node_add_remote as node_add_remote_orig
+from pcs.lib.commands.test.remote_node.fixtures_add import (
+ EnvConfigMixin,
+ REPORTS as FIXTURE_REPORTS,
+ EXTRA_REPORTS as FIXTURE_EXTRA_REPORTS,
+ FAIL_HTTP_KWARGS,
+)
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase, mock
+
+
+REMOTE_HOST = "remote-host"
+NODE_NAME = "node-name"
+NODE_1 = "node-1"
+NODE_2 = "node-2"
+
+def node_add_remote(
+ env, host=None, node_name=None, operations=None, meta_attributes=None,
+ instance_attributes=None, **kwargs
+):
+ operations = operations or []
+ meta_attributes = meta_attributes or {}
+ instance_attributes = instance_attributes or {}
+ host = host or REMOTE_HOST
+ node_name = node_name or NODE_NAME
+
+ node_add_remote_orig(
+ env, host, node_name, operations, meta_attributes, instance_attributes,
+ **kwargs
+ )
+
+class LocalConfig(EnvConfigMixin):
+ def load_cluster_configs(self, cluster_node_list):
+ (self.config
+ .runner.cib.load()
+ .corosync_conf.load(node_name_list=cluster_node_list)
+ .runner.pcmk.load_agent(agent_name="ocf:pacemaker:remote")
+ )
+
+get_env_tools = partial(get_env_tools, local_extensions={"local": LocalConfig})
+
+REPORTS = (FIXTURE_REPORTS
+ .adapt("authkey_distribution_started", node_list=[REMOTE_HOST])
+ .adapt("authkey_distribution_success", node=REMOTE_HOST)
+ .adapt("pcmk_remote_start_enable_started", node_list=[REMOTE_HOST])
+ .adapt("pcmk_remote_enable_success", node=REMOTE_HOST)
+ .adapt("pcmk_remote_start_success", node=REMOTE_HOST)
+)
+EXTRA_REPORTS = (FIXTURE_EXTRA_REPORTS.adapt_multi(
+ [
+ "manage_services_connection_failed",
+ "manage_services_connection_failed_warn",
+ "check_availability_connection_failed",
+ "check_availability_connection_failed_warn",
+ "put_file_connection_failed",
+ "put_file_connection_failed_warn",
+ "pcmk_remote_enable_failed",
+ "pcmk_remote_enable_failed_warn",
+ "pcmk_remote_start_failed",
+ "pcmk_remote_start_failed_warn",
+ "authkey_distribution_failed",
+ "authkey_distribution_failed_warn",
+ ],
+ node=REMOTE_HOST
+))
+
+
+FIXTURE_RESOURCES = """
+ <resources>
+ <primitive class="ocf" id="node-name" provider="pacemaker"
+ type="remote"
+ >
+ <instance_attributes id="node-name-instance_attributes">
+ <nvpair
+ id="node-name-instance_attributes-server"
+ name="server" value="remote-host"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="node-name-migrate_from-interval-0s"
+ interval="0s" name="migrate_from" timeout="60"
+ />
+ <op id="node-name-migrate_to-interval-0s"
+ interval="0s" name="migrate_to" timeout="60"
+ />
+ <op id="node-name-monitor-interval-60s"
+ interval="60s" name="monitor" timeout="30"
+ />
+ <op id="node-name-reload-interval-0s"
+ interval="0s" name="reload" timeout="60"
+ />
+ <op id="node-name-start-interval-0s"
+ interval="0s" name="start" timeout="60"
+ />
+ <op id="node-name-stop-interval-0s"
+ interval="0s" name="stop" timeout="60"
+ />
+ </operations>
+ </primitive>
+ </resources>
+"""
+
+class AddRemote(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+
+ def test_success_base(self):
+ (self.config
+ .local.load_cluster_configs(cluster_node_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST, result=True)
+ .local.push_existing_authkey_to_remote(REMOTE_HOST)
+ .local.run_pacemaker_remote(REMOTE_HOST)
+ .env.push_cib(resources=FIXTURE_RESOURCES)
+ )
+ node_add_remote(self.env_assist.get_env())
+ self.env_assist.assert_reports(REPORTS)
+
+ def test_success_base_host_as_name(self):
+ #validation and creation of the resource are covered in resource create tests
+ (self.config
+ .local.load_cluster_configs(cluster_node_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST, result=True)
+ .local.push_existing_authkey_to_remote(REMOTE_HOST)
+ .local.run_pacemaker_remote(REMOTE_HOST)
+ .env.push_cib(
+ resources="""
+ <resources>
+ <primitive class="ocf" id="remote-host"
+ provider="pacemaker" type="remote"
+ >
+ <operations>
+ <op id="remote-host-migrate_from-interval-0s"
+ interval="0s" name="migrate_from" timeout="60"
+ />
+ <op id="remote-host-migrate_to-interval-0s"
+ interval="0s" name="migrate_to" timeout="60"
+ />
+ <op id="remote-host-monitor-interval-60s"
+ interval="60s" name="monitor" timeout="30"
+ />
+ <op id="remote-host-reload-interval-0s"
+ interval="0s" name="reload" timeout="60"
+ />
+ <op id="remote-host-start-interval-0s"
+ interval="0s" name="start" timeout="60"
+ />
+ <op id="remote-host-stop-interval-0s"
+ interval="0s" name="stop" timeout="60"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """
+ )
+ )
+ node_add_remote(self.env_assist.get_env(), node_name=REMOTE_HOST)
+ self.env_assist.assert_reports(REPORTS)
+
+ def test_node_name_conflict_report_is_unique(self):
+ (self.config
+ .runner.cib.load(
+ resources="""
+ <resources>
+ <primitive class="ocf" id="node-name"
+ provider="pacemaker" type="remote"
+ />
+ </resources>
+ """
+ )
+ .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
+ .runner.pcmk.load_agent(agent_name="ocf:pacemaker:remote")
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_remote(self.env_assist.get_env()),
+ [
+ fixture.error(
+ report_codes.ID_ALREADY_EXISTS,
+ id=NODE_NAME,
+ )
+ ]
+ )
+
+ @mock.patch("pcs.lib.commands.remote_node.generate_key")
+ def test_success_generated_authkey(self, generate_key):
+ generate_key.return_value = b"password"
+ (self.config
+ .local.load_cluster_configs(cluster_node_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST, result=True)
+ .local.authkey_exists(return_value=False)
+ .local.distribute_authkey(
+ communication_list=[
+ dict(label=NODE_1),
+ dict(label=NODE_2),
+ dict(label=REMOTE_HOST),
+ ],
+ pcmk_authkey_content=generate_key.return_value,
+ )
+ .local.run_pacemaker_remote(REMOTE_HOST)
+ .env.push_cib(resources=FIXTURE_RESOURCES)
+ )
+ node_add_remote(self.env_assist.get_env())
+ generate_key.assert_called_once_with()
+ self.env_assist.assert_reports(
+ REPORTS
+ .adapt(
+ "authkey_distribution_started",
+ node_list=[NODE_1, NODE_2, REMOTE_HOST]
+ )
+ .copy(
+ "authkey_distribution_success",
+ "authkey_distribution_success_node1",
+ node=NODE_1,
+ )
+ .copy(
+ "authkey_distribution_success",
+ "authkey_distribution_success_node2",
+ node=NODE_2,
+ )
+ )
+
+ def test_can_skip_all_offline(self):
+ pcmk_authkey_content = b"password"
+ (self.config
+ .local.load_cluster_configs(cluster_node_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST, **FAIL_HTTP_KWARGS)
+ .local.authkey_exists(return_value=True)
+ .local.open_authkey(pcmk_authkey_content)
+ .local.distribute_authkey(
+ communication_list=[dict(label=REMOTE_HOST)],
+ pcmk_authkey_content=pcmk_authkey_content,
+ **FAIL_HTTP_KWARGS
+ )
+ .local.run_pacemaker_remote(REMOTE_HOST, **FAIL_HTTP_KWARGS)
+ .env.push_cib(resources=FIXTURE_RESOURCES)
+ )
+ node_add_remote(self.env_assist.get_env(), skip_offline_nodes=True)
+ self.env_assist.assert_reports(
+ REPORTS.select(
+ "authkey_distribution_started",
+ "pcmk_remote_start_enable_started",
+ )
+ +
+ EXTRA_REPORTS.select(
+ "check_availability_connection_failed_warn",
+ "put_file_connection_failed_warn",
+ "manage_services_connection_failed_warn",
+ )
+ )
+
+ def test_fails_when_remote_node_is_not_prepared(self):
+ (self.config
+ .local.load_cluster_configs(cluster_node_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST, result=False)
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_remote(self.env_assist.get_env()),
+ [
+ fixture.error(
+ report_codes.CANNOT_ADD_NODE_IS_IN_CLUSTER,
+ node=REMOTE_HOST,
+ )
+ ]
+ )
+
+ def test_fails_when_remote_node_returns_invalid_output(self):
+ (self.config
+ .local.load_cluster_configs(cluster_node_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST, output="INVALID_OUTPUT")
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_remote(self.env_assist.get_env()),
+ [
+ fixture.error(
+ report_codes.INVALID_RESPONSE_FORMAT,
+ node=REMOTE_HOST,
+ )
+ ]
+ )
+
+ def test_open_failed(self):
+ (self.config
+ .local.load_cluster_configs(cluster_node_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST, result=True)
+ .local.authkey_exists(return_value=True)
+ .local.open_authkey(fail=True)
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_remote(
+ self.env_assist.get_env(),
+ ),
+ [
+ fixture.error(
+ report_codes.FILE_IO_ERROR,
+ file_role=env_file_role_codes.PACEMAKER_AUTHKEY,
+ file_path=LocalConfig.PCMK_AUTHKEY_PATH,
+ operation="read",
+ )
+ ],
+ expected_in_processor=False
+ )
+
+ def test_validate_host_already_exists(self):
+ (self.config
+ .local.load_cluster_configs(cluster_node_list=[NODE_1, NODE_2])
+ )
+ #more validation tests in pcs/lib/cib/test/test_resource_remote_node.py
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_remote(
+ self.env_assist.get_env(),
+ host=NODE_1,
+ ),
+ [
+ fixture.error(
+ report_codes.ID_ALREADY_EXISTS,
+ id=NODE_1
+ )
+ ]
+ )
+
+class WithWait(TestCase):
+ def setUp(self):
+ self.wait = 1
+ self.env_assist, self.config = get_env_tools(self)
+ (self.config
+ .runner.pcmk.can_wait()
+ .local.load_cluster_configs(cluster_node_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST, result=True)
+ .local.push_existing_authkey_to_remote(REMOTE_HOST)
+ .local.run_pacemaker_remote(REMOTE_HOST)
+ .env.push_cib(resources=FIXTURE_RESOURCES, wait=self.wait)
+ )
+
+ def test_success_when_resource_started(self):
+ (self.config
+ .runner.pcmk.load_state(raw_resources=dict(
+ resource_id=NODE_NAME,
+ resource_agent="ocf::pacemaker:remote",
+ node_name=NODE_1,
+ ))
+ )
+ node_add_remote(self.env_assist.get_env(), wait=self.wait)
+ self.env_assist.assert_reports(
+ REPORTS
+ .info(
+ "resource_running",
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ roles_with_nodes={"Started": [NODE_1]},
+ resource_id=NODE_NAME
+ )
+ )
+
+ def test_fail_when_resource_not_started(self):
+ (self.config
+ .runner.pcmk.load_state(raw_resources=dict(
+ resource_id=NODE_NAME,
+ resource_agent="ocf::pacemaker:remote",
+ node_name=NODE_1,
+ failed="true",
+ ))
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_remote(self.env_assist.get_env(), wait=self.wait),
+ [
+ fixture.error(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id=NODE_NAME,
+ )
+ ]
+ )
+ self.env_assist.assert_reports(REPORTS)
+
+class AddRemotePcmkRemoteService(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ (self.config
+ .local.load_cluster_configs(cluster_node_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST, result=True)
+ .local.push_existing_authkey_to_remote(REMOTE_HOST)
+ )
+
+ def test_fails_when_offline(self):
+ (self.config
+ .local.run_pacemaker_remote(label=REMOTE_HOST, **FAIL_HTTP_KWARGS)
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_remote(self.env_assist.get_env())
+ )
+
+ self.env_assist.assert_reports(
+ REPORTS[:"pcmk_remote_enable_success"]
+ +
+ EXTRA_REPORTS.select("manage_services_connection_failed")
+ )
+
+ def test_fail_when_remotely_fail(self):
+ (self.config
+ .local.run_pacemaker_remote(REMOTE_HOST, result={
+ "code": "fail",
+ "message": "Action failed",
+ })
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_remote(self.env_assist.get_env())
+ )
+ self.env_assist.assert_reports(
+ REPORTS[:"pcmk_remote_enable_success"] + EXTRA_REPORTS.select(
+ "pcmk_remote_enable_failed",
+ "pcmk_remote_start_failed",
+ )
+ )
+
+ def test_forceable_when_remotely_fail(self):
+ (self.config
+ .local.run_pacemaker_remote(REMOTE_HOST, result={
+ "code": "fail",
+ "message": "Action failed",
+ })
+ .env.push_cib(resources=FIXTURE_RESOURCES)
+ )
+ node_add_remote(
+ self.env_assist.get_env(),
+ allow_pacemaker_remote_service_fail=True
+ )
+
+ self.env_assist.assert_reports(
+ REPORTS[:"pcmk_remote_enable_success"] + EXTRA_REPORTS.select(
+ "pcmk_remote_enable_failed_warn",
+ "pcmk_remote_start_failed_warn",
+ )
+ )
+
+class AddRemoteAuthkeyDistribution(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ (self.config
+ .local.load_cluster_configs(cluster_node_list=[NODE_1, NODE_2])
+ .local.check_node_availability(REMOTE_HOST, result=True)
+ )
+
+ def test_fails_when_offline(self):
+ pcmk_authkey_content = b"password"
+ (self.config
+ .local.authkey_exists(return_value=True)
+ .local.open_authkey(pcmk_authkey_content)
+ .local.distribute_authkey(
+ communication_list=[dict(label=REMOTE_HOST)],
+ pcmk_authkey_content=pcmk_authkey_content,
+ **FAIL_HTTP_KWARGS
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_remote(self.env_assist.get_env())
+ )
+ self.env_assist.assert_reports(
+ REPORTS[:"authkey_distribution_success"]
+ +
+ EXTRA_REPORTS.only(
+ "manage_services_connection_failed",
+ command="remote/put_file",
+ )
+ )
+
+    def test_fails_when_remotely_fails(self):
+ (self.config
+ .local.push_existing_authkey_to_remote(
+ REMOTE_HOST,
+ distribution_result={
+ "code": "conflict",
+ "message": "",
+ }
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: node_add_remote(self.env_assist.get_env())
+ )
+
+ self.env_assist.assert_reports(
+ REPORTS[:"authkey_distribution_success"]
+ +
+ EXTRA_REPORTS.select("authkey_distribution_failed")
+ )
+
+ def test_forceable_when_remotely_fail(self):
+ (self.config
+ .local.push_existing_authkey_to_remote(
+ REMOTE_HOST,
+ distribution_result={
+ "code": "conflict",
+ "message": "",
+ }
+ )
+ .local.run_pacemaker_remote(REMOTE_HOST)
+ .env.push_cib(resources=FIXTURE_RESOURCES)
+ )
+
+ node_add_remote(
+ self.env_assist.get_env(),
+ allow_incomplete_distribution=True,
+ )
+
+ self.env_assist.assert_reports(
+ REPORTS.remove("authkey_distribution_success")
+ +
+ EXTRA_REPORTS.select("authkey_distribution_failed_warn")
+ )
diff --git a/pcs/lib/commands/test/remote_node/test_node_remove_guest.py b/pcs/lib/commands/test/remote_node/test_node_remove_guest.py
new file mode 100644
index 0000000..5da294d
--- /dev/null
+++ b/pcs/lib/commands/test/remote_node/test_node_remove_guest.py
@@ -0,0 +1,474 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from functools import partial
+
+from pcs.common import report_codes
+from pcs.lib.commands.remote_node import (
+ node_remove_guest as node_remove_guest_orig
+)
+from pcs.lib.commands.test.remote_node.fixtures_add import FAIL_HTTP_KWARGS
+from pcs.lib.commands.test.remote_node.fixtures_remove import (
+ EnvConfigMixin,
+ REPORTS as FIXTURE_REPORTS,
+ EXTRA_REPORTS as FIXTURE_EXTRA_REPORTS,
+)
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase
+
+
+REMOTE_HOST = "remote-host"
+NODE_NAME = "node-name"
+VIRTUAL_MACHINE_ID = "virtual_machine_id"
+
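+# Convenience wrapper: tests call the command with a default node identifier
+# and only spell out the arguments a particular scenario cares about.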
+def node_remove_guest(env, node_identifier=REMOTE_HOST, **kwargs):
+ node_remove_guest_orig(env, node_identifier, **kwargs)
+
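+# Shared removal report fixtures adapted to this module's names; REPORTS is
+# the full report sequence of a successful guest node removal.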
+REPORTS = (FIXTURE_REPORTS
+ .adapt("pcmk_remote_disable_stop_started", node_list=[NODE_NAME])
+ .adapt("pcmk_remote_disable_success", node=NODE_NAME)
+ .adapt("pcmk_remote_stop_success", node=NODE_NAME)
+ .adapt("authkey_remove_started", node_list=[NODE_NAME])
+ .adapt("authkey_remove_success", node=NODE_NAME)
+)
+
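+# Failure and warning variants of the reports above, likewise adapted to the
+# host and node names used in this module.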
+EXTRA_REPORTS = (FIXTURE_EXTRA_REPORTS
+ .adapt_multi(
+ [
+ "manage_services_connection_failed",
+ "manage_services_connection_failed_warn",
+ "remove_file_connection_failed",
+ "remove_file_connection_failed_warn",
+ ],
+ node=REMOTE_HOST
+ )
+ .adapt_multi(
+ [
+ "authkey_remove_failed",
+ "authkey_remove_failed_warn",
+ "pcmk_remote_disable_failed",
+ "pcmk_remote_disable_failed_warn",
+ "pcmk_remote_stop_failed",
+ "pcmk_remote_stop_failed_warn",
+ ],
+ node=NODE_NAME
+ )
+)
+
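+# CIB <resources> snippet with a single VirtualDomain primitive whose
+# remote-node and remote-addr meta attributes turn it into a guest node.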
+FIXTURE_RESOURCES = """
+ <resources>
+ <primitive class="ocf" id="{0}"
+ provider="heartbeat" type="VirtualDomain"
+ >
+ <meta_attributes id="virtual_machine_id-meta_attributes">
+ <nvpair id="virtual_machine_id-meta_attributes-remote-addr"
+ name="remote-addr" value="{1}"
+ />
+ <nvpair id="virtual_machine_id-meta_attributes-remote-node"
+ name="remote-node" value="{2}"
+ />
+ </meta_attributes>
+ </primitive>
+ </resources>
+""".format(VIRTUAL_MACHINE_ID, REMOTE_HOST, NODE_NAME)
+
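+# Expose EnvConfigMixin under the "local" namespace so scenarios can be
+# chained as self.config.local.<step>.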
+get_env_tools = partial(get_env_tools, local_extensions={
+ "local": EnvConfigMixin
+})
+
+class RemoveGuest(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+
+ def find_by(self, identifier):
+ (self.config
+ .runner.cib.load(resources=FIXTURE_RESOURCES)
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST]
+ )
+ .local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ )
+ .env.push_cib(remove=".//primitive/meta_attributes")
+ .runner.pcmk.remove_node(NODE_NAME)
+ )
+ node_remove_guest(self.env_assist.get_env(), node_identifier=identifier)
+ self.env_assist.assert_reports(REPORTS)
+
+ def test_success_base(self):
+ self.find_by(REMOTE_HOST)
+
+ def test_can_find_by_node_name(self):
+ self.find_by(NODE_NAME)
+
+ def test_can_find_by_resource_id(self):
+ self.find_by(VIRTUAL_MACHINE_ID)
+
+class RemoveGuestOthers(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+
+ def test_success_with_wait(self):
+ wait = 10
+ (self.config
+ .runner.pcmk.can_wait()
+ .runner.cib.load(resources=FIXTURE_RESOURCES)
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST]
+ )
+ .local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ )
+ .env.push_cib(remove=".//primitive/meta_attributes", wait=wait)
+ .runner.pcmk.remove_node(NODE_NAME)
+ )
+ node_remove_guest(self.env_assist.get_env(), wait=wait)
+ self.env_assist.assert_reports(REPORTS)
+
+ def test_can_skip_all_offline(self):
+ (self.config
+ .runner.cib.load(resources=FIXTURE_RESOURCES)
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST],
+ **FAIL_HTTP_KWARGS
+ )
+ .local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ **FAIL_HTTP_KWARGS
+ )
+ .env.push_cib(remove=".//primitive/meta_attributes")
+ .runner.pcmk.remove_node(NODE_NAME)
+ )
+ node_remove_guest(self.env_assist.get_env(), skip_offline_nodes=True)
+ self.env_assist.assert_reports(
+ REPORTS.remove(
+ "pcmk_remote_disable_success",
+ "pcmk_remote_stop_success",
+ "authkey_remove_success",
+ )
+ +
+ EXTRA_REPORTS.select(
+ "manage_services_connection_failed_warn",
+ "remove_file_connection_failed_warn"
+ ))
+
+ def test_fail_when_identifier_not_found(self):
+ (self.config
+ .runner.cib.load(resources=FIXTURE_RESOURCES)
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_remove_guest(
+ self.env_assist.get_env(),
+                node_identifier="NONEXISTENT"
+ ),
+ [
+ fixture.error(
+ report_codes.NODE_NOT_FOUND,
+                    node="NONEXISTENT",
+ searched_types="guest",
+ )
+ ],
+ expected_in_processor=False
+ )
+
+class MultipleResults(TestCase):
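+    # Resource A matches REMOTE_HOST by its remote-addr, B by its resource
+    # id and C by its remote-node value, so removal by REMOTE_HOST finds
+    # three candidates.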
+ fixture_multi_resources = """
+ <resources>
+ <primitive class="ocf" id="{0}"
+ provider="heartbeat" type="VirtualDomain"
+ >
+ <meta_attributes id="A-M">
+ <nvpair id="A-M-RAddr" name="remote-addr" value="{1}"/>
+ <nvpair id="A-M-RNode" name="remote-node" value="{2}"/>
+ </meta_attributes>
+ </primitive>
+
+ <primitive class="ocf" id="{1}"
+ provider="heartbeat" type="VirtualDomain"
+ >
+ <meta_attributes id="B-M">
+ <nvpair id="B-M-RAddr" name="remote-addr" value="{3}"/>
+ <nvpair id="B-M-RNode" name="remote-node" value="{4}"/>
+ </meta_attributes>
+ </primitive>
+
+ <primitive class="ocf" id="C"
+ provider="heartbeat" type="VirtualDomain"
+ >
+ <meta_attributes id="C-M">
+ <nvpair id="C-M-RAddr" name="remote-addr" value="{2}"/>
+ <nvpair id="C-M-RNode" name="remote-node" value="{1}"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ """.format(
+ VIRTUAL_MACHINE_ID, REMOTE_HOST, NODE_NAME, "B-ADDR", "B-NAME"
+ )
+
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ self.config.runner.cib.load(resources=self.fixture_multi_resources)
+ self.multiple_result_reports = (fixture.ReportStore()
+ .error(
+ "multiple_result_found",
+ report_codes.MULTIPLE_RESULTS_FOUND,
+ result_identifier_list=[
+ VIRTUAL_MACHINE_ID,
+ REMOTE_HOST,
+ "C",
+ ],
+ result_type="resource",
+ search_description=REMOTE_HOST,
+ force_code=report_codes.FORCE_REMOVE_MULTIPLE_NODES
+ )
+ .as_warn(
+ "multiple_result_found",
+ "multiple_result_found_warn",
+ )
+ )
+
+ def test_fail(self):
+ self.env_assist.assert_raise_library_error(
+ lambda: node_remove_guest(
+ self.env_assist.get_env(),
+ node_identifier=REMOTE_HOST
+ ),
+ self.multiple_result_reports.select("multiple_result_found").reports
+ )
+
+    def test_force(self):
+ (self.config
+ .local.destroy_pacemaker_remote(
+ communication_list=[
+ dict(label="B-NAME", address_list=["B-ADDR"]),
+ dict(label=REMOTE_HOST, address_list=[NODE_NAME]),
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST]),
+ ],
+ )
+ .local.remove_authkey(
+ communication_list=[
+ dict(label="B-NAME", address_list=["B-ADDR"]),
+ dict(label=REMOTE_HOST, address_list=[NODE_NAME]),
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST]),
+ ],
+ )
+ .env.push_cib(remove=[
+ ".//meta_attributes[@id='A-M']",
+ ".//meta_attributes[@id='B-M']",
+ ".//meta_attributes[@id='C-M']",
+ ])
+ .runner.pcmk.remove_node("B-NAME", name="runner.pcmk.remove_node3")
+ .runner.pcmk.remove_node(REMOTE_HOST)
+ .runner.pcmk.remove_node(NODE_NAME, name="runner.pcmk.remove_node2")
+ )
+ node_remove_guest(
+ self.env_assist.get_env(),
+ node_identifier=REMOTE_HOST,
+ allow_remove_multiple_nodes=True
+ )
+ self.env_assist.assert_reports(
+ REPORTS
+ .adapt(
+ "pcmk_remote_disable_stop_started",
+ node_list=["B-NAME", REMOTE_HOST, NODE_NAME]
+ )
+ .copy(
+ "pcmk_remote_disable_success",
+ "pcmk_remote_disable_success_b_name",
+ node="B-NAME",
+ )
+ .copy(
+ "pcmk_remote_stop_success",
+ "pcmk_remote_stop_success_b_name",
+ node="B-NAME",
+ )
+ .copy(
+ "pcmk_remote_disable_success",
+ "pcmk_remote_disable_success_remote_host",
+ node=REMOTE_HOST,
+ )
+ .copy(
+ "pcmk_remote_stop_success",
+ "pcmk_remote_stop_success_remote_host",
+ node=REMOTE_HOST,
+ )
+ .adapt(
+ "authkey_remove_started",
+ node_list=["B-NAME", REMOTE_HOST, NODE_NAME]
+ )
+ .copy(
+ "authkey_remove_success",
+ "authkey_remove_success_b_name",
+ node="B-NAME",
+ )
+ .copy(
+ "authkey_remove_success",
+ "authkey_remove_success_remote_host",
+ node=REMOTE_HOST,
+ )
+ +
+ self.multiple_result_reports.select("multiple_result_found_warn")
+ )
+
+
+class AuthkeyRemove(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ (self.config
+ .runner.cib.load(resources=FIXTURE_RESOURCES)
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST]
+ )
+ )
+
+ def test_fails_when_offline(self):
+ self.config.local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ **FAIL_HTTP_KWARGS
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_remove_guest(self.env_assist.get_env())
+ )
+ self.env_assist.assert_reports(
+ REPORTS.remove("authkey_remove_success")
+ +
+ EXTRA_REPORTS.select("remove_file_connection_failed")
+ )
+
+ def test_fails_when_remotely_fails(self):
+ self.config.local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ result={
+ "code": "unexpected",
+ "message": "Access denied",
+ }
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_remove_guest(self.env_assist.get_env())
+ )
+ self.env_assist.assert_reports(
+ REPORTS.remove("authkey_remove_success")
+ +
+ EXTRA_REPORTS.select("authkey_remove_failed")
+ )
+
+ def test_forceable_when_remotely_fail(self):
+ (self.config
+ .local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ result={
+ "code": "unexpected",
+ "message": "Access denied",
+ }
+ )
+ .env.push_cib(remove=".//primitive/meta_attributes")
+ .runner.pcmk.remove_node(NODE_NAME)
+ )
+ node_remove_guest(
+ self.env_assist.get_env(),
+ allow_pacemaker_remote_service_fail=True
+ )
+ self.env_assist.assert_reports(
+ REPORTS.remove("authkey_remove_success")
+ +
+ EXTRA_REPORTS.select("authkey_remove_failed_warn")
+ )
+
+class PcmkRemoteServiceDestroy(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ self.config.runner.cib.load(resources=FIXTURE_RESOURCES)
+
+ def test_fails_when_offline(self):
+ (self.config
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST],
+ **FAIL_HTTP_KWARGS
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_remove_guest(self.env_assist.get_env())
+ )
+ self.env_assist.assert_reports(
+ REPORTS[:"pcmk_remote_disable_success"]
+ +
+ EXTRA_REPORTS.select("manage_services_connection_failed")
+ )
+
+ def test_fails_when_remotely_fails(self):
+ (self.config
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST],
+ result={
+ "code": "fail",
+ "message": "Action failed",
+ }
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_remove_guest(self.env_assist.get_env())
+ )
+ self.env_assist.assert_reports(
+ REPORTS[:"pcmk_remote_disable_success"]
+ +
+ EXTRA_REPORTS.select(
+ "pcmk_remote_disable_failed",
+ "pcmk_remote_stop_failed",
+ )
+ )
+
+ def test_forceable_when_remotely_fail(self):
+ (self.config
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST],
+ result={
+ "code": "fail",
+ "message": "Action failed",
+ }
+ )
+ .local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ )
+ .env.push_cib(remove=".//primitive/meta_attributes")
+ .runner.pcmk.remove_node(NODE_NAME)
+ )
+ node_remove_guest(
+ self.env_assist.get_env(),
+ allow_pacemaker_remote_service_fail=True
+ )
+ self.env_assist.assert_reports(
+ REPORTS.remove(
+ "pcmk_remote_disable_success",
+ "pcmk_remote_stop_success",
+ )
+ +
+ EXTRA_REPORTS.select(
+ "pcmk_remote_disable_failed_warn",
+ "pcmk_remote_stop_failed_warn",
+ )
+ )
diff --git a/pcs/lib/commands/test/remote_node/test_node_remove_remote.py b/pcs/lib/commands/test/remote_node/test_node_remove_remote.py
new file mode 100644
index 0000000..cbdf36e
--- /dev/null
+++ b/pcs/lib/commands/test/remote_node/test_node_remove_remote.py
@@ -0,0 +1,447 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from functools import partial
+
+from pcs.common import report_codes
+from pcs.lib.commands.remote_node import (
+ node_remove_remote as node_remove_remote_orig
+)
+from pcs.lib.commands.test.remote_node.fixtures_add import FAIL_HTTP_KWARGS
+from pcs.lib.commands.test.remote_node.fixtures_remove import (
+ EnvConfigMixin,
+ REPORTS as FIXTURE_REPORTS,
+ EXTRA_REPORTS as FIXTURE_EXTRA_REPORTS,
+)
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase, mock
+
+
+NODE_NAME = "node-name"
+REMOTE_HOST = "remote-host"
+NODE_1 = "node-1"
+NODE_2 = "node-2"
+
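+# Convenience wrapper supplying the default node identifier; the remaining
+# arguments (e.g. the remove_resource callback) are passed through unchanged.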
+def node_remove_remote(env, node_identifier=REMOTE_HOST, *args, **kwargs):
+ node_remove_remote_orig(env, node_identifier, *args, **kwargs)
+
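+# CIB <resources> snippet with one ocf:pacemaker:remote primitive; its
+# server instance attribute points at REMOTE_HOST.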
+FIXTURE_RESOURCES = """
+ <resources>
+ <primitive class="ocf" id="{0}" provider="pacemaker" type="remote">
+ <instance_attributes id="node-name-instance_attributes">
+ <nvpair
+ id="node-name-instance_attributes-server"
+ name="server" value="{1}"
+ />
+ </instance_attributes>
+ </primitive>
+ </resources>
+""".format(
+ NODE_NAME,
+ REMOTE_HOST,
+)
+
+REPORTS = (FIXTURE_REPORTS
+ .adapt("pcmk_remote_disable_stop_started", node_list=[NODE_NAME])
+ .adapt("pcmk_remote_disable_success", node=NODE_NAME)
+ .adapt("pcmk_remote_stop_success", node=NODE_NAME)
+ .adapt("authkey_remove_started", node_list=[NODE_NAME])
+ .adapt("authkey_remove_success", node=NODE_NAME)
+)
+
+EXTRA_REPORTS = (FIXTURE_EXTRA_REPORTS
+ .adapt_multi(
+ [
+ "manage_services_connection_failed",
+ "manage_services_connection_failed_warn",
+ "remove_file_connection_failed",
+ "remove_file_connection_failed_warn",
+ ],
+ node=REMOTE_HOST
+ )
+ .adapt_multi(
+ [
+ "authkey_remove_failed",
+ "authkey_remove_failed_warn",
+ "pcmk_remote_disable_failed",
+ "pcmk_remote_disable_failed_warn",
+ "pcmk_remote_stop_failed",
+ "pcmk_remote_stop_failed_warn",
+ ],
+ node=NODE_NAME
+ )
+)
+
+get_env_tools = partial(get_env_tools, local_extensions={
+ "local": EnvConfigMixin
+})
+
+class RemoveRemote(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ self.remove_resource = mock.Mock()
+
+ def find_by(self, identifier):
+ (self.config
+ .runner.cib.load(resources=FIXTURE_RESOURCES)
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST]
+ )
+ .local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ )
+ )
+ node_remove_remote(
+ self.env_assist.get_env(),
+ node_identifier=identifier,
+ remove_resource=self.remove_resource
+ )
+ self.remove_resource.assert_called_once_with(
+ NODE_NAME,
+ is_remove_remote_context=True
+ )
+ self.env_assist.assert_reports(REPORTS)
+
+ def test_success_base(self):
+ self.find_by(REMOTE_HOST)
+
+ def test_can_find_by_node_name(self):
+ self.find_by(NODE_NAME)
+
+class RemoveRemoteOthers(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ self.remove_resource = mock.Mock()
+
+ def test_can_skip_all_offline(self):
+ (self.config
+ .runner.cib.load(resources=FIXTURE_RESOURCES)
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST],
+ **FAIL_HTTP_KWARGS
+ )
+ .local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ **FAIL_HTTP_KWARGS
+ )
+ )
+ node_remove_remote(
+ self.env_assist.get_env(),
+ remove_resource=self.remove_resource,
+ skip_offline_nodes=True
+ )
+ self.remove_resource.assert_called_once_with(
+ NODE_NAME,
+ is_remove_remote_context=True
+ )
+ self.env_assist.assert_reports(
+ REPORTS.remove(
+ "pcmk_remote_disable_success",
+ "pcmk_remote_stop_success",
+ "authkey_remove_success",
+ )
+ +
+ EXTRA_REPORTS.select(
+ "manage_services_connection_failed_warn",
+ "remove_file_connection_failed_warn"
+ )
+ )
+
+ def test_fail_when_identifier_not_found(self):
+ (self.config
+ .runner.cib.load(resources=FIXTURE_RESOURCES)
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_remove_remote(
+ self.env_assist.get_env(),
+ remove_resource=self.remove_resource,
+                node_identifier="NONEXISTENT"
+ ),
+ [
+ fixture.error(
+ report_codes.NODE_NOT_FOUND,
+                    node="NONEXISTENT",
+ searched_types="remote",
+ )
+ ],
+ expected_in_processor=False
+ )
+
+class MultipleResults(TestCase):
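+    # The second primitive's resource id equals the first primitive's
+    # server address, so a lookup by REMOTE_HOST matches both resources.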
+ fixture_multi_resources = """
+ <resources>
+ <primitive class="ocf" id="{0}" provider="pacemaker" type="remote">
+ <instance_attributes id="node-name-instance_attributes">
+ <nvpair
+ id="node-name-instance_attributes-server"
+ name="server" value="{1}"
+ />
+ </instance_attributes>
+ </primitive>
+ <primitive class="ocf" id="{1}" provider="pacemaker" type="remote">
+            <instance_attributes id="{1}-instance_attributes">
+                <nvpair
+                    id="{1}-instance_attributes-server"
+                    name="server" value="{2}"
+ />
+ </instance_attributes>
+ </primitive>
+ </resources>
+ """.format(
+ NODE_NAME,
+ REMOTE_HOST,
+ "OTHER-REMOTE"
+ )
+
+    def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ self.remove_resource = mock.Mock()
+ (self.config
+ .runner.cib.load(resources=self.fixture_multi_resources)
+ )
+ self.multiple_result_reports = (fixture.ReportStore()
+ .error(
+ "multiple_result_found",
+ report_codes.MULTIPLE_RESULTS_FOUND,
+ result_identifier_list=[
+ NODE_NAME,
+ REMOTE_HOST,
+ ],
+ result_type="resource",
+ search_description=REMOTE_HOST,
+ force_code=report_codes.FORCE_REMOVE_MULTIPLE_NODES
+ )
+ .as_warn(
+ "multiple_result_found",
+ "multiple_result_found_warn",
+ )
+ )
+
+ def test_fail(self):
+ self.env_assist.assert_raise_library_error(
+ lambda: node_remove_remote(
+ self.env_assist.get_env(),
+ node_identifier=REMOTE_HOST,
+ remove_resource=self.remove_resource
+ ),
+ self.multiple_result_reports.select("multiple_result_found").reports
+ )
+
+ def test_force(self):
+ (self.config
+ .local.destroy_pacemaker_remote(
+ communication_list=[
+ dict(label=REMOTE_HOST, address_list=["OTHER-REMOTE"]),
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST]),
+ ]
+ )
+ .local.remove_authkey(
+ communication_list=[
+ dict(label=REMOTE_HOST, address_list=["OTHER-REMOTE"]),
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST]),
+ ],
+ )
+ )
+ node_remove_remote(
+ self.env_assist.get_env(),
+ node_identifier=REMOTE_HOST,
+ remove_resource=self.remove_resource,
+ allow_remove_multiple_nodes=True,
+ )
+ self.env_assist.assert_reports(
+ REPORTS
+ .adapt(
+ "pcmk_remote_disable_stop_started",
+ node_list=[REMOTE_HOST, NODE_NAME]
+ )
+ .copy(
+ "pcmk_remote_disable_success",
+ "pcmk_remote_disable_success_remote_host",
+ node=REMOTE_HOST,
+ )
+ .copy(
+ "pcmk_remote_stop_success",
+ "pcmk_remote_stop_success_remote_host",
+ node=REMOTE_HOST,
+ )
+ .adapt(
+ "authkey_remove_started",
+ node_list=[REMOTE_HOST, NODE_NAME]
+ )
+ .copy(
+ "authkey_remove_success",
+ "authkey_remove_success_remote_host",
+ node=REMOTE_HOST,
+ )
+ +
+ self.multiple_result_reports.select("multiple_result_found_warn")
+ )
+
+
+class AuthkeyRemove(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ (self.config
+ .runner.cib.load(resources=FIXTURE_RESOURCES)
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST]
+ )
+ )
+ self.remove_resource = mock.Mock()
+
+ def test_fails_when_offline(self):
+ self.config.local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ **FAIL_HTTP_KWARGS
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_remove_remote(
+ self.env_assist.get_env(),
+ remove_resource=self.remove_resource,
+ )
+ )
+ self.env_assist.assert_reports(
+ REPORTS.remove("authkey_remove_success")
+ +
+ EXTRA_REPORTS.select("remove_file_connection_failed")
+ )
+
+ def test_fails_when_remotely_fails(self):
+ self.config.local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ result={
+ "code": "unexpected",
+ "message": "Access denied",
+ }
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_remove_remote(
+ self.env_assist.get_env(),
+ remove_resource=self.remove_resource,
+ )
+ )
+ self.env_assist.assert_reports(
+ REPORTS.remove("authkey_remove_success")
+ +
+ EXTRA_REPORTS.select("authkey_remove_failed")
+ )
+
+ def test_forceable_when_remotely_fail(self):
+ self.config.local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ result={
+ "code": "unexpected",
+ "message": "Access denied",
+ }
+ )
+ node_remove_remote(
+ self.env_assist.get_env(),
+ remove_resource=self.remove_resource,
+ allow_pacemaker_remote_service_fail=True
+ )
+ self.env_assist.assert_reports(
+ REPORTS.remove("authkey_remove_success")
+ +
+ EXTRA_REPORTS.select("authkey_remove_failed_warn")
+ )
+
+class PcmkRemoteServiceDestroy(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ self.config.runner.cib.load(resources=FIXTURE_RESOURCES)
+ self.remove_resource = mock.Mock()
+
+ def test_fails_when_offline(self):
+ (self.config
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST],
+ **FAIL_HTTP_KWARGS
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_remove_remote(
+ self.env_assist.get_env(),
+ remove_resource=self.remove_resource,
+ )
+ )
+ self.env_assist.assert_reports(
+ REPORTS[:"pcmk_remote_disable_success"]
+ +
+ EXTRA_REPORTS.select("manage_services_connection_failed")
+ )
+
+ def test_fails_when_remotely_fails(self):
+ (self.config
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST],
+ result={
+ "code": "fail",
+ "message": "Action failed",
+ }
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: node_remove_remote(
+ self.env_assist.get_env(),
+ remove_resource=self.remove_resource,
+ )
+ )
+ self.env_assist.assert_reports(
+ REPORTS[:"pcmk_remote_disable_success"]
+ +
+ EXTRA_REPORTS.select(
+ "pcmk_remote_disable_failed",
+ "pcmk_remote_stop_failed",
+ )
+ )
+
+ def test_forceable_when_remotely_fail(self):
+ (self.config
+ .local.destroy_pacemaker_remote(
+ label=NODE_NAME,
+ address_list=[REMOTE_HOST],
+ result={
+ "code": "fail",
+ "message": "Action failed",
+ }
+ )
+ .local.remove_authkey(
+ communication_list=[
+ dict(label=NODE_NAME, address_list=[REMOTE_HOST])
+ ],
+ )
+ )
+ node_remove_remote(
+ self.env_assist.get_env(),
+ remove_resource=self.remove_resource,
+ allow_pacemaker_remote_service_fail=True
+ )
+ self.env_assist.assert_reports(
+ REPORTS.remove(
+ "pcmk_remote_disable_success",
+ "pcmk_remote_stop_success",
+ )
+ +
+ EXTRA_REPORTS.select(
+ "pcmk_remote_disable_failed_warn",
+ "pcmk_remote_stop_failed_warn",
+ )
+ )
diff --git a/pcs/lib/commands/test/resource/test_bundle_create.py b/pcs/lib/commands/test/resource/test_bundle_create.py
index 8f5ebe5..ab3d6aa 100644
--- a/pcs/lib/commands/test/resource/test_bundle_create.py
+++ b/pcs/lib/commands/test/resource/test_bundle_create.py
@@ -249,7 +249,7 @@ class CreateDocker(TestCase):
[
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["extra", ],
"option_type": "container",
@@ -282,7 +282,7 @@ class CreateDocker(TestCase):
self.env_assist.assert_reports([
(
severities.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["extra", ],
"option_type": "container",
@@ -376,7 +376,7 @@ class CreateWithNetwork(TestCase):
),
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["extra", ],
"option_type": "network",
@@ -424,7 +424,7 @@ class CreateWithNetwork(TestCase):
),
(
severities.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["extra", ],
"option_type": "network",
@@ -624,7 +624,7 @@ class CreateWithPortMap(TestCase):
[
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["extra", ],
"option_type": "port-map",
@@ -681,7 +681,7 @@ class CreateWithPortMap(TestCase):
self.env_assist.assert_reports([
(
severities.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["extra", ],
"option_type": "port-map",
@@ -857,7 +857,7 @@ class CreateWithStorageMap(TestCase):
[
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["extra", ],
"option_type": "storage-map",
@@ -907,7 +907,7 @@ class CreateWithStorageMap(TestCase):
[
(
severities.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["extra", ],
"option_type": "storage-map",
diff --git a/pcs/lib/commands/test/resource/test_bundle_update.py b/pcs/lib/commands/test/resource/test_bundle_update.py
index 8993909..934a0ea 100644
--- a/pcs/lib/commands/test/resource/test_bundle_update.py
+++ b/pcs/lib/commands/test/resource/test_bundle_update.py
@@ -53,7 +53,7 @@ class Basics(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "B1",
- "id_description": "bundle",
+ "expected_types": ["bundle"],
"context_type": "resources",
"context_id": "",
},
@@ -226,7 +226,7 @@ class ContainerDocker(TestCase):
[
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["extra", ],
"option_type": "container",
@@ -254,7 +254,7 @@ class ContainerDocker(TestCase):
self.env_assist.assert_reports([
(
severities.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["extra", ],
"option_type": "container",
@@ -417,7 +417,7 @@ class Network(TestCase):
[
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["extra", ],
"option_type": "network",
@@ -445,7 +445,7 @@ class Network(TestCase):
[
(
severities.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["extra", ],
"option_type": "network",
@@ -609,7 +609,7 @@ class PortMap(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "B1-port-map-8080",
- "id_description": "port-map",
+ "expected_types": ["port-map"],
"context_type": "bundle",
"context_id": "B1",
},
@@ -742,7 +742,7 @@ class StorageMap(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "B1-storage-map-1",
- "id_description": "storage-map",
+ "expected_types": ["storage-map"],
"context_type": "bundle",
"context_id": "B1",
},
diff --git a/pcs/lib/commands/test/resource/test_resource_create.py b/pcs/lib/commands/test/resource/test_resource_create.py
index f0d7f34..2abe605 100644
--- a/pcs/lib/commands/test/resource/test_resource_create.py
+++ b/pcs/lib/commands/test/resource/test_resource_create.py
@@ -363,30 +363,6 @@ fixture_cib_resources_xml_clone_simplest_disabled = """<resources>
</clone>
</resources>"""
-def fixture_state_resources_xml(role="Started", failed="false"):
- return(
- """
- <resources>
- <resource
- id="A"
- resource_agent="ocf::heartbeat:Dummy"
- role="{role}"
- active="true"
- orphaned="false"
- managed="true"
- failed="{failed}"
- failure_ignored="false"
- nodes_running_on="1"
- >
- <node name="node1" id="1" cached="false"/>
- </resource>
- </resources>
- """.format(
- role=role,
- failed=failed,
- )
- )
-
class Create(TestCase):
fixture_sanitized_operation = """
<resources>
@@ -536,9 +512,7 @@ class CreateWait(TestCase):
def test_wait_ok_run_fail(self):
(self.config
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(failed="true")
- )
+ .runner.pcmk.load_state(raw_resources=dict(failed="true"))
)
self.env_assist.assert_raise_library_error(
@@ -552,9 +526,7 @@ class CreateWait(TestCase):
)
def test_wait_ok_run_ok(self):
- (self.config
- .runner.pcmk.load_state(resources=fixture_state_resources_xml())
- )
+ self.config.runner.pcmk.load_state(raw_resources=dict())
create(self.env_assist.get_env(), wait=TIMEOUT)
self.env_assist.assert_reports([
fixture.info(
@@ -566,7 +538,7 @@ class CreateWait(TestCase):
def test_wait_ok_disable_fail(self):
(self.config
- .runner.pcmk.load_state(resources=fixture_state_resources_xml())
+ .runner.pcmk.load_state(raw_resources=dict())
.env.push_cib(
resources=fixture_cib_resources_xml_simplest_disabled,
wait=TIMEOUT,
@@ -591,9 +563,7 @@ class CreateWait(TestCase):
def test_wait_ok_disable_ok(self):
(self.config
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
.env.push_cib(
resources=fixture_cib_resources_xml_simplest_disabled,
wait=TIMEOUT,
@@ -611,9 +581,7 @@ class CreateWait(TestCase):
def test_wait_ok_disable_ok_by_target_role(self):
(self.config
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
.env.push_cib(
resources=fixture_cib_resources_xml_simplest_disabled,
wait=TIMEOUT,
@@ -673,9 +641,7 @@ class CreateAsMaster(TestCase):
resources=fixture_cib_resources_xml_master_simplest,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(failed="true")
- )
+ .runner.pcmk.load_state(raw_resources=dict(failed="true"))
)
self.env_assist.assert_raise_library_error(
lambda: create_master(self.env_assist.get_env()),
@@ -693,9 +659,7 @@ class CreateAsMaster(TestCase):
resources=fixture_cib_resources_xml_master_simplest,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml()
- )
+ .runner.pcmk.load_state(raw_resources=dict())
)
create_master(self.env_assist.get_env())
self.env_assist.assert_reports([
@@ -712,9 +676,7 @@ class CreateAsMaster(TestCase):
resources=fixture_cib_resources_xml_master_simplest_disabled,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml()
- )
+ .runner.pcmk.load_state(raw_resources=dict())
)
self.env_assist.assert_raise_library_error(
@@ -734,9 +696,7 @@ class CreateAsMaster(TestCase):
resources=fixture_cib_resources_xml_master_simplest_disabled,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
)
create_master(self.env_assist.get_env(), disabled=True)
self.env_assist.assert_reports([
@@ -788,9 +748,7 @@ class CreateAsMaster(TestCase):
""",
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
)
create_master(
self.env_assist.get_env(),
@@ -809,9 +767,7 @@ class CreateAsMaster(TestCase):
=fixture_cib_resources_xml_master_simplest_disabled_meta_after,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
)
create_master(
self.env_assist.get_env(),
@@ -866,9 +822,7 @@ class CreateAsMaster(TestCase):
""",
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
)
create_master(
self.env_assist.get_env(),
@@ -924,9 +878,7 @@ class CreateAsMaster(TestCase):
""",
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
)
create_master(
self.env_assist.get_env(),
@@ -1012,9 +964,7 @@ class CreateInGroup(TestCase):
resources=fixture_cib_resources_xml_group_simplest,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(failed="true")
- )
+ .runner.pcmk.load_state(raw_resources=dict(failed="true"))
)
self.env_assist.assert_raise_library_error(
lambda: create_group(self.env_assist.get_env()),
@@ -1032,9 +982,7 @@ class CreateInGroup(TestCase):
resources=fixture_cib_resources_xml_group_simplest,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml()
- )
+ .runner.pcmk.load_state(raw_resources=dict())
)
create_group(self.env_assist.get_env())
self.env_assist.assert_reports([
@@ -1051,9 +999,7 @@ class CreateInGroup(TestCase):
resources=fixture_cib_resources_xml_group_simplest_disabled,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml()
- )
+ .runner.pcmk.load_state(raw_resources=dict())
)
self.env_assist.assert_raise_library_error(
@@ -1073,9 +1019,7 @@ class CreateInGroup(TestCase):
resources=fixture_cib_resources_xml_group_simplest_disabled,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
)
create_group(self.env_assist.get_env(), disabled=True)
self.env_assist.assert_reports([
@@ -1091,9 +1035,7 @@ class CreateInGroup(TestCase):
resources=fixture_cib_resources_xml_group_simplest_disabled,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
)
create_group(
self.env_assist.get_env(),
@@ -1144,9 +1086,7 @@ class CreateAsClone(TestCase):
resources=fixture_cib_resources_xml_clone_simplest,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(failed="true")
- )
+ .runner.pcmk.load_state(raw_resources=dict(failed="true"))
)
self.env_assist.assert_raise_library_error(
lambda: create_clone(self.env_assist.get_env()),
@@ -1164,9 +1104,7 @@ class CreateAsClone(TestCase):
resources=fixture_cib_resources_xml_clone_simplest,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml()
- )
+ .runner.pcmk.load_state(raw_resources=dict())
)
create_clone(self.env_assist.get_env())
self.env_assist.assert_reports([
@@ -1183,9 +1121,7 @@ class CreateAsClone(TestCase):
resources=fixture_cib_resources_xml_clone_simplest_disabled,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml()
- )
+ .runner.pcmk.load_state(raw_resources=dict())
)
self.env_assist.assert_raise_library_error(
@@ -1205,9 +1141,7 @@ class CreateAsClone(TestCase):
resources=fixture_cib_resources_xml_clone_simplest_disabled,
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
)
create_clone(self.env_assist.get_env(), disabled=True)
self.env_assist.assert_reports([
@@ -1260,9 +1194,7 @@ class CreateAsClone(TestCase):
""",
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
)
create_clone(
self.env_assist.get_env(),
@@ -1317,9 +1249,7 @@ class CreateAsClone(TestCase):
""",
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
)
create_clone(
self.env_assist.get_env(),
@@ -1374,9 +1304,7 @@ class CreateAsClone(TestCase):
""",
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
)
create_clone(
self.env_assist.get_env(),
@@ -1432,9 +1360,7 @@ class CreateAsClone(TestCase):
""",
wait=TIMEOUT
)
- .runner.pcmk.load_state(
- resources=fixture_state_resources_xml(role="Stopped")
- )
+ .runner.pcmk.load_state(raw_resources=dict(role="Stopped"))
)
create_clone(
self.env_assist.get_env(),
@@ -1602,7 +1528,7 @@ class CreateInToBundle(TestCase):
fixture.error(
report_codes.ID_NOT_FOUND,
id="B",
- id_description="bundle",
+ expected_types=["bundle"],
context_type="resources",
context_id="",
)
diff --git a/pcs/lib/commands/test/sbd/test_disable_sbd.py b/pcs/lib/commands/test/sbd/test_disable_sbd.py
index ed2bc46..e30b55d 100644
--- a/pcs/lib/commands/test/sbd/test_disable_sbd.py
+++ b/pcs/lib/commands/test/sbd/test_disable_sbd.py
@@ -4,8 +4,6 @@ from __future__ import (
print_function,
)
-import json
-
from pcs.common import report_codes
from pcs.lib.commands.sbd import disable_sbd
from pcs.test.tools import fixture
@@ -16,42 +14,241 @@ from pcs.test.tools.pcs_unittest import TestCase
class DisableSbd(TestCase):
def setUp(self):
self.env_assist, self.config = get_env_tools(self)
+ self.corosync_conf_name = "corosync-3nodes.conf"
+ self.node_list = ["rh7-1", "rh7-2", "rh7-3"]
- def test_base(self):
- (self.config
- .runner.corosync.version()
- .corosync_conf.load(
- node_name_list=["node-1", "node-2"],
- )
- .http.add_communication(
- "check_auth",
- [
- dict(
- label="node-1",
- output=json.dumps({"notauthorized": "true"}),
- response_code=401,
- ),
- dict(
- label="node-2",
- output=json.dumps({"success": "true"}),
- response_code=200,
- ),
- ],
- action="remote/check_auth",
- param_list=[('check_auth_only', 1)]
- )
+ def test_success(self):
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.config.http.host.check_auth(node_labels=self.node_list)
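+        # clearing stonith-watchdog-timeout is attempted node by node until
+        # one call succeeds, so only the first node is expected to be
+        # contacted here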
+ self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
+ node_labels=self.node_list[:1]
+ )
+ self.config.http.sbd.disable_sbd(node_labels=self.node_list)
+ disable_sbd(self.env_assist.get_env())
+ self.env_assist.assert_reports(
+ [fixture.info(report_codes.SBD_DISABLING_STARTED)]
+ +
+ [
+ fixture.info(
+ report_codes.SERVICE_DISABLE_SUCCESS,
+ service="sbd",
+ node=node,
+ instance=None
+ ) for node in self.node_list
+ ]
+ +
+ [
+ fixture.warn(
+ report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
+ )
+ ]
)
+ def test_node_offline(self):
+ err_msg = "Failed connect to rh7-3:2224; No route to host"
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.config.http.host.check_auth(
+ communication_list=[
+ {"label": "rh7-1"},
+ {"label": "rh7-2"},
+ {
+ "label": "rh7-3",
+ "was_connected": False,
+ "errno": 7,
+ "error_msg": err_msg,
+ }
+ ]
+ )
self.env_assist.assert_raise_library_error(
lambda: disable_sbd(self.env_assist.get_env()),
[],
+ expected_in_processor=False
)
-
self.env_assist.assert_reports([
fixture.error(
- report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
- node="node-1",
- reason="HTTP error: 401",
- command="remote/check_auth",
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node="rh7-3",
+ reason=err_msg,
+ command="remote/check_auth"
)
])
+
+ def test_success_node_offline_skip_offline(self):
+ err_msg = "Failed connect to rh7-3:2224; No route to host"
+ online_nodes_list = ["rh7-2", "rh7-3"]
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.config.http.host.check_auth(
+ communication_list=[
+ {
+ "label": "rh7-1",
+ "was_connected": False,
+ "errno": 7,
+ "error_msg": err_msg,
+ },
+ {"label": "rh7-2"},
+ {"label": "rh7-3"}
+ ]
+ )
+ self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
+ node_labels=online_nodes_list[:1]
+ )
+ self.config.http.sbd.disable_sbd(node_labels=online_nodes_list)
+ disable_sbd(self.env_assist.get_env(), ignore_offline_nodes=True)
+ self.env_assist.assert_reports(
+ [fixture.warn(report_codes.OMITTING_NODE, node="rh7-1")]
+ +
+ [fixture.info(report_codes.SBD_DISABLING_STARTED)]
+ +
+ [
+ fixture.info(
+ report_codes.SERVICE_DISABLE_SUCCESS,
+ service="sbd",
+ node=node,
+ instance=None
+ ) for node in online_nodes_list
+ ]
+ +
+ [
+ fixture.warn(
+ report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
+ )
+ ]
+ )
+
+ def test_set_stonith_watchdog_timeout_fails_on_some_nodes(self):
+ err_msg = "Error"
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.config.http.host.check_auth(node_labels=self.node_list)
+ self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
+ communication_list=[
+ [{
+ "label": "rh7-1",
+ "was_connected": False,
+ "errno": 7,
+ "error_msg": err_msg,
+ }],
+ [{
+ "label": "rh7-2",
+ "response_code": 400,
+ "output": "FAILED",
+ }],
+ [{"label": "rh7-3"}]
+ ]
+ )
+ self.config.http.sbd.disable_sbd(node_labels=self.node_list)
+ disable_sbd(self.env_assist.get_env())
+ self.env_assist.assert_reports(
+ [
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node="rh7-1",
+ reason=err_msg,
+ command="remote/set_stonith_watchdog_timeout_to_zero"
+ ),
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node="rh7-2",
+ reason="FAILED",
+ command="remote/set_stonith_watchdog_timeout_to_zero"
+ )
+ ]
+ +
+ [fixture.info(report_codes.SBD_DISABLING_STARTED)]
+ +
+ [
+ fixture.info(
+ report_codes.SERVICE_DISABLE_SUCCESS,
+ service="sbd",
+ node=node,
+ instance=None
+ ) for node in self.node_list
+ ]
+ +
+ [
+ fixture.warn(
+ report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
+ )
+ ]
+ )
+
+ def test_set_stonith_watchdog_timeout_fails_on_all_nodes(self):
+ err_msg = "Error"
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.config.http.host.check_auth(node_labels=self.node_list)
+ self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
+ communication_list=[
+ [dict(label=node, response_code=400, output=err_msg)]
+ for node in self.node_list
+ ]
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: disable_sbd(self.env_assist.get_env()),
+ [],
+ )
+ self.env_assist.assert_reports(
+ [
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node=node,
+ reason=err_msg,
+ command="remote/set_stonith_watchdog_timeout_to_zero"
+ ) for node in self.node_list
+ ]
+ +
+ [
+ fixture.error(
+ report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE,
+ )
+ ]
+ )
+
+ def test_disable_failed(self):
+ err_msg = "Error"
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.config.http.host.check_auth(node_labels=self.node_list)
+ self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
+ node_labels=self.node_list[:1]
+ )
+ self.config.http.sbd.disable_sbd(
+ communication_list=[
+ {"label": "rh7-1"},
+ {"label": "rh7-2"},
+ {
+ "label": "rh7-3",
+ "response_code": 400,
+ "output": err_msg
+ }
+ ]
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: disable_sbd(self.env_assist.get_env()),
+ [],
+ )
+ self.env_assist.assert_reports(
+ [fixture.info(report_codes.SBD_DISABLING_STARTED)]
+ +
+ [
+ fixture.info(
+ report_codes.SERVICE_DISABLE_SUCCESS,
+ service="sbd",
+ node=node,
+ instance=None
+ ) for node in self.node_list[:2]
+ ]
+ +
+ [
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node="rh7-3",
+ reason=err_msg,
+ command="remote/sbd_disable"
+ )
+ ]
+ )
diff --git a/pcs/lib/commands/test/sbd/test_enable_sbd.py b/pcs/lib/commands/test/sbd/test_enable_sbd.py
index e870c5a..d0be0f4 100644
--- a/pcs/lib/commands/test/sbd/test_enable_sbd.py
+++ b/pcs/lib/commands/test/sbd/test_enable_sbd.py
@@ -6,45 +6,506 @@ from __future__ import (
import json
+from pcs import settings
from pcs.common import report_codes
-from pcs.lib.commands.sbd import enable_sbd
+from pcs.lib.commands.sbd import enable_sbd, ALLOWED_SBD_OPTION_LIST
from pcs.test.tools import fixture
from pcs.test.tools.command_env import get_env_tools
from pcs.test.tools.pcs_unittest import TestCase
-from pcs.test.tools.misc import outdent
+from pcs.test.tools.misc import get_test_resource, outdent
+from pcs.lib.corosync.config_parser import parse_string
-class EnableSbd(TestCase):
+
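+# Successful remote/check_sbd response for a node: sbd is installed, the
+# watchdog is present, and every requested device is an existing block device.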
+def _check_sbd_comm_success_fixture(node, watchdog, device_list):
+ return dict(
+ label=node,
+ output=json.dumps({
+ "sbd": {
+ "installed": True,
+ },
+ "watchdog": {
+ "exist": True,
+ "path": watchdog,
+ },
+ "device_list": [
+ dict(path=dev, exist=True, block_device=True)
+ for dev in device_list
+ ],
+ }),
+ param_list=[
+ ("watchdog", watchdog),
+ ("device_list", json.dumps(device_list)),
+ ]
+ )
+
+
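+# Build the corosync.conf text the tests expect to be distributed: the
+# two_node flag is removed from the quorum section and auto_tie_breaker
+# is set instead.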
+def _get_corosync_conf_text_with_atb(orig_cfg_file):
+    with open(get_test_resource(orig_cfg_file)) as cfg_file:
+        corosync_conf = parse_string(cfg_file.read())
+ for quorum in corosync_conf.get_sections(name="quorum"):
+ quorum.del_attributes_by_name("two_node")
+ quorum.set_attribute("auto_tie_breaker", 1)
+ return corosync_conf.export()
+
+
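+# Report sequence of a successful enable_sbd run; with atb_set=True the
+# reports for enabling auto_tie_breaker and distributing corosync.conf
+# (including warnings for skipped offline nodes) are inserted as well.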
+def _sbd_enable_successful_report_list_fixture(
+ online_node_list, skipped_offline_node_list=(), err_msg="err", atb_set=False
+):
+ report_list = (
+ [
+ fixture.warn(report_codes.OMITTING_NODE, node=node)
+ for node in skipped_offline_node_list
+ ]
+ +
+ [fixture.info(report_codes.SBD_CHECK_STARTED)]
+ +
+ [
+ fixture.info(report_codes.SBD_CHECK_SUCCESS, node=node)
+ for node in online_node_list
+ ]
+ )
+ if atb_set:
+ report_list += (
+ [
+ fixture.warn(report_codes.SBD_REQUIRES_ATB),
+ fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
+ ]
+ +
+ [
+ fixture.info(
+ report_codes.COROSYNC_NOT_RUNNING_ON_NODE, node=node
+ ) for node in online_node_list
+ ]
+ +
+ [
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node=node,
+ reason=err_msg,
+ command="remote/status",
+ ) for node in skipped_offline_node_list
+ ]
+ +
+ [
+ fixture.warn(
+ report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+ node=node,
+ ) for node in skipped_offline_node_list
+ ]
+ +
+ [fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED)]
+ +
+ [
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node=node,
+ reason=err_msg,
+ command="remote/set_corosync_conf",
+ ) for node in skipped_offline_node_list
+ ]
+ +
+ [
+ fixture.warn(
+ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+ node=node,
+ ) for node in skipped_offline_node_list
+ ]
+ +
+ [
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE, node=node
+ ) for node in online_node_list
+ ]
+ )
+ return (
+ report_list
+ +
+ [fixture.info(report_codes.SBD_CONFIG_DISTRIBUTION_STARTED)]
+ +
+ [
+ fixture.info(report_codes.SBD_CONFIG_ACCEPTED_BY_NODE, node=node)
+ for node in online_node_list
+ ]
+ +
+ [fixture.info(report_codes.SBD_ENABLING_STARTED)]
+ +
+ [
+ fixture.info(
+ report_codes.SERVICE_ENABLE_SUCCESS,
+ service="sbd",
+ node=node,
+ instance=None
+ ) for node in online_node_list
+ ]
+ +
+ [fixture.warn(report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES)]
+ )
+
+
+class OddNumOfNodesSuccess(TestCase):
def setUp(self):
self.env_assist, self.config = get_env_tools(self)
- (self.config
- .runner.corosync.version()
- .corosync_conf.load(
- node_name_list=["node-1", "node-2"],
- auto_tie_breaker=True,
- )
- .http.add_communication(
- "check_auth",
- [
- dict(
- label="node-1",
- output=json.dumps({"success": True}),
- response_code=200,
- ),
- dict(
- label="node-2",
- was_connected=False,
- errno=7,
- error_msg="Failed connect to node-2:2224;"
- " No route to host"
- ,
- ),
- ],
- action="remote/check_auth",
- param_list=[("check_auth_only", 1)],
+ self.corosync_conf_name = "corosync-3nodes.conf"
+ self.node_list = ["rh7-1", "rh7-2", "rh7-3"]
+ self.sbd_options = {
+ "SBD_WATCHDOG_TIMEOUT": "10",
+ "SBD_STARTMODE": "clean",
+ }
+ self.sbd_config_template = outdent("""\
+ # This file has been generated by pcs.
+ SBD_DELAY_START=no
+ {devices}SBD_OPTS="-n {node_name}"
+ SBD_PACEMAKER=yes
+ SBD_STARTMODE=clean
+ SBD_WATCHDOG_DEV={watchdog}
+ SBD_WATCHDOG_TIMEOUT=10
+ """)
+ self.watchdog_dict = {
+ node: "/dev/watchdog-{0}".format(node) for node in self.node_list
+ }
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.config.http.host.check_auth(node_labels=self.node_list)
+
+ def test_with_devices(self):
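+        # give each node a different number of devices (rh7-1: 1 ... rh7-3: 3)
+        # to verify that per-node device lists are honored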
+ device_dict = {
+ node: ["/dev/{0}-sbd{1}".format(node, j) for j in range(i)]
+ for i, node in enumerate(self.node_list, start=1)
+ }
+ config_generator = lambda node: self.sbd_config_template.format(
+ node_name=node,
+ watchdog=self.watchdog_dict[node],
+ devices='SBD_DEVICE="{0}"\n'.format(";".join(device_dict[node])),
+ )
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(
+ node, self.watchdog_dict[node], device_dict[node]
+ ) for node in self.node_list
+ ]
+ )
+ self.config.http.sbd.set_sbd_config(
+ config_generator=config_generator, node_labels=self.node_list,
+ )
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ node_labels=[self.node_list[0]]
+ )
+ self.config.http.sbd.enable_sbd(node_labels=self.node_list)
+ enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=None,
+ watchdog_dict=self.watchdog_dict,
+ sbd_options=self.sbd_options,
+ default_device_list=[],
+ node_device_dict=device_dict,
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(self.node_list)
+ )
+
+ def test_no_device(self):
+ config_generator = lambda node: self.sbd_config_template.format(
+ node_name=node,
+ watchdog=self.watchdog_dict[node],
+ devices="",
+ )
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(
+ node, self.watchdog_dict[node], []
+ ) for node in self.node_list
+ ]
+ )
+ self.config.corosync_conf.load(
+ filename=self.corosync_conf_name, name="corosync_conf.load2",
+ )
+ self.config.http.sbd.set_sbd_config(
+ config_generator=config_generator, node_labels=self.node_list,
+ )
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ node_labels=[self.node_list[0]]
+ )
+ self.config.http.sbd.enable_sbd(node_labels=self.node_list)
+ enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=None,
+ watchdog_dict=self.watchdog_dict,
+ sbd_options=self.sbd_options,
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(self.node_list)
+ )
+
+
+class OddNumOfNodesDefaultsSuccess(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ self.corosync_conf_name = "corosync-3nodes.conf"
+ self.node_list = ["rh7-1", "rh7-2", "rh7-3"]
+ self.sbd_config_template = outdent("""\
+ # This file has been generated by pcs.
+ SBD_DELAY_START=no
+ {devices}SBD_OPTS="-n {node_name}"
+ SBD_PACEMAKER=yes
+ SBD_STARTMODE=always
+ SBD_WATCHDOG_DEV=/dev/watchdog
+ SBD_WATCHDOG_TIMEOUT=5
+ """)
+ self.watchdog = "/dev/watchdog"
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.config.http.host.check_auth(node_labels=self.node_list)
+
+ def test_with_device(self):
+ device_list = ["/dev/sdb"]
+ config_generator = lambda node: self.sbd_config_template.format(
+ node_name=node,
+ devices='SBD_DEVICE="{0}"\n'.format(";".join(device_list)),
+ )
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(
+ node, self.watchdog, device_list
+ ) for node in self.node_list
+ ]
+ )
+ self.config.http.sbd.set_sbd_config(
+ config_generator=config_generator, node_labels=self.node_list,
+ )
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ node_labels=[self.node_list[0]]
+ )
+ self.config.http.sbd.enable_sbd(node_labels=self.node_list)
+ enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ default_device_list=device_list,
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(self.node_list)
+ )
+
+ def test_no_device(self):
+ config_generator = lambda node: self.sbd_config_template.format(
+ node_name=node, devices="",
+ )
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(node, self.watchdog, [])
+ for node in self.node_list
+ ]
+ )
+ self.config.corosync_conf.load(
+ filename=self.corosync_conf_name, name="corosync_conf.load2",
+ )
+ self.config.http.sbd.set_sbd_config(
+ config_generator=config_generator, node_labels=self.node_list,
+ )
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ node_labels=[self.node_list[0]]
+ )
+ self.config.http.sbd.enable_sbd(node_labels=self.node_list)
+ enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(self.node_list)
+ )
+
+
+class EvenNumOfNodes(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ self.corosync_conf_name = "corosync.conf"
+ self.node_list = ["rh7-1", "rh7-2"]
+ self.sbd_config_template = outdent("""\
+ # This file has been generated by pcs.
+ SBD_DELAY_START=no
+ {devices}SBD_OPTS="-n {node_name}"
+ SBD_PACEMAKER=yes
+ SBD_STARTMODE=always
+ SBD_WATCHDOG_DEV=/dev/watchdog
+ SBD_WATCHDOG_TIMEOUT=5
+ """)
+ self.watchdog = "/dev/watchdog"
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.config.http.host.check_auth(node_labels=self.node_list)
+
+ def test_with_device(self):
+ device_list = ["/dev/sdb"]
+ config_generator = lambda node: self.sbd_config_template.format(
+ node_name=node,
+ devices='SBD_DEVICE="{0}"\n'.format(";".join(device_list)),
+ )
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(
+ node, self.watchdog, device_list
+ ) for node in self.node_list
+ ]
+ )
+ self.config.http.sbd.set_sbd_config(
+ config_generator=config_generator, node_labels=self.node_list,
+ )
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ node_labels=[self.node_list[0]]
+ )
+ self.config.http.sbd.enable_sbd(node_labels=self.node_list)
+ enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ default_device_list=device_list,
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(self.node_list)
+ )
+
+ def test_no_device(self):
+ config_generator = lambda node: self.sbd_config_template.format(
+ node_name=node, devices="",
+ )
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(node, self.watchdog, [])
+ for node in self.node_list
+ ]
+ )
+ self.config.corosync_conf.load(
+ filename=self.corosync_conf_name, name="corosync_conf.load2",
+ )
+ self.config.http.corosync.check_corosync_offline(
+ node_labels=self.node_list
+ )
+ self.config.http.corosync.set_corosync_conf(
+ _get_corosync_conf_text_with_atb(self.corosync_conf_name),
+ node_labels=self.node_list,
+ )
+ self.config.runner.systemctl.is_active("corosync", is_active=False)
+ self.config.http.sbd.set_sbd_config(
+ config_generator=config_generator, node_labels=self.node_list,
+ )
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ node_labels=[self.node_list[0]]
+ )
+ self.config.http.sbd.enable_sbd(node_labels=self.node_list)
+ enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(
+ self.node_list, atb_set=True,
)
)
- def test_fail_when_any_node_is_offline(self):
+ def test_no_device_auto_tie_breaker_enabled(self):
+ config_generator = lambda node: self.sbd_config_template.format(
+ node_name=node, devices="",
+ )
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(node, self.watchdog, [])
+ for node in self.node_list
+ ]
+ )
+ self.config.corosync_conf.load(
+ filename=self.corosync_conf_name, name="corosync_conf.load2",
+ auto_tie_breaker=True,
+ )
+ self.config.http.sbd.set_sbd_config(
+ config_generator=config_generator, node_labels=self.node_list,
+ )
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ node_labels=[self.node_list[0]]
+ )
+ self.config.http.sbd.enable_sbd(node_labels=self.node_list)
+ enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(self.node_list)
+ )
+
+ def test_no_device_with_qdevice(self):
+ config_generator = lambda node: self.sbd_config_template.format(
+ node_name=node, devices="",
+ )
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(node, self.watchdog, [])
+ for node in self.node_list
+ ]
+ )
+ self.config.corosync_conf.load(
+ filename="corosync-qdevice.conf", name="corosync_conf.load2",
+ )
+ self.config.http.sbd.set_sbd_config(
+ config_generator=config_generator, node_labels=self.node_list,
+ )
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ node_labels=[self.node_list[0]]
+ )
+ self.config.http.sbd.enable_sbd(node_labels=self.node_list)
+ enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(self.node_list)
+ )
+
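+
+# Unreachable nodes make enable_sbd fail by default; with
+# ignore_offline_nodes=True they are skipped and reported as warnings.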
+class OfflineNodes(TestCase):
+ #pylint: disable=too-many-instance-attributes
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ self.corosync_conf_name = "corosync.conf"
+ node_list = ["rh7-1", "rh7-2"]
+ self.online_node_list = node_list[:-1]
+ self.offline_node_list = node_list[-1:]
+ self.watchdog = "/dev/watchdog"
+ self.err_msg = "error msg"
+ self.sbd_config_generator = outdent("""\
+ # This file has been generated by pcs.
+ SBD_DELAY_START=no
+ SBD_OPTS="-n {0}"
+ SBD_PACEMAKER=yes
+ SBD_STARTMODE=always
+ SBD_WATCHDOG_DEV=/dev/watchdog
+ SBD_WATCHDOG_TIMEOUT=5
+ """).format
+ self.offline_communication_list = (
+ [dict(label=node) for node in self.online_node_list]
+ +
+ [
+ dict(
+ label=node,
+ was_connected=False,
+ errno=1,
+ error_msg=self.err_msg,
+ ) for node in self.offline_node_list
+ ]
+ )
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.config.http.host.check_auth(
+ communication_list=self.offline_communication_list
+ )
+
+ def test_no_ignore_offline_nodes(self):
self.env_assist.assert_raise_library_error(
lambda: enable_sbd(
self.env_assist.get_env(),
@@ -54,119 +515,1176 @@ class EnableSbd(TestCase):
),
[],
)
- self.env_assist.assert_reports([
- fixture.error(
- report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
- node="node-2",
- reason="Failed connect to node-2:2224; No route to host",
- command="remote/check_auth",
- force_code=report_codes.SKIP_OFFLINE_NODES,
- )
- ])
-
- def test_success_enable(self):
- (self.config
- .http.add_communication(
- "check_sbd",
- [
- dict(label="node-1"),
- ],
- output=json.dumps({
- "sbd":{
- "installed": True,
- "enabled": False,
- "running": False
- },
- "watchdog":{
- "path": "/dev/watchdog",
- "exist": True,
- }
- }),
- response_code=200,
- action="remote/check_sbd",
- param_list=[
- ("watchdog", "/dev/watchdog"),
- ("device_list", [])
- ],
- )
- .corosync_conf.load(
- node_name_list=["node-1", "node-2"],
- auto_tie_breaker=True,
- name="corosync_conf.load-extra",
+ self.env_assist.assert_reports(
+ [
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node=node,
+ command="remote/check_auth",
+ reason=self.err_msg,
+ ) for node in self.offline_node_list
+ ]
+ )
+
+ def test_ignore_offline_nodes(self):
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(node, self.watchdog, [])
+ for node in self.online_node_list
+ ]
+ )
+ self.config.corosync_conf.load(
+ filename="corosync-qdevice.conf", name="corosync_conf.load2",
+ )
+ self.config.http.sbd.set_sbd_config(
+ config_generator=self.sbd_config_generator,
+ node_labels=self.online_node_list,
+ )
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ node_labels=[self.online_node_list[0]]
+ )
+ self.config.http.sbd.enable_sbd(node_labels=self.online_node_list)
+ enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=None,
+ watchdog_dict={},
+ sbd_options={},
+ ignore_offline_nodes=True,
+ )
+ self.env_assist.assert_reports(
+ [
+ fixture.warn(
+ report_codes.OMITTING_NODE,
+ node=node,
+ ) for node in self.offline_node_list
+ ]
+ +
+ _sbd_enable_successful_report_list_fixture(self.online_node_list)
+ )
+
+ def test_ignore_offline_nodes_atb_needed(self):
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(node, self.watchdog, [])
+ for node in self.online_node_list
+ ]
+ )
+ self.config.corosync_conf.load(
+ filename=self.corosync_conf_name, name="corosync_conf.load2",
+ )
+ self.config.http.corosync.check_corosync_offline(
+ communication_list=self.offline_communication_list
+ )
+ self.config.http.corosync.set_corosync_conf(
+ _get_corosync_conf_text_with_atb(self.corosync_conf_name),
+ communication_list=self.offline_communication_list,
+ )
+ self.config.runner.systemctl.is_active("corosync", is_active=False)
+ self.config.http.sbd.set_sbd_config(
+ config_generator=self.sbd_config_generator,
+ node_labels=self.online_node_list,
+ )
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ node_labels=[self.online_node_list[0]]
+ )
+ self.config.http.sbd.enable_sbd(node_labels=self.online_node_list)
+ enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=None,
+ watchdog_dict={},
+ sbd_options={},
+ ignore_offline_nodes=True,
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(
+ self.online_node_list,
+ skipped_offline_node_list=self.offline_node_list,
+ err_msg=self.err_msg,
+ atb_set=True,
)
- .http.add_communication(
- "set_sbd_config",
- [
- dict(label="node-1"),
- ],
- output=json.dumps({
- "sbd":{
- "installed": True,
- "enabled": False,
- "running": False
- },
- "watchdog":{
- "path": "/dev/watchdog",
- "exist": True,
- }
- }),
- response_code=200,
- action="remote/set_sbd_config",
- param_list=[("config", outdent(
- """\
- # This file has been generated by pcs.
- SBD_DELAY_START=no
- SBD_OPTS="-n node-1"
- SBD_PACEMAKER=yes
- SBD_STARTMODE=always
- SBD_WATCHDOG_DEV=/dev/watchdog
- SBD_WATCHDOG_TIMEOUT=5
- """
- ))],
+ )
+
+
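+# Pre-flight validation: unknown nodes, non-absolute watchdog or device
+# paths, missing or surplus devices and unknown SBD options are all
+# rejected before any configuration is distributed to the cluster.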
+class Validations(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ self.corosync_conf_name = "corosync.conf"
+ self.node_list = ["rh7-1", "rh7-2"]
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+
+ def test_non_existing_node_in_watchdogs(self):
+ unknown_node = "unknown_node"
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=None,
+ watchdog_dict={
+ node: "/dev/watchdog"
+ for node in (self.node_list + [unknown_node])
+ },
+ sbd_options={},
+ ),
+ [
+ fixture.error(
+ report_codes.NODE_NOT_FOUND,
+ node=unknown_node,
+ searched_types=[],
+ )
+ ],
+ )
+ self.env_assist.assert_reports([])
+
+ def test_non_existing_node_in_devices(self):
+ unknown_node = "unknown_node"
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog="/dev/watchdog",
+ watchdog_dict={},
+ sbd_options={},
+ default_device_list="/device",
+ node_device_dict={
+ node: ["/device"]
+ for node in (self.node_list + [unknown_node])
+ }
+ ),
+ [
+ fixture.error(
+ report_codes.NODE_NOT_FOUND,
+ node=unknown_node,
+ searched_types=[],
+ )
+ ],
+ )
+ self.env_assist.assert_reports([])
+
+ def test_watchdog_not_abs_path(self):
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog="wd1",
+ watchdog_dict={self.node_list[0]: "wd2"},
+ sbd_options={},
+ ),
+ [
+ fixture.error(
+ report_codes.WATCHDOG_INVALID,
+ watchdog=w,
+ ) for w in ["wd1", "wd2"]
+ ],
+ )
+ self.env_assist.assert_reports([])
+
+ def test_device_not_abs_path(self):
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog="/dev/watchdog",
+ watchdog_dict={},
+ sbd_options={},
+ default_device_list=["device1"],
+ node_device_dict={self.node_list[0]: ["device2"]}
+ ),
+ [
+ fixture.error(
+ report_codes.SBD_DEVICE_PATH_NOT_ABSOLUTE,
+ node=node,
+ device=dev,
+ ) for node, dev in [
+ (self.node_list[0], "device2"),
+ (self.node_list[1], "device1"),
+ ]
+ ],
+ )
+ self.env_assist.assert_reports([])
+
+ def test_no_device_for_node(self):
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog="/dev/watchdog",
+ watchdog_dict={},
+ sbd_options={},
+ default_device_list=[],
+ node_device_dict={self.node_list[0]: ["/dev/device1"]}
+ ),
+ [
+ fixture.error(
+ report_codes.SBD_NO_DEVICE_FOR_NODE,
+ node=self.node_list[1],
+ )
+ ],
+ )
+ self.env_assist.assert_reports([])
+
+ def test_too_many_devices(self):
+ max_dev_num = settings.sbd_max_device_num
+ dev_list = ["/dev/dev{0}".format(i) for i in range(max_dev_num + 1)]
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog="/dev/watchdog",
+ watchdog_dict={},
+ sbd_options={},
+ default_device_list=["/dev/dev1"],
+ node_device_dict={self.node_list[0]: dev_list}
+ ),
+ [
+ fixture.error(
+ report_codes.SBD_TOO_MANY_DEVICES_FOR_NODE,
+ node=self.node_list[0],
+ device_list=dev_list,
+ max_devices=max_dev_num,
+ )
+ ],
+ )
+ self.env_assist.assert_reports([])
+
+ def test_unknown_sbd_opts(self):
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog="/dev/watchdog",
+ watchdog_dict={},
+ sbd_options={
+ "UNKNOWN_OPT1": 1,
+ "SBD_STARTMODE": "clean",
+ "UNKNOWN_OPT2": "val",
+ "SBD_WATCHDOG_DEV": "dev",
+ },
+ ),
+ [
+ fixture.error(
+ report_codes.INVALID_OPTIONS,
+ option_names=[opt],
+ option_type=None,
+ allowed=sorted(ALLOWED_SBD_OPTION_LIST),
+ allowed_patterns=[],
+ force_code=report_codes.FORCE_OPTIONS,
+ ) for opt in ["UNKNOWN_OPT1", "UNKNOWN_OPT2"]
+ ]
+ +
+ [
+ fixture.error(
+ report_codes.INVALID_OPTIONS,
+ option_names=["SBD_WATCHDOG_DEV"],
+ option_type=None,
+ allowed=sorted(ALLOWED_SBD_OPTION_LIST),
+ allowed_patterns=[],
+ ),
+ ]
+ )
+ self.env_assist.assert_reports([])
+
+ def test_unknown_sbd_opts_allowed(self):
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog="/dev/watchdog",
+ watchdog_dict={},
+ sbd_options={
+ "UNKNOWN_OPT1": 1,
+ "SBD_STARTMODE": "clean",
+ "UNKNOWN_OPT2": "val",
+ "SBD_WATCHDOG_DEV": "dev",
+ },
+ allow_unknown_opts=True,
+ ),
+ [
+ fixture.error(
+ report_codes.INVALID_OPTIONS,
+ option_names=["SBD_WATCHDOG_DEV"],
+ option_type=None,
+ allowed=sorted(ALLOWED_SBD_OPTION_LIST),
+ allowed_patterns=[],
+ ),
+ ]
+ )
+ self.env_assist.assert_reports(
+ [
+ fixture.warn(
+ report_codes.INVALID_OPTIONS,
+ option_names=[opt],
+ option_type=None,
+ allowed=sorted(ALLOWED_SBD_OPTION_LIST),
+ allowed_patterns=[],
+ ) for opt in ["UNKNOWN_OPT1", "UNKNOWN_OPT2"]
+ ]
+ )
+
+ def test_sbd_not_installed(self):
+ watchdog = "/dev/watchdog"
+ self.config.http.host.check_auth(node_labels=self.node_list)
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(
+ self.node_list[0], watchdog, []
+ ),
+ dict(
+ label=self.node_list[1],
+ output=json.dumps({
+ "sbd": {
+ "installed": False,
+ },
+ "watchdog": {
+ "exist": True,
+ "path": watchdog,
+ },
+ "device_list": [],
+ }),
+ param_list=[
+ ("watchdog", watchdog),
+ ("device_list", "[]"),
+ ]
+ )
+ ]
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ [fixture.info(report_codes.SBD_CHECK_STARTED)]
+ +
+ [
+ fixture.error(
+ report_codes.SBD_NOT_INSTALLED, node=self.node_list[1]
+ )
+ ]
+ +
+ [
+ fixture.info(
+ report_codes.SBD_CHECK_SUCCESS, node=self.node_list[0]
+ )
+ ]
+ )
+
+ def test_watchdog_not_found(self):
+ watchdog = "/dev/watchdog"
+ self.config.http.host.check_auth(node_labels=self.node_list)
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(
+ self.node_list[0], watchdog, []
+ ),
+ dict(
+ label=self.node_list[1],
+ output=json.dumps({
+ "sbd": {
+ "installed": True,
+ },
+ "watchdog": {
+ "exist": False,
+ "path": watchdog,
+ },
+ "device_list": [],
+ }),
+ param_list=[
+ ("watchdog", watchdog),
+ ("device_list", "[]"),
+ ]
+ )
+ ]
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ [fixture.info(report_codes.SBD_CHECK_STARTED)]
+ +
+ [
+ fixture.error(
+ report_codes.WATCHDOG_NOT_FOUND,
+ node=self.node_list[1],
+ watchdog=watchdog,
+ )
+ ]
+ +
+ [
+ fixture.info(
+ report_codes.SBD_CHECK_SUCCESS, node=self.node_list[0]
+ )
+ ]
+ )
+
+ def test_device_not_exists_not_block_device(self):
+ watchdog = "/dev/watchdog"
+ device_list = ["/dev/dev0", "/dev/dev1"]
+ self.config.http.host.check_auth(node_labels=self.node_list)
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(
+ self.node_list[0], watchdog, device_list
+ ),
+ dict(
+ label=self.node_list[1],
+ output=json.dumps({
+ "sbd": {
+ "installed": True,
+ },
+ "watchdog": {
+ "exist": True,
+ "path": watchdog,
+ },
+ "device_list": [
+ dict(
+ path=device_list[0],
+ exist=False,
+ block_device=False,
+ ),
+ dict(
+ path=device_list[1],
+ exist=True,
+ block_device=False,
+ ),
+ ],
+ }),
+ param_list=[
+ ("watchdog", watchdog),
+ ("device_list", json.dumps(device_list)),
+ ]
+ )
+ ]
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ default_device_list=device_list,
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ [fixture.info(report_codes.SBD_CHECK_STARTED)]
+ +
+ [
+ fixture.error(
+ report_codes.SBD_DEVICE_DOES_NOT_EXIST,
+ node=self.node_list[1],
+ device=device_list[0],
+ ),
+ fixture.error(
+ report_codes.SBD_DEVICE_IS_NOT_BLOCK_DEVICE,
+ node=self.node_list[1],
+ device=device_list[1],
+ )
+ ]
+ +
+ [
+ fixture.info(
+ report_codes.SBD_CHECK_SUCCESS, node=self.node_list[0]
+ )
+ ]
+ )
+
+ def test_multiple_validation_failures(self):
+ unknown_node_list = ["unknown_node{0}".format(i) for i in range(2)]
+ max_dev_num = settings.sbd_max_device_num
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog="watchdog0",
+ watchdog_dict={
+ unknown_node_list[0]: "/dev/watchdog",
+ },
+ sbd_options={
+ "UNKNOWN_OPT1": 1,
+ "SBD_STARTMODE": "clean",
+ "UNKNOWN_OPT2": "val",
+ "SBD_WATCHDOG_DEV": "dev",
+ },
+ default_device_list=[],
+ node_device_dict={
+ self.node_list[0]: ["dev", "/dev0", "/dev1", "/dev2"],
+ unknown_node_list[0]: ["/dev/device0"],
+ unknown_node_list[1]: ["/dev/device0"],
+ }
+ ),
+ [
+ fixture.error(
+ report_codes.NODE_NOT_FOUND,
+ node=node,
+ searched_types=[],
+ ) for node in unknown_node_list
+ ]
+ +
+ [
+ fixture.error(
+ report_codes.WATCHDOG_INVALID, watchdog="watchdog0"
+ ),
+ fixture.error(
+ report_codes.WATCHDOG_INVALID, watchdog="watchdog0"
+ ),
+ fixture.error(
+ report_codes.SBD_NO_DEVICE_FOR_NODE, node=self.node_list[1],
+ ),
+ fixture.error(
+ report_codes.SBD_TOO_MANY_DEVICES_FOR_NODE,
+ node=self.node_list[0],
+ device_list=["dev", "/dev0", "/dev1", "/dev2"],
+ max_devices=max_dev_num,
+ ),
+ fixture.error(
+ report_codes.INVALID_OPTIONS,
+ option_names=["SBD_WATCHDOG_DEV"],
+ option_type=None,
+ allowed=sorted(ALLOWED_SBD_OPTION_LIST),
+ allowed_patterns=[],
+ ),
+ ]
+ +
+ [
+ fixture.error(
+ report_codes.INVALID_OPTIONS,
+ option_names=[opt],
+ option_type=None,
+ allowed=sorted(ALLOWED_SBD_OPTION_LIST),
+ allowed_patterns=[],
+ force_code=report_codes.FORCE_OPTIONS,
+ ) for opt in ["UNKNOWN_OPT1", "UNKNOWN_OPT2"]
+ ]
+ )
+ self.env_assist.assert_reports([])
+
+
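+# setUp declares the complete successful call sequence; each test cuts off
+# its tail with _remove_calls() and substitutes a failing step.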
+class FailureHandling(TestCase):
+ #pylint: disable=too-many-instance-attributes
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ self.corosync_conf_name = "corosync.conf"
+ self.node_list = ["rh7-1", "rh7-2"]
+ self.sbd_config_generator = outdent("""\
+ # This file has been generated by pcs.
+ SBD_DELAY_START=no
+ SBD_OPTS="-n {0}"
+ SBD_PACEMAKER=yes
+ SBD_STARTMODE=always
+ SBD_WATCHDOG_DEV=/dev/watchdog
+ SBD_WATCHDOG_TIMEOUT=5
+ """).format
+ self.watchdog = "/dev/watchdog"
+ self.reason = "failure reason"
+ self.communication_list_failure = [
+ dict(
+ label=self.node_list[0],
+ response_code=400,
+ output=self.reason,
+ ),
+ dict(
+ label=self.node_list[1],
)
- .http.add_communication(
- "remove_stonith_watchdog_timeout",
- [
- dict(label="node-1"),
- ],
- output="OK",
- response_code=200,
- action="remote/remove_stonith_watchdog_timeout",
+ ]
+ self.communication_list_not_connected = [
+ dict(
+ label=self.node_list[0],
+ errno=1,
+ error_msg=self.reason,
+ was_connected=False,
+ ),
+ dict(
+ label=self.node_list[1],
)
- .http.add_communication(
- "sbd_enable",
- [
- dict(label="node-1"),
- ],
- output="SBD enabled",
- response_code=200,
- action="remote/sbd_enable",
+ ]
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.config.http.host.check_auth(node_labels=self.node_list)
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ _check_sbd_comm_success_fixture(node, self.watchdog, [])
+ for node in self.node_list
+ ]
+ )
+ self.config.corosync_conf.load(
+ filename=self.corosync_conf_name, name="corosync_conf.load2",
+ )
+ self.config.http.corosync.check_corosync_offline(
+ node_labels=self.node_list
+ )
+ self.config.http.corosync.set_corosync_conf(
+ _get_corosync_conf_text_with_atb(self.corosync_conf_name),
+ node_labels=self.node_list,
+ )
+ self.config.runner.systemctl.is_active("corosync", is_active=False)
+ self.config.http.sbd.set_sbd_config(
+ config_generator=self.sbd_config_generator,
+ node_labels=self.node_list,
+ )
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ node_labels=[self.node_list[0]]
+ )
+
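+ # Drop the last n calls declared in setUp so a test can replace the
+ # tail of the expected call sequence with a failing variant.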
+ def _remove_calls(self, n):
+ for name in self.config.calls.names[-n:]:
+ self.config.calls.remove(name)
+
+ def test_enable_failed(self):
+ self.config.http.sbd.enable_sbd(
+ communication_list=self.communication_list_failure
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(
+ self.node_list, atb_set=True
+ )[:-3]
+ +
+ [
+ fixture.info(
+ report_codes.SERVICE_ENABLE_SUCCESS,
+ service="sbd",
+ node=self.node_list[1],
+ instance=None
+ ),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/sbd_enable"
+ )
+ ]
+ )
+
+ def test_enable_not_connected(self):
+ self.config.http.sbd.enable_sbd(
+ communication_list=self.communication_list_not_connected
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(
+ self.node_list, atb_set=True
+ )[:-3]
+ +
+ [
+ fixture.info(
+ report_codes.SERVICE_ENABLE_SUCCESS,
+ service="sbd",
+ node=self.node_list[1],
+ instance=None
+ ),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/sbd_enable"
+ )
+ ]
+ )
+
+ def test_removing_stonith_wd_timeout_failure(self):
+ self._remove_calls(2)
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ communication_list=[
+ self.communication_list_failure[:1],
+ [dict(label=self.node_list[1])]
+ ]
+ )
+ self.config.http.sbd.enable_sbd(node_labels=self.node_list)
+ enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(
+ self.node_list, atb_set=True
)
+ +
+ [
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/remove_stonith_watchdog_timeout",
+ )
+ ]
+ )
+
+ def test_removing_stonith_wd_timeout_not_connected(self):
+ self._remove_calls(2)
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ communication_list=[
+ self.communication_list_not_connected[:1],
+ [dict(label=self.node_list[1])]
+ ]
)
+ self.config.http.sbd.enable_sbd(node_labels=self.node_list)
enable_sbd(
self.env_assist.get_env(),
- default_watchdog=None,
+ default_watchdog=self.watchdog,
watchdog_dict={},
sbd_options={},
- ignore_offline_nodes=True,
)
- self.env_assist.assert_reports([
- fixture.info(report_codes.SBD_ENABLING_STARTED),
- fixture.warn(
- report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(
+ self.node_list, atb_set=True
+ )
+ +
+ [
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/remove_stonith_watchdog_timeout",
+ )
+ ]
+ )
+
+ def test_removing_stonith_wd_timeout_complete_failure(self):
+ self._remove_calls(2)
+ self.config.http.pcmk.remove_stonith_watchdog_timeout(
+ communication_list=[
+ self.communication_list_not_connected[:1],
+ [dict(
+ label=self.node_list[1],
+ response_code=400,
+ output=self.reason,
+ )],
+ ]
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
),
- fixture.warn(report_codes.OMITTING_NODE, node="node-2"),
- fixture.info(report_codes.SBD_CHECK_STARTED),
- fixture.info(report_codes.SBD_CHECK_SUCCESS, node="node-1"),
- fixture.info(report_codes.SBD_CONFIG_DISTRIBUTION_STARTED),
- fixture.info(
- report_codes.SBD_CONFIG_ACCEPTED_BY_NODE,
- node="node-1"
+ []
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(
+ self.node_list, atb_set=True
+ )[:-4]
+ +
+ [
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/remove_stonith_watchdog_timeout",
+ ),
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node=self.node_list[1],
+ reason=self.reason,
+ command="remote/remove_stonith_watchdog_timeout",
+ ),
+ fixture.error(
+ report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE,
+ )
+ ]
+ )
+
+ def test_set_sbd_config_failure(self):
+ self._remove_calls(4)
+ self.config.http.sbd.set_sbd_config(
+ communication_list=[
+ dict(
+ label=self.node_list[0],
+ param_list=[
+ ("config", self.sbd_config_generator(self.node_list[0]))
+ ],
+ response_code=400,
+ output=self.reason,
+ ),
+ dict(
+ label=self.node_list[1],
+ param_list=[
+ ("config", self.sbd_config_generator(self.node_list[1]))
+ ],
+ ),
+ ]
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
),
- fixture.info(
- report_codes.SERVICE_ENABLE_SUCCESS,
- node="node-1",
- instance=None,
- service="sbd",
+ []
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(
+ self.node_list, atb_set=True
+ )[:-6]
+ +
+ [
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/set_sbd_config",
+ ),
+ fixture.info(
+ report_codes.SBD_CONFIG_ACCEPTED_BY_NODE,
+ node=self.node_list[1],
+ )
+ ]
+ )
+
+ def test_set_corosync_conf_failed(self):
+ self._remove_calls(7)
+ self.config.http.corosync.set_corosync_conf(
+ _get_corosync_conf_text_with_atb(self.corosync_conf_name),
+ communication_list=self.communication_list_failure,
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
),
- ])
+ []
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(
+ self.node_list, atb_set=True
+ )[:-9]
+ +
+ [
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/set_corosync_conf",
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ ),
+ fixture.error(
+ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+ node=self.node_list[0],
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ ),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node=self.node_list[1],
+ )
+ ]
+ )
+
+ def test_set_corosync_conf_not_connected(self):
+ self._remove_calls(7)
+ self.config.http.corosync.set_corosync_conf(
+ _get_corosync_conf_text_with_atb(self.corosync_conf_name),
+ communication_list=self.communication_list_not_connected,
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(
+ self.node_list, atb_set=True
+ )[:-9]
+ +
+ [
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/set_corosync_conf",
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ ),
+ fixture.error(
+ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+ node=self.node_list[0],
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ ),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node=self.node_list[1],
+ )
+ ]
+ )
+
+ def test_corosync_not_running_failed(self):
+ self._remove_calls(9)
+ self.config.http.corosync.check_corosync_offline(
+ communication_list=self.communication_list_failure,
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(
+ self.node_list, atb_set=True
+ )[:-12]
+ +
+ [
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/status",
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ ),
+ fixture.error(
+ report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+ node=self.node_list[0],
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ ),
+ fixture.info(
+ report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
+ node=self.node_list[1]
+ )
+ ]
+ )
+
+ def test_corosync_not_running_not_connected(self):
+ self._remove_calls(9)
+ self.config.http.corosync.check_corosync_offline(
+ communication_list=self.communication_list_not_connected,
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ _sbd_enable_successful_report_list_fixture(
+ self.node_list, atb_set=True
+ )[:-12]
+ +
+ [
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/status",
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ ),
+ fixture.error(
+ report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+ node=self.node_list[0],
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ ),
+ fixture.info(
+ report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
+ node=self.node_list[1]
+ )
+ ]
+ )
+
+ def test_check_sbd_invalid_data_format(self):
+ self._remove_calls(12)
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ dict(
+ label=self.node_list[0],
+ param_list=[
+ ("watchdog", self.watchdog),
+ ("device_list", "[]"),
+ ],
+ output="{}",
+ ),
+ dict(
+ label=self.node_list[1],
+ param_list=[
+ ("watchdog", self.watchdog),
+ ("device_list", "[]"),
+ ],
+ output="not JSON",
+ ),
+ ]
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ [fixture.info(report_codes.SBD_CHECK_STARTED)]
+ +
+ [
+ fixture.error(report_codes.INVALID_RESPONSE_FORMAT, node=node)
+ for node in self.node_list
+ ]
+ )
+
+ def test_check_sbd_failure(self):
+ self._remove_calls(12)
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ dict(
+ label=self.node_list[0],
+ param_list=[
+ ("watchdog", self.watchdog),
+ ("device_list", "[]"),
+ ],
+ response_code=400,
+ output=self.reason,
+ ),
+ _check_sbd_comm_success_fixture(
+ self.node_list[1], self.watchdog, []
+ )
+ ]
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ [
+ fixture.info(report_codes.SBD_CHECK_STARTED),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/check_sbd",
+ ),
+ fixture.info(
+ report_codes.SBD_CHECK_SUCCESS,
+ node=self.node_list[1]
+ )
+ ]
+ )
+
+ def test_check_sbd_not_connected(self):
+ self._remove_calls(12)
+ self.config.http.sbd.check_sbd(
+ communication_list=[
+ dict(
+ label=self.node_list[0],
+ param_list=[
+ ("watchdog", self.watchdog),
+ ("device_list", "[]"),
+ ],
+ errno=1,
+ error_msg=self.reason,
+ was_connected=False,
+ ),
+ _check_sbd_comm_success_fixture(
+ self.node_list[1], self.watchdog, []
+ )
+ ]
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ [
+ fixture.info(report_codes.SBD_CHECK_STARTED),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/check_sbd",
+ ),
+ fixture.info(
+ report_codes.SBD_CHECK_SUCCESS,
+ node=self.node_list[1]
+ )
+ ]
+ )
+
+ def test_get_online_targets_failed(self):
+ self._remove_calls(14)
+ self.config.http.host.check_auth(
+ communication_list=self.communication_list_failure
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ [
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/check_auth",
+ )
+ ]
+ )
+
+ def test_get_online_targets_not_connected(self):
+ self._remove_calls(14)
+ self.config.http.host.check_auth(
+ communication_list=self.communication_list_not_connected
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=self.watchdog,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ []
+ )
+ self.env_assist.assert_reports(
+ [
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/check_auth",
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ )
+ ]
+ )
diff --git a/pcs/lib/commands/test/test_acl.py b/pcs/lib/commands/test/test_acl.py
index 827df08..b353761 100644
--- a/pcs/lib/commands/test/test_acl.py
+++ b/pcs/lib/commands/test/test_acl.py
@@ -5,13 +5,14 @@ from __future__ import (
)
import pcs.lib.commands.acl as cmd_acl
+from pcs.common.tools import Version
from pcs.lib.env import LibraryEnvironment
from pcs.test.tools.assertions import ExtendedAssertionsMixin
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.test.tools.pcs_unittest import mock, TestCase
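+# Version (from pcs.common.tools) replaces the bare tuple used for CIB
+# schema version comparisons.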
-REQUIRED_CIB_VERSION = (2, 0, 0)
+REQUIRED_CIB_VERSION = Version(2, 0, 0)
class AclCommandsTest(TestCase, ExtendedAssertionsMixin):
diff --git a/pcs/lib/commands/test/test_alert.py b/pcs/lib/commands/test/test_alert.py
index cba7378..d812e3a 100644
--- a/pcs/lib/commands/test/test_alert.py
+++ b/pcs/lib/commands/test/test_alert.py
@@ -238,7 +238,7 @@ class UpdateAlertTest(TestCase):
"context_type": "alerts",
"context_id": "",
"id": "unknown",
- "id_description": "alert",
+ "expected_types": ["alert"],
},
None
),
@@ -303,7 +303,7 @@ class RemoveAlertTest(TestCase):
"context_type": "alerts",
"context_id": "",
"id": "unknown1",
- "id_description": "alert",
+ "expected_types": ["alert"],
},
None
),
@@ -314,7 +314,7 @@ class RemoveAlertTest(TestCase):
"context_type": "alerts",
"context_id": "",
"id": "unknown2",
- "id_description": "alert",
+ "expected_types": ["alert"],
},
None
),
@@ -528,8 +528,11 @@ class UpdateRecipientTest(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "recipient",
- "id_description": "recipient"
- }
+ "expected_types": ["recipient"],
+ "context_id": "",
+ "context_type": "alerts",
+ },
+ None
)
],
)
diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py
index cd43c2c..c9af6ff 100644
--- a/pcs/lib/commands/test/test_booth.py
+++ b/pcs/lib/commands/test/test_booth.py
@@ -5,11 +5,12 @@ from __future__ import (
)
import os
-import base64
+from collections import namedtuple
-from pcs.test.tools.pcs_unittest import TestCase, skip
-from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.pcs_unittest import TestCase, mock
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.test.tools.assertions import (
assert_raise_library_error,
@@ -18,13 +19,11 @@ from pcs.test.tools.assertions import (
from pcs.test.tools.misc import create_patcher
from pcs import settings
-from pcs.common import report_codes
+from pcs.common import report_codes, env_file_role_codes as file_roles
from pcs.lib.env import LibraryEnvironment
-from pcs.lib.node import NodeAddresses
from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
from pcs.lib.commands import booth as commands
from pcs.lib.external import (
- NodeCommunicator,
CommandRunner,
EnableServiceError,
DisableServiceError,
@@ -138,62 +137,356 @@ class ConfigDestroyTest(TestCase):
)
])
-@skip("TODO: rewrite using new testing fremework")
-@mock.patch("pcs.lib.commands.booth.config_structure.get_authfile")
-@mock.patch("pcs.lib.commands.booth.parse")
-@mock.patch("pcs.lib.booth.config_files.read_authfile")
-@mock.patch("pcs.lib.booth.sync.send_config_to_all_nodes")
+
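+# ConfigSyncTest now runs on the command-env fixture: the expected fs,
+# corosync and http calls are declared on self.config up front instead of
+# being patched ad hoc.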
class ConfigSyncTest(TestCase):
def setUp(self):
- self.mock_env = mock.MagicMock()
- self.mock_rep = MockLibraryReportProcessor()
- self.mock_env.report_processor = self.mock_rep
- self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- self.mock_env.node_communicator.return_value = self.mock_com
- self.node_list = ["node1", "node2", "node3"]
- corosync_conf = mock.MagicMock()
- corosync_conf.get_nodes.return_value = self.node_list
- self.mock_env.get_corosync_conf.return_value = corosync_conf
- self.mock_env.booth.get_config_content.return_value = "config"
-
- def test_skip_offline(
- self, mock_sync, mock_read_key, mock_parse, mock_get_authfile
- ):
- mock_get_authfile.return_value = "/key/path.key"
- mock_read_key.return_value = "key"
- commands.config_sync(self.mock_env, "name", True)
- self.mock_env.booth.get_config_content.assert_called_once_with()
- mock_read_key.assert_called_once_with(self.mock_rep, "/key/path.key")
- mock_parse.assert_called_once_with("config")
- mock_sync.assert_called_once_with(
- self.mock_com,
- self.mock_rep,
- self.node_list,
- "name",
- "config",
- authfile="/key/path.key",
- authfile_data="key",
- skip_offline=True
- )
-
- def test_do_not_skip_offline(
- self, mock_sync, mock_read_key, mock_parse, mock_get_authfile
- ):
- mock_get_authfile.return_value = "/key/path.key"
- mock_read_key.return_value = "key"
- commands.config_sync(self.mock_env, "name")
- self.mock_env.booth.get_config_content.assert_called_once_with()
- mock_read_key.assert_called_once_with(self.mock_rep, "/key/path.key")
- mock_parse.assert_called_once_with("config")
- mock_sync.assert_called_once_with(
- self.mock_com,
- self.mock_rep,
- self.node_list,
- "name",
- "config",
- authfile="/key/path.key",
- authfile_data="key",
- skip_offline=False
+ self.env_assist, self.config = get_env_tools(self)
+ self.name = "booth"
+ self.config_path = os.path.join(
+ settings.booth_config_dir, "{}.conf".format(self.name)
+ )
+ self.node_list = ["rh7-1", "rh7-2"]
+ self.config.env.set_booth({"name": self.name})
+ self.reason = "fail"
+
+ def test_success(self):
+ auth_file = "auth.file"
+ auth_file_path = os.path.join(settings.booth_config_dir, auth_file)
+ config_content = "authfile={}".format(auth_file_path)
+ auth_file_content = b"auth"
+ (self.config
+ .fs.open(
+ self.config_path,
+ mock.mock_open(read_data=config_content)(),
+ name="open.conf"
+ )
+ .fs.open(
+ auth_file_path,
+ mock.mock_open(read_data=auth_file_content)(),
+ mode="rb",
+ name="open.authfile",
+ )
+ .corosync_conf.load()
+ .http.booth.send_config(
+ self.name, config_content,
+ authfile=auth_file,
+ authfile_data=auth_file_content,
+ node_labels=self.node_list,
+ )
+ )
+
+ commands.config_sync(self.env_assist.get_env(), self.name)
+ self.env_assist.assert_reports(
+ [fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED)]
+ +
+ [
+ fixture.info(
+ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+ node=node,
+ name_list=[self.name]
+ ) for node in self.node_list
+ ]
+ )
+
+ def test_node_failure(self):
+ (self.config
+ .fs.open(
+ self.config_path,
+ mock.mock_open(read_data="")(),
+ name="open.conf"
+ )
+ .corosync_conf.load()
+ .http.booth.send_config(
+ self.name, "",
+ communication_list=[
+ dict(
+ label=self.node_list[0],
+ response_code=400,
+ output=self.reason,
+ ),
+ dict(
+ label=self.node_list[1],
+ )
+ ]
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.config_sync(self.env_assist.get_env(), self.name),
+ []
+ )
+ self.env_assist.assert_reports(
+ [
+ fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+ node=self.node_list[1],
+ name_list=[self.name]
+ ),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/booth_set_config",
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ ),
+ ]
+ )
+
+ def test_node_failure_skip_offline(self):
+ (self.config
+ .fs.open(
+ self.config_path,
+ mock.mock_open(read_data="")(),
+ name="open.conf"
+ )
+ .corosync_conf.load()
+ .http.booth.send_config(
+ self.name, "",
+ communication_list=[
+ dict(
+ label=self.node_list[0],
+ response_code=400,
+ output=self.reason,
+ ),
+ dict(
+ label=self.node_list[1],
+ )
+ ]
+ )
+ )
+
+ commands.config_sync(
+ self.env_assist.get_env(), self.name, skip_offline_nodes=True
+ )
+ self.env_assist.assert_reports(
+ [
+ fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+ node=self.node_list[1],
+ name_list=[self.name]
+ ),
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/booth_set_config",
+ ),
+ ]
+ )
+
+ def test_node_offline(self):
+ (self.config
+ .fs.open(
+ self.config_path,
+ mock.mock_open(read_data="")(),
+ name="open.conf"
+ )
+ .corosync_conf.load()
+ .http.booth.send_config(
+ self.name, "",
+ communication_list=[
+ dict(
+ label=self.node_list[0],
+ errno=1,
+ error_msg=self.reason,
+ was_connected=False,
+ ),
+ dict(
+ label=self.node_list[1],
+ )
+ ],
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.config_sync(self.env_assist.get_env(), self.name),
+ []
+ )
+ self.env_assist.assert_reports(
+ [
+ fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+ node=self.node_list[1],
+ name_list=[self.name]
+ ),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/booth_set_config",
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ ),
+ ]
+ )
+
+ def test_node_offline_skip_offline(self):
+ (self.config
+ .fs.open(
+ self.config_path,
+ mock.mock_open(read_data="")(),
+ name="open.conf"
+ )
+ .corosync_conf.load()
+ .http.booth.send_config(
+ self.name, "",
+ communication_list=[
+ dict(
+ label=self.node_list[0],
+ errno=1,
+ error_msg=self.reason,
+ was_connected=False,
+ ),
+ dict(
+ label=self.node_list[1],
+ )
+ ],
+ )
+ )
+
+ commands.config_sync(
+ self.env_assist.get_env(), self.name, skip_offline_nodes=True
+ )
+ self.env_assist.assert_reports(
+ [
+ fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+ node=self.node_list[1],
+ name_list=[self.name]
+ ),
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node=self.node_list[0],
+ reason=self.reason,
+ command="remote/booth_set_config",
+ ),
+ ]
+ )
+
+ def test_config_not_accessible(self):
+ self.config.fs.open(
+ self.config_path,
+ side_effect=EnvironmentError(0, self.reason, self.config_path),
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.config_sync(self.env_assist.get_env(), self.name),
+ [
+ fixture.error(
+ report_codes.FILE_IO_ERROR,
+ reason="{}: '{}'".format(self.reason, self.config_path),
+ file_role=file_roles.BOOTH_CONFIG,
+ file_path=self.config_path,
+ operation="read",
+ )
+ ],
+ expected_in_processor=False,
+ )
+ self.env_assist.assert_reports([])
+
+ def test_authfile_not_accessible(self):
+ auth_file = "auth.file"
+ auth_file_path = os.path.join(settings.booth_config_dir, auth_file)
+ config_content = "authfile={}".format(auth_file_path)
+
+ (self.config
+ .fs.open(
+ self.config_path,
+ mock.mock_open(read_data=config_content)(),
+ name="open.conf"
+ )
+ .fs.open(
+ auth_file_path,
+ mode="rb",
+ name="open.authfile",
+ side_effect=EnvironmentError(0, self.reason, auth_file_path),
+ )
+ .corosync_conf.load()
+ .http.booth.send_config(
+ self.name, config_content, node_labels=self.node_list,
+ )
+ )
+
+ commands.config_sync(self.env_assist.get_env(), self.name)
+ self.env_assist.assert_reports(
+ [
+ fixture.warn(
+ report_codes.FILE_IO_ERROR,
+ reason="{}: '{}'".format(self.reason, auth_file_path),
+ file_role=file_roles.BOOTH_KEY,
+ file_path=auth_file_path,
+ operation="read",
+ ),
+ fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED)
+ ]
+ +
+ [
+ fixture.info(
+ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+ node=node,
+ name_list=[self.name]
+ ) for node in self.node_list
+ ]
+ )
+
+ def test_no_authfile(self):
+ (self.config
+ .fs.open(
+ self.config_path,
+ mock.mock_open(read_data="")(),
+ name="open.conf"
+ )
+ .corosync_conf.load()
+ .http.booth.send_config(
+ self.name, "", node_labels=self.node_list,
+ )
+ )
+
+ commands.config_sync(self.env_assist.get_env(), self.name)
+ self.env_assist.assert_reports(
+ [fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED)]
+ +
+ [
+ fixture.info(
+ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+ node=node,
+ name_list=[self.name]
+ ) for node in self.node_list
+ ]
+ )
+
+ def test_authfile_not_in_booth_dir(self):
+ config_file_content = "authfile=/etc/my_booth.conf"
+
+ (self.config
+ .fs.open(
+ self.config_path,
+ mock.mock_open(read_data=config_file_content)(),
+ name="open.conf"
+ )
+ .corosync_conf.load()
+ .http.booth.send_config(
+ self.name, config_file_content, node_labels=self.node_list,
+ )
+ )
+
+ commands.config_sync(self.env_assist.get_env(), self.name)
+ self.env_assist.assert_reports(
+ [
+ fixture.warn(report_codes.BOOTH_UNSUPORTED_FILE_LOCATION),
+ fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED)
+ ]
+ +
+ [
+ fixture.info(
+ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+ node=node,
+ name_list=[self.name]
+ ) for node in self.node_list
+ ]
)
@@ -380,129 +673,507 @@ class StopBoothTest(TestCase):
mock_stop.assert_called_once_with(self.mock_run, "booth", "name")
mock_is_systemctl.assert_called_once_with()
+def _get_booth_file_path(file):
+ return os.path.join(settings.booth_config_dir, file)
-@skip("TODO: rewrite using new testing fremework")
-@mock.patch("pcs.lib.booth.sync.pull_config_from_node")
-class PullConfigTest(TestCase):
+
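+# Common fixture for the pull_config tests: booth name, source node and
+# the report sequence expected on success.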
+class PullConfigBase(TestCase):
def setUp(self):
- self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
- self.mock_rep = MockLibraryReportProcessor()
- self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- self.mock_env.node_communicator.return_value = self.mock_com
- self.mock_env.report_processor = self.mock_rep
+ self.env_assist, self.config = get_env_tools(self)
+ self.name = "booth"
+ self.node_name = "node"
+ self.config_data = "config"
+ self.config_path = _get_booth_file_path("{}.conf".format(self.name))
+ self.report_list = [
+ fixture.info(
+ report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
+ node=self.node_name,
+ config=self.name
+ ),
+ fixture.info(
+ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+ node=None,
+ name_list=[self.name],
+ )
+ ]
+ self.config.env.set_booth({"name": self.name})
- def test_with_authfile(self, mock_pull):
- mock_pull.return_value = {
- "config": {
- "name": "name.conf",
- "data": "config"
- },
- "authfile": {
- "name": "name.key",
- "data": base64.b64encode("key".encode("utf-8")).decode("utf-8")
- }
- }
- commands.pull_config(self.mock_env, "node", "name")
- mock_pull.assert_called_once_with(
- self.mock_com, NodeAddresses("node"), "name"
- )
- self.mock_env.booth.create_config.called_once_with("config", True)
- self.mock_env.booth.set_key_path.called_once_with(os.path.join(
- settings.booth_config_dir, "name.key"
- ))
- self.mock_env.booth.create_key.called_once_with(
- "key".encode("utf-8"), True
+
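+# Happy path: the fetched config is written to the local booth directory;
+# addCleanup verifies the write after each test.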
+class PullConfigSuccess(PullConfigBase):
+ def setUp(self):
+ super(PullConfigSuccess, self).setUp()
+ self.booth_cfg_open_mock = mock.mock_open()()
+ (self.config
+ .http.booth.get_config(
+ self.name, self.config_data, node_labels=[self.node_name]
+ )
+ .fs.exists(self.config_path, False)
+ .fs.open(self.config_path, self.booth_cfg_open_mock, mode="w")
)
- assert_report_item_list_equal(
- self.mock_rep.report_item_list,
+ self.addCleanup(
+ lambda: self.booth_cfg_open_mock.write.assert_called_once_with(
+ self.config_data
+ )
+ )
+
+ def test_success(self):
+ commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ )
+
+ self.env_assist.assert_reports(self.report_list)
+
+ def test_success_config_exists(self):
+ self.config.fs.exists(self.config_path, True, instead="fs.exists")
+
+ commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ )
+
+ self.env_assist.assert_reports(
+ self.report_list
+ +
[
- (
- Severities.INFO,
- report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
- {
- "node": "node",
- "config": "name"
- }
+ fixture.warn(
+ report_codes.FILE_ALREADY_EXISTS,
+ node=None,
+ file_role=file_roles.BOOTH_CONFIG,
+ file_path=self.config_path,
),
- (
- Severities.INFO,
- report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
- {
- "node": None,
- "name_list": ["name"]
- }
- )
]
)
- def test_without_authfile(self, mock_pull):
- mock_pull.return_value = {
- "config": {
- "name": "name.conf",
- "data": "config"
- },
- "authfile": {
- "name": None,
- "data": None
- }
- }
- commands.pull_config(self.mock_env, "node", "name")
- mock_pull.assert_called_once_with(
- self.mock_com, NodeAddresses("node"), "name"
- )
- self.mock_env.booth.create_config.called_once_with("config", True)
- self.assertEqual(0, self.mock_env.booth.set_key_path.call_count)
- self.assertEqual(0, self.mock_env.booth.create_key.call_count)
- assert_report_item_list_equal(
- self.mock_rep.report_item_list,
+
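+# Failures without an authfile: local write errors and remote responses
+# that are unreachable, unsuccessful or not valid JSON.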
+class PullConfigFailure(PullConfigBase):
+ reason = "reason"
+ def test_write_failure(self):
+ (self.config
+ .http.booth.get_config(
+ self.name, self.config_data, node_labels=[self.node_name]
+ )
+ .fs.exists(self.config_path, False)
+ .fs.open(
+ self.config_path,
+ mode="w",
+ side_effect=EnvironmentError(0, self.reason, self.config_path),
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ ),
[
- (
- Severities.INFO,
- report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
- {
- "node": "node",
- "config": "name"
- }
- ),
- (
- Severities.INFO,
- report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
- {
- "node": None,
- "name_list": ["name"]
- }
+ fixture.error(
+ report_codes.FILE_IO_ERROR,
+ reason="{}: '{}'".format(self.reason, self.config_path),
+ file_role=file_roles.BOOTH_CONFIG,
+ file_path=self.config_path,
+ operation="write",
)
- ]
+ ],
+ expected_in_processor=False,
)
+ self.env_assist.assert_reports(self.report_list[:1])
- def test_invalid_input(self, mock_pull):
- mock_pull.return_value = {}
- assert_raise_library_error(
- lambda: commands.pull_config(self.mock_env, "node", "name"),
- (
- Severities.ERROR,
+ def test_network_failure(self):
+ self.config.http.booth.get_config(
+ self.name,
+ communication_list=[dict(
+ label=self.node_name,
+ was_connected=False,
+ errno=1,
+ error_msg=self.reason,
+ )]
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ ),
+ [],
+ )
+ self.env_assist.assert_reports([
+ self.report_list[0],
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ force_code=None,
+ node=self.node_name,
+ command="remote/booth_get_config",
+ reason=self.reason,
+ ),
+ ])
+
+ def test_network_request_failure(self):
+ self.config.http.booth.get_config(
+ self.name,
+ communication_list=[dict(
+ label=self.node_name,
+ response_code=400,
+ output=self.reason,
+ )]
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ ),
+ [],
+ )
+ self.env_assist.assert_reports([
+ self.report_list[0],
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ force_code=None,
+ node=self.node_name,
+ command="remote/booth_get_config",
+ reason=self.reason,
+ ),
+ ])
+
+ def test_request_response_not_json(self):
+ self.config.http.booth.get_config(
+ self.name,
+ communication_list=[dict(
+ label=self.node_name,
+ output="not json",
+ )]
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ ),
+ [],
+ )
+ self.env_assist.assert_reports([
+ self.report_list[0],
+ fixture.error(
report_codes.INVALID_RESPONSE_FORMAT,
- {"node": "node"}
+ node=self.node_name,
+ ),
+ ])
+
+ def test_request_response_missing_keys(self):
+ self.config.http.booth.get_config(
+ self.name,
+ communication_list=[dict(
+ label=self.node_name,
+ output="{'config':{}}",
+ )]
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ ),
+ [],
+ )
+ self.env_assist.assert_reports([
+ self.report_list[0],
+ fixture.error(
+ report_codes.INVALID_RESPONSE_FORMAT,
+ node=self.node_name,
+ ),
+ ])
+
+
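+# When the pulled config references an authfile, the key is written as
+# well, owned by the pacemaker user and group with the configured mode.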
+class PullConfigWithAuthfile(PullConfigBase):
+ def setUp(self):
+ super(PullConfigWithAuthfile, self).setUp()
+ self.booth_cfg_open_mock = mock.mock_open()()
+ self.authfile = "authfile"
+ self.authfile_path = _get_booth_file_path(self.authfile)
+ self.authfile_data = b"auth"
+ self.pcmk_uid = 2
+ self.pcmk_gid = 3
+
+ (self.config
+ .http.booth.get_config(
+ self.name,
+ self.config_data,
+ authfile=self.authfile,
+ authfile_data=self.authfile_data,
+ node_labels=[self.node_name],
)
+ .fs.exists(self.config_path, False)
+ .fs.open(self.config_path, self.booth_cfg_open_mock, mode="w")
+ .fs.exists(self.authfile_path, False, name="fs.exists.authfile")
)
- mock_pull.assert_called_once_with(
- self.mock_com, NodeAddresses("node"), "name"
+
+ self.addCleanup(
+ lambda: self.booth_cfg_open_mock.write.assert_called_once_with(
+ self.config_data
+ )
)
- self.assertEqual(0, self.mock_env.booth.create_config.call_count)
- self.assertEqual(0, self.mock_env.booth.set_key_path.call_count)
- self.assertEqual(0, self.mock_env.booth.create_key.call_count)
- assert_report_item_list_equal(
- self.mock_rep.report_item_list,
- [(
- Severities.INFO,
- report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
- {
- "node": "node",
- "config": "name"
- }
- )]
+
+ def _set_pwd_mock(self, pwd_mock):
+ pwd_mock.return_value = namedtuple("Pw", "pw_uid")(self.pcmk_uid)
+ self.addCleanup(
+ lambda: pwd_mock.assert_called_once_with(settings.pacemaker_uname)
+ )
+
+ def _set_grp_mock(self, grp_mock):
+ grp_mock.return_value = namedtuple("Gr", "gr_gid")(self.pcmk_gid)
+ self.addCleanup(
+ lambda: grp_mock.assert_called_once_with(settings.pacemaker_gname)
+ )
+
+
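+# pwd/grp lookups are patched so the uid/gid passed to chown are fixed.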
+@mock.patch("grp.getgrnam")
+@mock.patch("pwd.getpwnam")
+class PullConfigWithAuthfileSuccess(PullConfigWithAuthfile):
+ def setUp(self):
+ super(PullConfigWithAuthfileSuccess, self).setUp()
+ self.booth_authfile_open_mock = mock.mock_open()()
+
+ (self.config
+ .fs.open(
+ self.authfile_path,
+ self.booth_authfile_open_mock,
+ mode="wb",
+ name="fs.open.authfile.write"
+ )
+ .fs.chown(self.authfile_path, self.pcmk_uid, self.pcmk_gid)
+ .fs.chmod(self.authfile_path, settings.pacemaker_authkey_file_mode)
+ )
+
+ self.addCleanup(
+ lambda: self.booth_authfile_open_mock.write.assert_called_once_with(
+ self.authfile_data
+ )
+ )
+
+ def test_success(self, pwd_mock, grp_mock):
+ self._set_pwd_mock(pwd_mock)
+ self._set_grp_mock(grp_mock)
+
+ commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ )
+
+ self.env_assist.assert_reports(self.report_list)
+
+ def test_success_authfile_exists(self, pwd_mock, grp_mock):
+ self._set_pwd_mock(pwd_mock)
+ self._set_grp_mock(grp_mock)
+
+ self.config.fs.exists(
+ self.authfile_path, True,
+ name="fs.exists.authfile",
+ instead="fs.exists.authfile",
+ )
+
+ commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ )
+
+ self.env_assist.assert_reports(
+ self.report_list
+ +
+ [
+ fixture.warn(
+ report_codes.FILE_ALREADY_EXISTS,
+ node=None,
+ file_role=file_roles.BOOTH_KEY,
+ file_path=self.authfile_path,
+ )
+ ]
)
+ def test_success_config_and_authfile_exists(self, pwd_mock, grp_mock):
+ self._set_pwd_mock(pwd_mock)
+ self._set_grp_mock(grp_mock)
+
+ (self.config
+ .fs.exists(self.config_path, True, instead="fs.exists")
+ .fs.exists(
+ self.authfile_path, True,
+ name="fs.exists.authfile",
+ instead="fs.exists.authfile",
+ )
+ )
+
+ commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ )
+
+ self.env_assist.assert_reports(
+ self.report_list
+ +
+ [
+ fixture.warn(
+ report_codes.FILE_ALREADY_EXISTS,
+ node=None, file_role=role, file_path=path,
+ ) for role, path in [
+ (file_roles.BOOTH_CONFIG, self.config_path),
+ (file_roles.BOOTH_KEY, self.authfile_path)
+ ]
+ ]
+ )
+
+
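+# Failures while storing the authfile: write errors, an unknown pacemaker
+# user or group, and chown/chmod errors; earlier reports stay intact.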
+@mock.patch("grp.getgrnam")
+@mock.patch("pwd.getpwnam")
+class PullConfigWithAuthfileFailure(PullConfigWithAuthfile):
+ def setUp(self):
+ super(PullConfigWithAuthfileFailure, self).setUp()
+ self.reason = "reason"
+ self.booth_authfile_open_mock = mock.mock_open()()
+
+ def assert_authfile_written(self):
+ self.booth_authfile_open_mock.write.assert_called_once_with(
+ self.authfile_data
+ )
+
+ def test_authfile_write_failure(self, pwd_mock, grp_mock):
+ self.config.fs.open(
+ self.authfile_path,
+ mode="wb",
+ name="fs.open.authfile.write",
+ side_effect=EnvironmentError(1, self.reason, self.authfile_path)
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ ),
+ [
+ fixture.error(
+ report_codes.FILE_IO_ERROR,
+ reason="{}: '{}'".format(self.reason, self.authfile_path),
+ file_role=file_roles.BOOTH_KEY,
+ file_path=self.authfile_path,
+ operation="write",
+ )
+ ],
+ expected_in_processor=False,
+ )
+ self.env_assist.assert_reports(self.report_list[:1])
+
+ def test_unable_to_get_uid(self, pwd_mock, grp_mock):
+ pwd_mock.side_effect = KeyError()
+ self.config.fs.open(
+ self.authfile_path,
+ self.booth_authfile_open_mock,
+ mode="wb",
+ name="fs.open.authfile.write"
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ ),
+ [
+ fixture.error(
+ report_codes.UNABLE_TO_DETERMINE_USER_UID,
+ user=settings.pacemaker_uname,
+ )
+ ],
+ expected_in_processor=False,
+ )
+ self.assert_authfile_written()
+ pwd_mock.assert_called_once_with(settings.pacemaker_uname)
+ self.assertEqual(0, grp_mock.call_count)
+ self.env_assist.assert_reports(self.report_list[:1])
+
+ def test_unable_to_get_gid(self, pwd_mock, grp_mock):
+ self._set_pwd_mock(pwd_mock)
+ grp_mock.side_effect = KeyError()
+ self.config.fs.open(
+ self.authfile_path,
+ self.booth_authfile_open_mock,
+ mode="wb",
+ name="fs.open.authfile.write"
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ ),
+ [
+ fixture.error(
+ report_codes.UNABLE_TO_DETERMINE_GROUP_GID,
+ group=settings.pacemaker_gname,
+ )
+ ],
+ expected_in_processor=False,
+ )
+ self.assert_authfile_written()
+ grp_mock.assert_called_once_with(settings.pacemaker_gname)
+ self.env_assist.assert_reports(self.report_list[:1])
+
+ def test_unable_to_set_authfile_uid_gid(self, pwd_mock, grp_mock):
+ self._set_pwd_mock(pwd_mock)
+ self._set_grp_mock(grp_mock)
+ (self.config
+ .fs.open(
+ self.authfile_path,
+ self.booth_authfile_open_mock,
+ mode="wb",
+ name="fs.open.authfile.write"
+ )
+ .fs.chown(
+ self.authfile_path, self.pcmk_uid, self.pcmk_gid,
+ side_effect=EnvironmentError(1, self.reason, self.authfile_path)
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ ),
+ [
+ fixture.error(
+ report_codes.FILE_IO_ERROR,
+ reason="{}: '{}'".format(self.reason, self.authfile_path),
+ file_role=file_roles.BOOTH_KEY,
+ file_path=self.authfile_path,
+ operation="chown",
+ )
+ ],
+ expected_in_processor=False,
+ )
+ self.assert_authfile_written()
+ self.env_assist.assert_reports(self.report_list[:1])
+
+ def test_unable_to_set_authfile_mode(self, pwd_mock, grp_mock):
+ self._set_pwd_mock(pwd_mock)
+ self._set_grp_mock(grp_mock)
+ (self.config
+ .fs.open(
+ self.authfile_path,
+ self.booth_authfile_open_mock,
+ mode="wb",
+ name="fs.open.authfile.write"
+ )
+ .fs.chown(
+ self.authfile_path, self.pcmk_uid, self.pcmk_gid,
+ )
+ .fs.chmod(
+ self.authfile_path, settings.pacemaker_authkey_file_mode,
+ side_effect=EnvironmentError(1, self.reason, self.authfile_path)
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: commands.pull_config(
+ self.env_assist.get_env(), self.node_name, self.name
+ ),
+ [
+ fixture.error(
+ report_codes.FILE_IO_ERROR,
+ reason="{}: '{}'".format(self.reason, self.authfile_path),
+ file_role=file_roles.BOOTH_KEY,
+ file_path=self.authfile_path,
+ operation="chmod",
+ )
+ ],
+ expected_in_processor=False,
+ )
+ self.assert_authfile_written()
+ self.env_assist.assert_reports(self.report_list[:1])
+
+
class TicketOperationTest(TestCase):
@mock.patch("pcs.lib.booth.resource.find_bound_ip")
def test_raises_when_implicit_site_not_found_in_cib(
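The pull-config tests above rely on two stdlib mock idioms worth spelling out. First, mock.mock_open()() is deliberately called twice: mock_open() builds a factory for file objects, and invoking that factory yields the file-handle mock whose write() calls are asserted later via addCleanup(), i.e. only after the command under test has finished. Second, pwd.getpwnam and grp.getgrnam are stubbed with namedtuples exposing just the fields the code reads. A minimal standalone sketch of both patterns (the user name and uid below are illustrative, not taken from pcs):

    import pwd
    from collections import namedtuple
    from unittest import mock

    # factory -> file-handle mock; writes are recorded for later assertion
    handle = mock.mock_open()()
    handle.write("booth config data")
    handle.write.assert_called_once_with("booth config data")

    with mock.patch("pwd.getpwnam") as pwd_mock:
        # getpwnam normally returns a struct_passwd; a namedtuple with the
        # single field under test (pw_uid) is enough for a test double
        pwd_mock.return_value = namedtuple("Pw", "pw_uid")(189)
        assert pwd.getpwnam("hacluster").pw_uid == 189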
diff --git a/pcs/lib/commands/test/test_fencing_topology.py b/pcs/lib/commands/test/test_fencing_topology.py
index 42e8c29..605543d 100644
--- a/pcs/lib/commands/test/test_fencing_topology.py
+++ b/pcs/lib/commands/test/test_fencing_topology.py
@@ -11,6 +11,7 @@ from pcs.common.fencing_topology import (
TARGET_TYPE_REGEXP,
TARGET_TYPE_ATTRIBUTE,
)
+from pcs.common.tools import Version
from pcs.lib.env import LibraryEnvironment
from pcs.test.tools.misc import create_patcher
from pcs.test.tools.pcs_unittest import mock, TestCase
@@ -119,7 +120,7 @@ class AddLevel(TestCase):
"force device",
"force node"
)
- mock_get_cib.assert_called_once_with((2, 4, 0))
+ mock_get_cib.assert_called_once_with(Version(2, 4, 0))
self.assert_mocks(
mock_status_xml, mock_status, mock_get_topology, mock_get_resources,
mock_push_cib
@@ -152,7 +153,7 @@ class AddLevel(TestCase):
"force device",
"force node"
)
- mock_get_cib.assert_called_once_with((2, 3, 0))
+ mock_get_cib.assert_called_once_with(Version(2, 3, 0))
self.assert_mocks(
mock_status_xml, mock_status, mock_get_topology, mock_get_resources,
mock_push_cib
diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/lib/commands/test/test_quorum.py
similarity index 73%
rename from pcs/test/test_lib_commands_quorum.py
rename to pcs/lib/commands/test/test_quorum.py
index 807e528..749b1be 100644
--- a/pcs/test/test_lib_commands_quorum.py
+++ b/pcs/lib/commands/test/test_quorum.py
@@ -19,14 +19,12 @@ from pcs.test.tools.misc import (
get_test_resource as rc,
outdent,
)
-from pcs.test.tools.pcs_unittest import mock, skip, TestCase
+from pcs.test.tools.pcs_unittest import mock, TestCase
from pcs.common import report_codes
from pcs.lib.env import LibraryEnvironment
from pcs.lib.errors import ReportItemSeverity as severity
from pcs.lib.corosync.config_facade import ConfigFacade
-from pcs.lib.external import NodeCommunicationException
-from pcs.lib.node import NodeAddresses, NodeAddressesList
from pcs.lib.commands import quorum as lib
@@ -452,7 +450,7 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
lambda: lib.set_options(lib_env, new_options),
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["invalid"],
"option_type": "quorum",
@@ -589,12 +587,9 @@ class AddDeviceNetTest(TestCase):
# matter.
plain = open(cert_info["path"], "rb").read()
cert_info["data"] = plain
- # Convert bytes to string in python3, because the communicator does
- # it the same way - it accepts bytes, converts them to string and
- # passes that to further processing.
- cert_info["b64data"] = base64.b64encode(plain).decode("utf-8")
+ cert_info["b64data"] = base64.b64encode(plain)
- def fixture_config_http_get_ca_cert(self):
+ def fixture_config_http_get_ca_cert(self, output=None):
self.config.http.add_communication(
"http.get_ca_certificate",
[
@@ -602,7 +597,7 @@ class AddDeviceNetTest(TestCase):
],
action="remote/qdevice_net_get_ca_certificate",
response_code=200,
- output=self.certs["cacert"]["b64data"]
+ output=(output or self.certs["cacert"]["b64data"])
)
def fixture_config_http_client_init(self):
@@ -627,7 +622,7 @@ class AddDeviceNetTest(TestCase):
)
)
- def fixture_config_http_sign_cert_request(self):
+ def fixture_config_http_sign_cert_request(self, output=None):
self.config.http.add_communication(
"http.sign_certificate_request",
[
@@ -642,7 +637,7 @@ class AddDeviceNetTest(TestCase):
("cluster_name", self.cluster_name),
],
response_code=200,
- output=self.certs["signed_request"]["b64data"]
+ output=(output or self.certs["signed_request"]["b64data"])
)
def fixture_config_runner_cert_to_pk12(self, cert_file_path):
@@ -667,24 +662,6 @@ class AddDeviceNetTest(TestCase):
response_code=200,
)
- def fixture_config_http_qdevice_enable(self):
- self.config.http.add_communication(
- "http.qdevice_enable",
- [{"label": node} for node in self.cluster_nodes],
- action="remote/qdevice_client_enable",
- response_code=200,
- output="corosync-qdevice enabled"
- )
-
- def fixture_config_http_qdevice_start(self):
- self.config.http.add_communication(
- "http.qdevice_start",
- [{"label": node} for node in self.cluster_nodes],
- action="remote/qdevice_client_start",
- response_code=200,
- output="corosync-qdevice started"
- )
-
def fixture_config_success(
self, expected_corosync_conf, cert_to_pk12_cert_path
):
@@ -696,11 +673,15 @@ class AddDeviceNetTest(TestCase):
self.fixture_config_http_sign_cert_request()
self.fixture_config_runner_cert_to_pk12(cert_to_pk12_cert_path)
self.fixture_config_http_import_final_cert()
- self.fixture_config_http_qdevice_enable()
+ self.config.http.corosync.qdevice_client_enable(
+ node_labels=self.cluster_nodes
+ )
self.config.env.push_corosync_conf(
corosync_conf_text=expected_corosync_conf
)
- self.fixture_config_http_qdevice_start()
+ self.config.http.corosync.qdevice_client_start(
+ node_labels=self.cluster_nodes
+ )
def fixture_reports_success(self):
return [
@@ -720,7 +701,8 @@ class AddDeviceNetTest(TestCase):
fixture.info(
report_codes.SERVICE_ENABLE_SUCCESS,
node=node,
- service="corosync-qdevice"
+ service="corosync-qdevice",
+ instance=None
)
for node in self.cluster_nodes
] + [
@@ -732,7 +714,8 @@ class AddDeviceNetTest(TestCase):
fixture.info(
report_codes.SERVICE_START_SUCCESS,
node=node,
- service="corosync-qdevice"
+ service="corosync-qdevice",
+ instance=None
)
for node in self.cluster_nodes
]
@@ -861,6 +844,117 @@ class AddDeviceNetTest(TestCase):
@mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
@mock.patch("pcs.lib.corosync.qdevice_net.write_tmpfile")
+ def test_success_corosync_not_running_not_enabled(self, mock_write_tmpfile):
+ tmpfile_instance = mock.MagicMock()
+ tmpfile_instance.name = rc("file.tmp")
+ mock_write_tmpfile.return_value = tmpfile_instance
+
+ expected_corosync_conf = open(
+ rc(self.corosync_conf_name)
+ ).read().replace(
+ " provider: corosync_votequorum\n",
+ outdent("""\
+ provider: corosync_votequorum
+
+ device {
+ model: net
+ votes: 1
+
+ net {
+ algorithm: ffsplit
+ host: qnetd-host
+ }
+ }
+ """
+ )
+ )
+
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.fixture_config_http_get_ca_cert()
+ self.fixture_config_http_client_init()
+ self.fixture_config_runner_get_cert_request()
+ self.fixture_config_http_sign_cert_request()
+ self.fixture_config_runner_cert_to_pk12(tmpfile_instance.name)
+ self.fixture_config_http_import_final_cert()
+ self.config.http.corosync.qdevice_client_enable(
+ communication_list=[
+ {
+ "label": label,
+ "output": "corosync is not enabled, skipping",
+ }
+ for label in self.cluster_nodes
+ ]
+ )
+ self.config.env.push_corosync_conf(
+ corosync_conf_text=expected_corosync_conf
+ )
+ self.config.http.corosync.qdevice_client_start(
+ communication_list=[
+ {
+ "label": label,
+ "output": "corosync is not running, skipping",
+ }
+ for label in self.cluster_nodes
+ ]
+ )
+
+ lib.add_device(
+ self.env_assist.get_env(),
+ "net",
+ {"host": self.qnetd_host, "algorithm": "ffsplit"},
+ {},
+ {}
+ )
+
+ mock_write_tmpfile.assert_called_once_with(
+ self.certs["signed_request"]["data"],
+ binary=True
+ )
+ self.env_assist.assert_reports(
+ [
+ fixture.info(
+ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED
+ ),
+ ] + [
+ fixture.info(
+ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+ node=node
+ )
+ for node in self.cluster_nodes
+ ] + [
+ fixture.info(
+ report_codes.SERVICE_ENABLE_STARTED,
+ service="corosync-qdevice"
+ ),
+ ] + [
+ fixture.info(
+ report_codes.SERVICE_ENABLE_SKIPPED,
+ node=node,
+ service="corosync-qdevice",
+ instance=None,
+ reason="corosync is not enabled"
+ )
+ for node in self.cluster_nodes
+ ] + [
+ fixture.info(
+ report_codes.SERVICE_START_STARTED,
+ service="corosync-qdevice"
+ ),
+ ] + [
+ fixture.info(
+ report_codes.SERVICE_START_SKIPPED,
+ node=node,
+ service="corosync-qdevice",
+ instance=None,
+ reason="corosync is not running"
+ )
+ for node in self.cluster_nodes
+ ]
+ )
+
+ @mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
+ @mock.patch("pcs.lib.corosync.qdevice_net.write_tmpfile")
def test_success_heuristics_no_exec(self, mock_write_tmpfile):
tmpfile_instance = mock.MagicMock()
tmpfile_instance.name = rc("file.tmp")
@@ -1070,22 +1164,14 @@ class AddDeviceNetTest(TestCase):
],
response_code=200,
)
- self.config.http.add_communication(
- "http.qdevice_enable",
- node_2_offline_responses,
- action="remote/qdevice_client_enable",
- response_code=200,
- output="corosync-qdevice enabled"
+ self.config.http.corosync.qdevice_client_enable(
+ communication_list=node_2_offline_responses
)
self.config.env.push_corosync_conf(
corosync_conf_text=expected_corosync_conf
)
- self.config.http.add_communication(
- "http.qdevice_start",
- node_2_offline_responses,
- action="remote/qdevice_client_start",
- response_code=200,
- output="corosync-qdevice started"
+ self.config.http.corosync.qdevice_client_start(
+ communication_list=node_2_offline_responses
)
lib.add_device(
@@ -1271,7 +1357,7 @@ class AddDeviceNetTest(TestCase):
),
[
fixture.error(
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
force_code=report_codes.FORCE_OPTIONS,
option_names=["bad_option"],
option_type="quorum device",
@@ -1286,7 +1372,7 @@ class AddDeviceNetTest(TestCase):
allowed_values=("off", "on", "sync")
),
fixture.error(
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
force_code=report_codes.FORCE_OPTIONS,
option_names=["bad_heur"],
option_type="heuristics",
@@ -1346,11 +1432,15 @@ class AddDeviceNetTest(TestCase):
self.fixture_config_http_sign_cert_request()
self.fixture_config_runner_cert_to_pk12(tmpfile_instance.name)
self.fixture_config_http_import_final_cert()
- self.fixture_config_http_qdevice_enable()
+ self.config.http.corosync.qdevice_client_enable(
+ node_labels=self.cluster_nodes
+ )
self.config.env.push_corosync_conf(
corosync_conf_text=expected_corosync_conf
)
- self.fixture_config_http_qdevice_start()
+ self.config.http.corosync.qdevice_client_start(
+ node_labels=self.cluster_nodes
+ )
lib.add_device(
self.env_assist.get_env(),
@@ -1363,7 +1453,7 @@ class AddDeviceNetTest(TestCase):
self.env_assist.assert_reports([
fixture.warn(
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
option_names=["bad_option"],
option_type="quorum device",
allowed=["sync_timeout", "timeout"],
@@ -1376,7 +1466,7 @@ class AddDeviceNetTest(TestCase):
allowed_values=("off", "on", "sync")
),
fixture.warn(
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
option_names=["bad_heur"],
option_type="heuristics",
allowed=["interval", "mode", "sync_timeout", "timeout"],
@@ -1458,11 +1548,15 @@ class AddDeviceNetTest(TestCase):
self.config.runner.corosync.version()
self.config.corosync_conf.load(filename=self.corosync_conf_name)
# model is not "net" - do not set up certificates
- self.fixture_config_http_qdevice_enable()
+ self.config.http.corosync.qdevice_client_enable(
+ node_labels=self.cluster_nodes
+ )
self.config.env.push_corosync_conf(
corosync_conf_text=expected_corosync_conf
)
- self.fixture_config_http_qdevice_start()
+ self.config.http.corosync.qdevice_client_start(
+ node_labels=self.cluster_nodes
+ )
lib.add_device(
self.env_assist.get_env(),
@@ -1506,7 +1600,7 @@ class AddDeviceNetTest(TestCase):
for node in self.cluster_nodes
])
- def test_error_get_ca_cert(self):
+ def test_get_ca_cert_error_communication(self):
self.config.runner.corosync.version()
self.config.corosync_conf.load(filename=self.corosync_conf_name)
self.config.http.add_communication(
@@ -1543,6 +1637,35 @@ class AddDeviceNetTest(TestCase):
)
])
+ def test_get_ca_cert_error_decode_certificate(self):
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.fixture_config_http_get_ca_cert(
+ output="invalid base64 encoded certificate data"
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: lib.add_device(
+ self.env_assist.get_env(),
+ "net",
+ {"host": self.qnetd_host, "algorithm": "ffsplit"},
+ {"timeout": "20"},
+ {},
+ skip_offline_nodes=True # test that this does not matter
+ ),
+ [], # an empty LibraryError is raised
+ expected_in_processor=False
+ )
+
+ self.env_assist.assert_reports([
+ fixture.info(report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED),
+ fixture.error(
+ report_codes.INVALID_RESPONSE_FORMAT,
+ force_code=None,
+ node=self.qnetd_host,
+ )
+ ])
+
def test_error_client_setup(self):
self.config.runner.corosync.version()
self.config.corosync_conf.load(filename=self.corosync_conf_name)
@@ -1627,7 +1750,7 @@ class AddDeviceNetTest(TestCase):
])
@mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
- def test_sign_certificate_error(self):
+ def test_sign_certificate_error_communication(self):
self.config.runner.corosync.version()
self.config.corosync_conf.load(filename=self.corosync_conf_name)
self.fixture_config_http_get_ca_cert()
@@ -1674,6 +1797,38 @@ class AddDeviceNetTest(TestCase):
])
@mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
+ def test_sign_certificate_error_decode_certificate(self):
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load(filename=self.corosync_conf_name)
+ self.fixture_config_http_get_ca_cert()
+ self.fixture_config_http_client_init()
+ self.fixture_config_runner_get_cert_request()
+ self.fixture_config_http_sign_cert_request(
+ output="invalid base64 encoded certificate data"
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: lib.add_device(
+ self.env_assist.get_env(),
+ "net",
+ {"host": "qnetd-host", "algorithm": "ffsplit"},
+ {"timeout": "20"},
+ {}
+ ),
+ [], # an empty LibraryError is raised
+ expected_in_processor=False
+ )
+
+ self.env_assist.assert_reports([
+ fixture.info(report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED),
+ fixture.error(
+ report_codes.INVALID_RESPONSE_FORMAT,
+ force_code=None,
+ node=self.qnetd_host,
+ )
+ ])
+
+ @mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
@mock.patch("pcs.lib.corosync.qdevice_net.write_tmpfile")
def test_certificate_to_pk12_error(self, mock_write_tmpfile):
tmpfile_instance = mock.MagicMock()
@@ -1837,466 +1992,548 @@ class RemoveDeviceHeuristics(TestCase):
-@skip("TODO: rewrite using new testing fremework")
-@mock.patch.object(LibraryEnvironment, "push_corosync_conf")
-@mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
-@mock.patch("pcs.lib.commands.quorum._remove_device_model_net")
-@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_disable")
-@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_stop")
-class RemoveDeviceTest(TestCase, CmanMixin):
+@mock.patch("pcs.lib.external.is_systemctl", lambda: True)
+class RemoveDeviceNetTest(TestCase):
def setUp(self):
- self.mock_logger = mock.MagicMock(logging.Logger)
- self.mock_reporter = MockLibraryReportProcessor()
+ self.env_assist, self.config = get_env_tools(self)
- @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
- def test_disabled_on_cman(
- self, mock_remote_stop, mock_remote_disable, mock_remove_net,
- mock_get_corosync, mock_push_corosync
- ):
- lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- self.assert_disabled_on_cman(lambda: lib.remove_device(lib_env))
- mock_get_corosync.assert_not_called()
- mock_push_corosync.assert_not_called()
- mock_remove_net.assert_not_called()
- mock_remote_disable.assert_not_called()
- mock_remote_stop.assert_not_called()
+ def conf_2nodes(self, quorum_line):
+ cluster_nodes = ["rh7-1", "rh7-2"]
+ original_conf = open(rc("corosync-qdevice.conf")).read()
+ expected_conf = original_conf.replace(
+ outdent("""\
+ quorum {
+ provider: corosync_votequorum
- @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
- def test_enabled_on_cman_if_not_live(
- self, mock_remote_stop, mock_remote_disable, mock_remove_net,
- mock_get_corosync, mock_push_corosync
- ):
- original_conf = open(rc("corosync-3nodes.conf")).read()
- mock_get_corosync.return_value = original_conf
- lib_env = LibraryEnvironment(
- self.mock_logger,
- self.mock_reporter,
- corosync_conf_data=original_conf
- )
+ device {
+ model: net
- assert_raise_library_error(
- lambda: lib.remove_device(lib_env),
- (
- severity.ERROR,
- report_codes.QDEVICE_NOT_DEFINED,
- {}
+ net {
+ host: 127.0.0.1
+ }
+ }
+ }
+ """
+ ),
+ # cluster consists of two nodes, two_node must be set
+ outdent("""\
+ quorum {
+ provider: corosync_votequorum
+ """ + quorum_line + """
+ }
+ """
)
)
+ return cluster_nodes, original_conf, expected_conf
- self.assertEqual(1, mock_get_corosync.call_count)
- self.assertEqual(0, mock_push_corosync.call_count)
- mock_remove_net.assert_not_called()
- mock_remote_disable.assert_not_called()
- mock_remote_stop.assert_not_called()
+ def conf_3nodes(self):
+ cluster_nodes = ["rh7-1", "rh7-2", "rh7-3"]
+ original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
+ expected_conf = original_conf.replace(
+ outdent("""\
+ quorum {
+ provider: corosync_votequorum
- @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- def test_no_device(
- self, mock_remote_stop, mock_remote_disable, mock_remove_net,
- mock_get_corosync, mock_push_corosync
- ):
- original_conf = open(rc("corosync-3nodes.conf")).read()
- mock_get_corosync.return_value = original_conf
- lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ device {
+ model: net
- assert_raise_library_error(
- lambda: lib.remove_device(lib_env),
- (
- severity.ERROR,
- report_codes.QDEVICE_NOT_DEFINED,
- {}
+ net {
+ host: 127.0.0.1
+ }
+ }
+ }
+ """
+ ),
+ outdent("""\
+ quorum {
+ provider: corosync_votequorum
+ }
+ """
)
)
+ return cluster_nodes, original_conf, expected_conf
- self.assertEqual(1, mock_get_corosync.call_count)
- self.assertEqual(0, mock_push_corosync.call_count)
- mock_remove_net.assert_not_called()
- mock_remote_disable.assert_not_called()
- mock_remote_stop.assert_not_called()
+ def fixture_config_http_qdevice_net_destroy(self, nodes, responses=None):
+ responses = responses or [{"label": node} for node in nodes]
+ self.config.http.add_communication(
+ "http.qdevice_net_destroy",
+ responses,
+ action="remote/qdevice_net_client_destroy",
+ response_code=200
+ )
- @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: True)
- @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: True)
- @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
- def test_success_3nodes_sbd(
- self, mock_remote_stop, mock_remote_disable, mock_remove_net,
- mock_get_corosync, mock_push_corosync
- ):
- # nothing special needs to be done in regards of SBD if a cluster
- # consists of odd number of nodes
- original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
- no_device_conf = open(rc("corosync-3nodes.conf")).read()
- mock_get_corosync.return_value = original_conf
- lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ def fixture_config_runner_sbd_installed(self, sbd_installed):
+ units = {
+ "non_sbd": "enabled",
+ }
+ if sbd_installed:
+ units["sbd"] = "enabled" # enabled/disabled doesn't matter
+ self.config.runner.systemctl.list_unit_files(
+ units,
+ before="http.corosync.qdevice_client_disable_requests",
+ )
- lib.remove_device(lib_env)
+ def fixture_config_runner_sbd_enabled(self, sbd_enabled):
+ self.config.runner.systemctl.is_enabled(
+ "sbd",
+ sbd_enabled,
+ before="http.corosync.qdevice_client_disable_requests",
+ )
- self.assertEqual(1, len(mock_push_corosync.mock_calls))
- ac(
- mock_push_corosync.mock_calls[0][1][0].config.export(),
- no_device_conf
+ def fixture_config_success(
+ self, cluster_nodes, original_corosync_conf, expected_corosync_conf
+ ):
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load_content(original_corosync_conf)
+ self.config.http.corosync.qdevice_client_disable(
+ node_labels=cluster_nodes
)
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.INFO,
- report_codes.SERVICE_DISABLE_STARTED,
- {
- "service": "corosync-qdevice",
- }
- ),
- (
- severity.INFO,
- report_codes.SERVICE_STOP_STARTED,
- {
- "service": "corosync-qdevice",
- }
- ),
- ]
+ self.config.http.corosync.qdevice_client_stop(node_labels=cluster_nodes)
+ self.fixture_config_http_qdevice_net_destroy(cluster_nodes)
+ self.config.env.push_corosync_conf(
+ corosync_conf_text=expected_corosync_conf
)
- self.assertEqual(1, len(mock_remove_net.mock_calls))
- self.assertEqual(3, len(mock_remote_disable.mock_calls))
- self.assertEqual(3, len(mock_remote_stop.mock_calls))
- @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: False)
- @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: False)
- @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
- def test_success_2nodes_no_sbd(
- self, mock_remote_stop, mock_remote_disable, mock_remove_net,
- mock_get_corosync, mock_push_corosync
+ def fixture_config_success_sbd_part(
+ self, sbd_installed, sbd_enabled
):
- # cluster consists of two nodes, two_node must be set
- original_conf = open(rc("corosync-qdevice.conf")).read()
- no_device_conf = open(rc("corosync.conf")).read()
- mock_get_corosync.return_value = original_conf
- lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-
- lib.remove_device(lib_env)
+ self.fixture_config_runner_sbd_installed(sbd_installed)
+ if sbd_installed:
+ self.fixture_config_runner_sbd_enabled(sbd_enabled)
+
+ def fixture_reports_success(self, cluster_nodes, atb_enabled=False):
+ reports = []
+ if atb_enabled:
+ reports.append(
+ fixture.warn(report_codes.SBD_REQUIRES_ATB)
+ )
+ reports += [
+ fixture.info(
+ report_codes.SERVICE_DISABLE_STARTED,
+ service="corosync-qdevice",
+ ),
+ ] + [
+ fixture.info(
+ report_codes.SERVICE_DISABLE_SUCCESS,
+ node=node,
+ service="corosync-qdevice",
+ )
+ for node in cluster_nodes
+ ] + [
+ fixture.info(
+ report_codes.SERVICE_STOP_STARTED,
+ service="corosync-qdevice",
+ ),
+ ] + [
+ fixture.info(
+ report_codes.SERVICE_STOP_SUCCESS,
+ node=node,
+ service="corosync-qdevice",
+ )
+ for node in cluster_nodes
+ ] + [
+ fixture.info(report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED),
+ ] + [
+ fixture.info(
+ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
+ node=node,
+ )
+ for node in cluster_nodes
+ ]
+ return reports
- self.assertEqual(1, len(mock_push_corosync.mock_calls))
- ac(
- mock_push_corosync.mock_calls[0][1][0].config.export(),
- no_device_conf
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
+ def test_disabled_on_cman(self):
+ self.config.runner.corosync.version(version="1.4.7")
+ self.env_assist.assert_raise_library_error(
+ lambda: lib.remove_device(self.env_assist.get_env()),
+ [
+ fixture.error(report_codes.CMAN_UNSUPPORTED_COMMAND),
+ ],
+ expected_in_processor=False
)
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
+
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
+ def test_does_not_check_cman_if_not_live(self):
+ (self.config
+ .env.set_corosync_conf_data(open(rc("corosync-3nodes.conf")).read())
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: lib.remove_device(self.env_assist.get_env()),
[
- (
- severity.INFO,
- report_codes.SERVICE_DISABLE_STARTED,
- {
- "service": "corosync-qdevice",
- }
- ),
- (
- severity.INFO,
- report_codes.SERVICE_STOP_STARTED,
- {
- "service": "corosync-qdevice",
- }
- ),
- ]
+ fixture.error(report_codes.QDEVICE_NOT_DEFINED),
+ ],
+ expected_in_processor=False
)
- self.assertEqual(1, len(mock_remove_net.mock_calls))
- self.assertEqual(2, len(mock_remote_disable.mock_calls))
- self.assertEqual(2, len(mock_remote_stop.mock_calls))
- @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: True)
- @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: True)
@mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
- def test_success_2nodes_sbd(
- self, mock_remote_stop, mock_remote_disable, mock_remove_net,
- mock_get_corosync, mock_push_corosync
- ):
- # cluster consists of two nodes, but SBD is in use
- # auto tie breaker must be enabled
- original_conf = open(rc("corosync-qdevice.conf")).read()
- no_device_conf = open(rc("corosync.conf")).read().replace(
- "two_node: 1",
- "auto_tie_breaker: 1"
+ def test_fail_if_device_not_set(self):
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load_content(
+ open(rc("corosync-3nodes.conf")).read()
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: lib.remove_device(self.env_assist.get_env()),
+ [
+ fixture.error(report_codes.QDEVICE_NOT_DEFINED),
+ ],
+ expected_in_processor=False
)
- mock_get_corosync.return_value = original_conf
- lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- lib.remove_device(lib_env)
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
+ def test_success_2nodes_no_sbd(self):
+ # cluster consists of two nodes, two_node must be set
+ cluster_nodes, original_conf, expected_conf = self.conf_2nodes(
+ "two_node: 1"
+ )
+ self.fixture_config_success(cluster_nodes, original_conf, expected_conf)
+ self.fixture_config_success_sbd_part(False, False)
+ lib.remove_device(self.env_assist.get_env())
+ self.env_assist.assert_reports(
+ self.fixture_reports_success(cluster_nodes)
+ )
- self.assertEqual(1, len(mock_push_corosync.mock_calls))
- ac(
- mock_push_corosync.mock_calls[0][1][0].config.export(),
- no_device_conf
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
+ def test_success_2nodes_sbd_installed_disabled(self):
+ # cluster consists of two nodes, two_node must be set
+ cluster_nodes, original_conf, expected_conf = self.conf_2nodes(
+ "two_node: 1"
)
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.WARNING,
- report_codes.SBD_REQUIRES_ATB,
- {}
- ),
- (
- severity.INFO,
- report_codes.SERVICE_DISABLE_STARTED,
- {
- "service": "corosync-qdevice",
- }
- ),
- (
- severity.INFO,
- report_codes.SERVICE_STOP_STARTED,
- {
- "service": "corosync-qdevice",
- }
- ),
- ]
+ self.fixture_config_success(cluster_nodes, original_conf, expected_conf)
+ self.fixture_config_success_sbd_part(True, False)
+ lib.remove_device(self.env_assist.get_env())
+ self.env_assist.assert_reports(
+ self.fixture_reports_success(cluster_nodes, atb_enabled=False)
)
- self.assertEqual(1, len(mock_remove_net.mock_calls))
- self.assertEqual(2, len(mock_remote_disable.mock_calls))
- self.assertEqual(2, len(mock_remote_stop.mock_calls))
- @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: True)
- @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: True)
- @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: ["/dev"])
- def test_success_2nodes_sbd_with_device(
- self, mock_remote_stop, mock_remote_disable, mock_remove_net,
- mock_get_corosync, mock_push_corosync
- ):
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
+ def test_success_2nodes_sbd_enabled(self):
+ # cluster consists of two nodes and SBD is in use, so two_node must be
+ # disabled and auto_tie_breaker must be enabled
+ cluster_nodes, original_conf, expected_conf = self.conf_2nodes(
+ "auto_tie_breaker: 1"
+ )
+ self.fixture_config_success(cluster_nodes, original_conf, expected_conf)
+ self.fixture_config_success_sbd_part(True, True)
+ lib.remove_device(self.env_assist.get_env())
+ self.env_assist.assert_reports(
+ self.fixture_reports_success(cluster_nodes, atb_enabled=True)
+ )
+
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: ["/dev/sdb"])
+ def test_success_2nodes_sbd_enabled_with_devices(self):
# cluster consists of two nodes, but SBD with shared storage is in use
# auto tie breaker doesn't need to be enabled
- original_conf = open(rc("corosync-qdevice.conf")).read()
- no_device_conf = open(rc("corosync.conf")).read()
- mock_get_corosync.return_value = original_conf
- lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-
- lib.remove_device(lib_env)
-
- self.assertEqual(1, len(mock_push_corosync.mock_calls))
- ac(
- mock_push_corosync.mock_calls[0][1][0].config.export(),
- no_device_conf
+ cluster_nodes, original_conf, expected_conf = self.conf_2nodes(
+ "two_node: 1"
)
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.INFO,
- report_codes.SERVICE_DISABLE_STARTED,
- {
- "service": "corosync-qdevice",
- }
- ),
- (
- severity.INFO,
- report_codes.SERVICE_STOP_STARTED,
- {
- "service": "corosync-qdevice",
- }
- ),
- ]
+ self.fixture_config_success(cluster_nodes, original_conf, expected_conf)
+ self.fixture_config_success_sbd_part(True, True)
+ lib.remove_device(self.env_assist.get_env())
+ self.env_assist.assert_reports(
+ self.fixture_reports_success(cluster_nodes, atb_enabled=False)
)
- self.assertEqual(1, len(mock_remove_net.mock_calls))
- self.assertEqual(2, len(mock_remote_disable.mock_calls))
- self.assertEqual(2, len(mock_remote_stop.mock_calls))
- @mock.patch("pcs.lib.sbd.atb_has_to_be_enabled")
- @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- def test_success_file(
- self, mock_atb_check, mock_remote_stop, mock_remote_disable,
- mock_remove_net, mock_get_corosync, mock_push_corosync
- ):
- original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
- no_device_conf = open(rc("corosync-3nodes.conf")).read()
- mock_get_corosync.return_value = original_conf
- lib_env = LibraryEnvironment(
- self.mock_logger,
- self.mock_reporter,
- corosync_conf_data=original_conf
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
+ def test_success_3nodes(self):
+ # with odd number of nodes it doesn't matter if sbd is used
+ cluster_nodes, original_conf, expected_conf = self.conf_3nodes()
+ self.fixture_config_success(cluster_nodes, original_conf, expected_conf)
+ lib.remove_device(self.env_assist.get_env())
+ self.env_assist.assert_reports(
+ self.fixture_reports_success(cluster_nodes)
)
- lib.remove_device(lib_env)
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
+ def test_success_3nodes_file(self):
+ # with odd number of nodes it doesn't matter if sbd is used
+ dummy_cluster_nodes, original_conf, expected_conf = self.conf_3nodes()
+ (self.config
+ .env.set_corosync_conf_data(original_conf)
+ .env.push_corosync_conf(corosync_conf_text=expected_conf)
+ )
+ lib.remove_device(self.env_assist.get_env())
+ self.env_assist.assert_reports([])
- self.assertEqual(1, len(mock_push_corosync.mock_calls))
- ac(
- mock_push_corosync.mock_calls[0][1][0].config.export(),
- no_device_conf
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
+ def test_success_3nodes_one_node_offline(self):
+ # with odd number of nodes it doesn't matter if sbd is used
+ cluster_nodes, original_conf, expected_conf = self.conf_3nodes()
+ node_2_offline_msg = (
+ "Failed connect to {0}:2224; No route to host".format(
+ cluster_nodes[1]
+ )
)
- self.assertEqual([], self.mock_reporter.report_item_list)
- mock_remove_net.assert_not_called()
- mock_remote_disable.assert_not_called()
- mock_remote_stop.assert_not_called()
- mock_atb_check.assert_not_called()
+ node_2_offline_responses = [
+ {"label": cluster_nodes[0]},
+ {
+ "label": cluster_nodes[1],
+ "was_connected": False,
+ "errno": 7,
+ "error_msg": node_2_offline_msg,
+ },
+ {"label": cluster_nodes[2]},
+ ]
+ def node_2_offline_warning(command):
+ return fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node=cluster_nodes[1],
+ reason=node_2_offline_msg,
+ command=command
+ )
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load_content(original_conf)
+ self.config.http.corosync.qdevice_client_disable(
+ communication_list=node_2_offline_responses
+ )
+ self.config.http.corosync.qdevice_client_stop(
+ communication_list=node_2_offline_responses
+ )
+ self.fixture_config_http_qdevice_net_destroy(
+ cluster_nodes, node_2_offline_responses
+ )
+ self.config.env.push_corosync_conf(
+ corosync_conf_text=expected_conf
+ )
-@skip("TODO: rewrite using new testing fremework")
-@mock.patch("pcs.lib.commands.quorum.qdevice_net.remote_client_destroy")
-@mock.patch.object(
- LibraryEnvironment,
- "node_communicator",
- lambda self: "mock_communicator"
-)
-class RemoveDeviceNetTest(TestCase):
- def setUp(self):
- self.mock_logger = mock.MagicMock(logging.Logger)
- self.mock_reporter = MockLibraryReportProcessor()
- self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- self.nodes = NodeAddressesList([
- NodeAddresses("node1"),
- NodeAddresses("node2"),
- ])
+ lib.remove_device(self.env_assist.get_env(), skip_offline_nodes=True)
- def test_success(self, mock_client_destroy):
- skip_offline_nodes = False
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.SERVICE_DISABLE_STARTED,
+ service="corosync-qdevice",
+ ),
+ fixture.info(
+ report_codes.SERVICE_DISABLE_SUCCESS,
+ node=cluster_nodes[0],
+ service="corosync-qdevice",
+ ),
+ node_2_offline_warning("remote/qdevice_client_disable"),
+ fixture.info(
+ report_codes.SERVICE_DISABLE_SUCCESS,
+ node=cluster_nodes[2],
+ service="corosync-qdevice",
+ ),
+ fixture.info(
+ report_codes.SERVICE_STOP_STARTED,
+ service="corosync-qdevice",
+ ),
+ fixture.info(
+ report_codes.SERVICE_STOP_SUCCESS,
+ node=cluster_nodes[0],
+ service="corosync-qdevice",
+ ),
+ node_2_offline_warning("remote/qdevice_client_stop"),
+ fixture.info(
+ report_codes.SERVICE_STOP_SUCCESS,
+ node=cluster_nodes[2],
+ service="corosync-qdevice",
+ ),
+ fixture.info(report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED),
+ fixture.info(
+ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
+ node=cluster_nodes[0],
+ ),
+ node_2_offline_warning("remote/qdevice_net_client_destroy"),
+ fixture.info(
+ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
+ node=cluster_nodes[2],
+ ),
+ ])
- lib._remove_device_model_net(
- self.lib_env,
- self.nodes,
- skip_offline_nodes
- )
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
+ def test_error_disable_qdevice(self):
+ cluster_nodes, original_conf, dummy_expected_conf = self.conf_3nodes()
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.INFO,
- report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED,
- {}
- ),
- (
- severity.INFO,
- report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
- {
- "node": self.nodes[0].label
- }
- ),
- (
- severity.INFO,
- report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
- {
- "node": self.nodes[1].label
- }
- ),
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load_content(original_conf)
+ self.config.http.corosync.qdevice_client_disable(
+ communication_list=[
+ {"label": cluster_nodes[0]},
+ {
+ "label": cluster_nodes[1],
+ "response_code": 400,
+ "output": "some error occurred",
+ },
+ {"label": cluster_nodes[2]},
]
)
- client_destroy_calls = [
- mock.call("mock_communicator", self.nodes[0]),
- mock.call("mock_communicator", self.nodes[1]),
- ]
- self.assertEqual(
- len(client_destroy_calls),
- len(mock_client_destroy.mock_calls)
- )
- mock_client_destroy.assert_has_calls(
- client_destroy_calls,
- any_order=True
- )
- def test_error_client_destroy(self, mock_client_destroy):
- def raiser(communicator, node):
- if node == self.nodes[1]:
- raise NodeCommunicationException("host", "command", "reason")
- mock_client_destroy.side_effect = raiser
- skip_offline_nodes = False
-
- assert_raise_library_error(
- lambda: lib._remove_device_model_net(
- self.lib_env,
- self.nodes,
- skip_offline_nodes
+ self.env_assist.assert_raise_library_error(
+ lambda: lib.remove_device(
+ self.env_assist.get_env(),
+ skip_offline_nodes=False
),
- (
- severity.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR,
- {},
- report_codes.SKIP_OFFLINE_NODES
- )
+ [], # an empty LibraryError is raised
+ expected_in_processor=False
)
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.INFO,
- report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED,
- {}
- ),
- (
- severity.INFO,
- report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
- {
- "node": self.nodes[0].label
- }
- ),
- (
- severity.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR,
- {},
- report_codes.SKIP_OFFLINE_NODES
- ),
- ]
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.SERVICE_DISABLE_STARTED,
+ service="corosync-qdevice",
+ ),
+ fixture.info(
+ report_codes.SERVICE_DISABLE_SUCCESS,
+ node=cluster_nodes[0],
+ service="corosync-qdevice",
+ ),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node=cluster_nodes[1],
+ command="remote/qdevice_client_disable",
+ reason="some error occurred",
+ ),
+ fixture.info(
+ report_codes.SERVICE_DISABLE_SUCCESS,
+ node=cluster_nodes[2],
+ service="corosync-qdevice",
+ ),
+ ])
+
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
+ def test_error_stop_qdevice(self):
+ cluster_nodes, original_conf, dummy_expected_conf = self.conf_3nodes()
+
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load_content(original_conf)
+ self.config.http.corosync.qdevice_client_disable(
+ node_labels=cluster_nodes
)
- client_destroy_calls = [
- mock.call("mock_communicator", self.nodes[0]),
- mock.call("mock_communicator", self.nodes[1]),
- ]
- self.assertEqual(
- len(client_destroy_calls),
- len(mock_client_destroy.mock_calls)
+ self.config.http.corosync.qdevice_client_stop(
+ communication_list=[
+ {"label": cluster_nodes[0]},
+ {
+ "label": cluster_nodes[1],
+ "response_code": 400,
+ "output": "some error occurred",
+ },
+ {"label": cluster_nodes[2]},
+ ],
)
- mock_client_destroy.assert_has_calls(
- client_destroy_calls,
- any_order=True
+
+ self.env_assist.assert_raise_library_error(
+ lambda: lib.remove_device(
+ self.env_assist.get_env(),
+ skip_offline_nodes=False
+ ),
+ [], # an empty LibraryError is raised
+ expected_in_processor=False
)
- def test_error_client_destroy_skip_offline(self, mock_client_destroy):
- def raiser(communicator, node):
- if node == self.nodes[1]:
- raise NodeCommunicationException("host", "command", "reason")
- mock_client_destroy.side_effect = raiser
- skip_offline_nodes = True
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.SERVICE_DISABLE_STARTED,
+ service="corosync-qdevice",
+ ),
+ ] + [
+ fixture.info(
+ report_codes.SERVICE_DISABLE_SUCCESS,
+ node=node,
+ service="corosync-qdevice",
+ )
+ for node in cluster_nodes
+ ] + [
+ fixture.info(
+ report_codes.SERVICE_STOP_STARTED,
+ service="corosync-qdevice",
+ ),
+ fixture.info(
+ report_codes.SERVICE_STOP_SUCCESS,
+ node=cluster_nodes[0],
+ service="corosync-qdevice",
+ ),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node=cluster_nodes[1],
+ command="remote/qdevice_client_stop",
+ reason="some error occurred",
+ ),
+ fixture.info(
+ report_codes.SERVICE_STOP_SUCCESS,
+ node=cluster_nodes[2],
+ service="corosync-qdevice",
+ ),
+ ])
- lib._remove_device_model_net(
- self.lib_env,
- self.nodes,
- skip_offline_nodes
- )
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
+ def test_error_destroy_qdevice_net(self):
+ cluster_nodes, original_conf, dummy_expected_conf = self.conf_3nodes()
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.INFO,
- report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED,
- {}
- ),
- (
- severity.INFO,
- report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
- {
- "node": self.nodes[0].label
- }
- ),
- (
- severity.WARNING,
- report_codes.NODE_COMMUNICATION_ERROR,
- {}
- ),
- ]
+ self.config.runner.corosync.version()
+ self.config.corosync_conf.load_content(original_conf)
+ self.config.http.corosync.qdevice_client_disable(
+ node_labels=cluster_nodes
)
- client_destroy_calls = [
- mock.call("mock_communicator", self.nodes[0]),
- mock.call("mock_communicator", self.nodes[1]),
- ]
- self.assertEqual(
- len(client_destroy_calls),
- len(mock_client_destroy.mock_calls)
+ self.config.http.corosync.qdevice_client_stop(node_labels=cluster_nodes)
+ self.fixture_config_http_qdevice_net_destroy(
+ cluster_nodes,
+ [
+ {"label": cluster_nodes[0]},
+ {
+ "label": cluster_nodes[1],
+ "response_code": 400,
+ "output": "some error occurred",
+ },
+ {"label": cluster_nodes[2]},
+ ],
)
- mock_client_destroy.assert_has_calls(
- client_destroy_calls,
- any_order=True
+
+ self.env_assist.assert_raise_library_error(
+ lambda: lib.remove_device(
+ self.env_assist.get_env(),
+ skip_offline_nodes=False
+ ),
+ [], # an empty LibraryError is raised
+ expected_in_processor=False
)
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.SERVICE_DISABLE_STARTED,
+ service="corosync-qdevice",
+ ),
+ ] + [
+ fixture.info(
+ report_codes.SERVICE_DISABLE_SUCCESS,
+ node=node,
+ service="corosync-qdevice",
+ )
+ for node in cluster_nodes
+ ] + [
+ fixture.info(
+ report_codes.SERVICE_STOP_STARTED,
+ service="corosync-qdevice",
+ ),
+ ] + [
+ fixture.info(
+ report_codes.SERVICE_STOP_SUCCESS,
+ node=node,
+ service="corosync-qdevice",
+ )
+ for node in cluster_nodes
+ ] + [
+ fixture.info(report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED),
+ fixture.info(
+ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
+ node=cluster_nodes[0],
+ ),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node=cluster_nodes[1],
+ command="remote/qdevice_net_client_destroy",
+ reason="some error occurred",
+ ),
+ fixture.info(
+ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
+ node=cluster_nodes[2],
+ ),
+ ])
+
@mock.patch.object(LibraryEnvironment, "push_corosync_conf")
@mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
@@ -2436,7 +2673,7 @@ class UpdateDeviceTest(TestCase, CmanMixin):
),
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["bad_option"],
"option_type": "quorum device",
@@ -2476,7 +2713,7 @@ class UpdateDeviceTest(TestCase, CmanMixin):
[
(
severity.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["bad_option"],
"option_type": "quorum device",
diff --git a/pcs/lib/commands/test/test_stonith.py b/pcs/lib/commands/test/test_stonith.py
new file mode 100644
index 0000000..912742f
--- /dev/null
+++ b/pcs/lib/commands/test/test_stonith.py
@@ -0,0 +1,188 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.common import report_codes
+from pcs.lib.commands import stonith
+from pcs.lib.resource_agent import StonithAgent
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase
+
+
+class Create(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.agent_name = "test_simple"
+ self.instance_name = "stonith-test"
+ self.timeout = 10
+ self.expected_cib = """
+ <resources>
+ <primitive class="stonith" id="stonith-test" type="test_simple">
+ <instance_attributes id="stonith-test-instance_attributes">
+ <nvpair id="stonith-test-instance_attributes-must-set"
+ name="must-set" value="value"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="stonith-test-monitor-interval-60s"
+ interval="60s" name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """
+ self.expected_status = """
+ <resources>
+ <resource
+ id="{id}"
+ resource_agent="stonith:{agent}"
+ role="Started"
+ active="true"
+ failed="false"
+ nodes_running_on="1"
+ >
+ <node name="node1" id="1" cached="false"/>
+ </resource>
+ </resources>
+ """.format(id=self.instance_name, agent=self.agent_name)
+ (self.config
+ .runner.pcmk.load_agent(
+ agent_name="stonith:{0}".format(self.agent_name),
+ agent_filename="stonith_agent_fence_simple.xml"
+ )
+ .runner.cib.load()
+ .runner.pcmk.load_stonithd_metadata()
+ )
+
+ def tearDown(self):
+ StonithAgent.clear_stonithd_metadata_cache()
+
+ def test_minimal_success(self):
+ self.config.env.push_cib(resources=self.expected_cib)
+ stonith.create(
+ self.env_assist.get_env(),
+ self.instance_name,
+ self.agent_name,
+ operations=[],
+ meta_attributes={},
+ instance_attributes={"must-set": "value"}
+ )
+
+ def test_minimal_wait_ok_run_ok(self):
+ (self.config
+ .runner.pcmk.can_wait(before="runner.cib.load")
+ .env.push_cib(
+ resources=self.expected_cib,
+ wait=self.timeout
+ )
+ .runner.pcmk.load_state(resources=self.expected_status)
+ )
+ stonith.create(
+ self.env_assist.get_env(),
+ self.instance_name,
+ self.agent_name,
+ operations=[],
+ meta_attributes={},
+ instance_attributes={"must-set": "value"},
+ wait=self.timeout
+ )
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ roles_with_nodes={"Started": ["node1"]},
+ resource_id=self.instance_name,
+ ),
+ ])
+
+
+class CreateInGroup(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.agent_name = "test_simple"
+ self.instance_name = "stonith-test"
+ self.timeout = 10
+ self.expected_cib = """
+ <resources>
+ <group id="my-group">
+ <primitive class="stonith" id="stonith-test" type="test_simple">
+ <instance_attributes id="stonith-test-instance_attributes">
+ <nvpair id="stonith-test-instance_attributes-must-set"
+ name="must-set" value="value"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="stonith-test-monitor-interval-60s"
+ interval="60s" name="monitor"
+ />
+ </operations>
+ </primitive>
+ </group>
+ </resources>
+ """
+ self.expected_status = """
+ <resources>
+ <resource
+ id="{id}"
+ resource_agent="stonith:{agent}"
+ role="Started"
+ active="true"
+ failed="false"
+ nodes_running_on="1"
+ >
+ <node name="node1" id="1" cached="false"/>
+ </resource>
+ </resources>
+ """.format(id=self.instance_name, agent=self.agent_name)
+ (self.config
+ .runner.pcmk.load_agent(
+ agent_name="stonith:{0}".format(self.agent_name),
+ agent_filename="stonith_agent_fence_simple.xml"
+ )
+ .runner.cib.load()
+ .runner.pcmk.load_stonithd_metadata()
+ )
+
+ def tearDown(self):
+ StonithAgent.clear_stonithd_metadata_cache()
+
+ def test_minimal_success(self):
+ self.config.env.push_cib(resources=self.expected_cib)
+ stonith.create_in_group(
+ self.env_assist.get_env(),
+ self.instance_name,
+ self.agent_name,
+ "my-group",
+ operations=[],
+ meta_attributes={},
+ instance_attributes={"must-set": "value"}
+ )
+
+ def test_minimal_wait_ok_run_ok(self):
+ (self.config
+ .runner.pcmk.can_wait(before="runner.cib.load")
+ .env.push_cib(
+ resources=self.expected_cib,
+ wait=self.timeout
+ )
+ .runner.pcmk.load_state(resources=self.expected_status)
+ )
+ stonith.create_in_group(
+ self.env_assist.get_env(),
+ self.instance_name,
+ self.agent_name,
+ "my-group",
+ operations=[],
+ meta_attributes={},
+ instance_attributes={"must-set": "value"},
+ wait=self.timeout
+ )
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ roles_with_nodes={"Started": ["node1"]},
+ resource_id=self.instance_name,
+ ),
+ ])
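Both new test classes clear StonithAgent's stonithd metadata cache in tearDown: the class-level clear method implies the metadata is cached on the class itself, so without an explicit reset the runner output mocked in one test would leak into the next. A hypothetical stand-in showing why such a reset is needed (not the pcs implementation):

    class AgentMetadataCache:
        _cache = None

        @classmethod
        def load(cls, runner):
            # the expensive metadata query is cached at class level
            if cls._cache is None:
                cls._cache = runner()
            return cls._cache

        @classmethod
        def clear(cls):
            # called from tearDown so class-level state cannot
            # leak between test cases
            cls._cache = None

    first = AgentMetadataCache.load(lambda: {"version": "1.1"})
    AgentMetadataCache.clear()
    second = AgentMetadataCache.load(lambda: {"version": "2.0"})
    assert first == {"version": "1.1"} and second == {"version": "2.0"}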
diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py
index 956985e..0951142 100644
--- a/pcs/lib/commands/test/test_ticket.py
+++ b/pcs/lib/commands/test/test_ticket.py
@@ -73,8 +73,11 @@ class CreateTest(TestCase):
"context_type": "cib",
"context_id": "",
"id": "resourceA",
- "id_description": "resource"
+ "expected_types": [
+ "bundle", "clone", "group", "master", "primitive"
+ ],
},
+ None
),
)
diff --git a/pcs/lib/communication/booth.py b/pcs/lib/communication/booth.py
index 240738a..a3aa918 100644
--- a/pcs/lib/communication/booth.py
+++ b/pcs/lib/communication/booth.py
@@ -10,6 +10,7 @@ import os
from pcs.common.node_communicator import RequestData
from pcs.lib import reports
+from pcs.lib.booth import reports as reports_booth
from pcs.lib.communication.tools import (
AllAtOnceStrategyMixin,
AllSameDataMixin,
@@ -51,12 +52,12 @@ class BoothSendConfig(
)
def _get_success_report(self, node_label):
- return reports.booth_config_accepted_by_node(
+ return reports_booth.booth_config_accepted_by_node(
node_label, [self._booth_name]
)
def before(self):
- self._report(reports.booth_config_distribution_started())
+ self._report(reports_booth.booth_config_distribution_started())
class ProcessJsonDataMixin(object):
diff --git a/pcs/lib/communication/qdevice_net.py b/pcs/lib/communication/qdevice_net.py
index 0786ae1..9d45ecf 100644
--- a/pcs/lib/communication/qdevice_net.py
+++ b/pcs/lib/communication/qdevice_net.py
@@ -63,6 +63,7 @@ class ClientSetup(
[("ca_certificate", base64.b64encode(self._ca_cert))]
)
+
class SignCertificate(AllAtOnceStrategyMixin, RunRemotelyBase):
def __init__(self, report_processor):
super(SignCertificate, self).__init__(report_processor)
diff --git a/pcs/lib/communication/sbd.py b/pcs/lib/communication/sbd.py
index 1a7f1b1..628ed08 100644
--- a/pcs/lib/communication/sbd.py
+++ b/pcs/lib/communication/sbd.py
@@ -75,8 +75,11 @@ class StonithWatchdogTimeoutAction(
return RequestData(self._get_request_action())
def _process_response(self, response):
- report = response_to_report_item(response)
+ report = response_to_report_item(
+ response, severity=ReportItemSeverity.WARNING
+ )
if report is None:
+ self._on_success()
return
self._report(report)
return self._get_next_list()
diff --git a/pcs/lib/communication/test/test_booth.py b/pcs/lib/communication/test/test_booth.py
new file mode 100644
index 0000000..a931779
--- /dev/null
+++ b/pcs/lib/communication/test/test_booth.py
@@ -0,0 +1,29 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase, skip
+
+
+class BoothSendConfig(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.test_booth.ConfigSyncTest
+ """
+
+class BoothGetConfig(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.test_booth.PullConfigSuccess
+ pcs.lib.commands.test.test_booth.PullConfigFailure
+ pcs.lib.commands.test.test_booth.PullConfigWithAuthfileSuccess
+ pcs.lib.commands.test.test_booth.PullConfigWithAuthfileFailure
+ """
+
+
+@skip("TODO: missing tests for pcs.lib.communication.booth.BoothSaveFiles")
+class BoothSaveFiles(TestCase):
+ def test_skip(self):
+ pass
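The new module above contains only cross-reference stubs: a TestCase without test_* methods collects zero tests, so its docstring serves purely as a pointer to where the real coverage lives, while @skip keeps a visible TODO in the runner output. A standalone illustration with plain unittest (pcs wraps it in pcs.test.tools.pcs_unittest):

    import unittest

    class CoveredElsewhere(unittest.TestCase):
        """No test_* methods here, so unittest collects nothing."""

    @unittest.skip("TODO: write real tests")
    class Pending(unittest.TestCase):
        def test_skip(self):
            pass  # reported as "skipped", keeping the TODO visible

    if __name__ == "__main__":
        unittest.main()  # Ran 1 test ... OK (skipped=1)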
diff --git a/pcs/lib/communication/test/test_corosync.py b/pcs/lib/communication/test/test_corosync.py
new file mode 100644
index 0000000..3c6a13b
--- /dev/null
+++ b/pcs/lib/communication/test/test_corosync.py
@@ -0,0 +1,21 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+class CheckCorosyncOffline(TestCase):
+ """
+ tested in:
+ pcs.lib.test.test_env.PushCorosyncConfLiveNoQdeviceTest
+ pcs.lib.commands.test.sbd.test_enable_sbd
+ """
+
+class DistributeCorosyncConf(TestCase):
+ """
+ tested in:
+ pcs.lib.test.test_env.PushCorosyncConfLiveNoQdeviceTest
+ pcs.lib.commands.test.sbd.test_enable_sbd
+ """
diff --git a/pcs/lib/communication/test/test_nodes.py b/pcs/lib/communication/test/test_nodes.py
index 75ecb6f..328d6f8 100644
--- a/pcs/lib/communication/test/test_nodes.py
+++ b/pcs/lib/communication/test/test_nodes.py
@@ -14,6 +14,13 @@ from pcs.lib.errors import ReportItemSeverity as severity
from pcs.lib.communication import nodes
+class GetOnlineTargets(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.sbd.test_enable_sbd
+ """
+
+
class AvailabilityCheckerNode(TestCase):
def setUp(self):
self.node = "node1"
diff --git a/pcs/lib/communication/test/test_qdevice.py b/pcs/lib/communication/test/test_qdevice.py
new file mode 100644
index 0000000..f23c599
--- /dev/null
+++ b/pcs/lib/communication/test/test_qdevice.py
@@ -0,0 +1,34 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.lib.communication import qdevice
+from pcs.test.tools.pcs_unittest import TestCase
+
+class Stop(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.test_quorum.RemoveDeviceNetTest
+ pcs.lib.test.test_env.PushCorosyncConfLiveWithQdeviceTest
+ """
+
+class Start(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.test_quorum.AddDeviceNetTest
+ pcs.lib.test.test_env.PushCorosyncConfLiveWithQdeviceTest
+ """
+
+class Enable(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.test_quorum.AddDeviceNetTest
+ """
+
+class Disable(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.test_quorum.RemoveDeviceNetTest
+ """
diff --git a/pcs/lib/communication/test/test_qdevice_net.py b/pcs/lib/communication/test/test_qdevice_net.py
new file mode 100644
index 0000000..0cb0277
--- /dev/null
+++ b/pcs/lib/communication/test/test_qdevice_net.py
@@ -0,0 +1,38 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.lib.communication import qdevice_net
+from pcs.test.tools.pcs_unittest import TestCase
+
+class GetCaCert(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.test_quorum.AddDeviceNetTest
+ """
+
+class ClientSetup(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.test_quorum.AddDeviceNetTest
+ """
+
+class SignCertificate(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.test_quorum.AddDeviceNetTest
+ """
+
+class ClientImportCertificateAndKey(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.test_quorum.AddDeviceNetTest
+ """
+
+class ClientDestroy(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.test_quorum.RemoveDeviceNetTest
+ """
diff --git a/pcs/lib/communication/test/test_sbd.py b/pcs/lib/communication/test/test_sbd.py
new file mode 100644
index 0000000..33b3880
--- /dev/null
+++ b/pcs/lib/communication/test/test_sbd.py
@@ -0,0 +1,56 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+
+class EnableSbdService(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.sbd.test_enable_sbd
+ """
+
+class DisableSbdService(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.sbd.test_disable_sbd.DisableSbd
+ """
+
+class RemoveStonithWatchdogTimeout(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.sbd.test_enable_sbd
+ """
+
+class SetStonithWatchdogTimeoutToZero(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.sbd.test_disable_sbd.DisableSbd
+ """
+
+class SetSbdConfig(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.sbd.test_enable_sbd
+ """
+
+class GetSbdConfig(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.sbd.test_get_cluster_sbd_config.GetClusterSbdConfig
+ """
+
+class GetSbdStatus(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.sbd.test_get_cluster_sbd_status.GetClusterSbdStatus
+ """
+
+class CheckSbd(TestCase):
+ """
+ tested in:
+ pcs.lib.commands.test.sbd.test_enable_sbd
+ """
diff --git a/pcs/lib/communication/tools.py b/pcs/lib/communication/tools.py
index 115836b..43c3d99 100644
--- a/pcs/lib/communication/tools.py
+++ b/pcs/lib/communication/tools.py
@@ -6,6 +6,7 @@ from __future__ import (
from pcs.common import report_codes
from pcs.common.node_communicator import Request
+from pcs.lib import reports
from pcs.lib.node_communication import response_to_report_item
from pcs.lib.errors import (
LibraryError,
@@ -173,6 +174,7 @@ class OneByOneStrategyMixin(StrategyBase):
"""
#pylint: disable=abstract-method
__iter = None
+ __successful = False
def get_initial_request_list(self):
"""
@@ -192,6 +194,14 @@ class OneByOneStrategyMixin(StrategyBase):
except StopIteration:
return []
+ def _on_success(self):
+ self.__successful = True
+
+ def on_complete(self):
+ if not self.__successful:
+ self._report(reports.unable_to_perform_operation_on_any_node())
+ return None
+
class AllAtOnceStrategyMixin(StrategyBase):
"""
diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py
index b033976..0c4da0b 100644
--- a/pcs/lib/corosync/config_facade.py
+++ b/pcs/lib/corosync/config_facade.py
@@ -148,7 +148,7 @@ class ConfigFacade(object):
allowed_names = self.__class__.QUORUM_OPTIONS
if name not in allowed_names:
report_items.append(
- reports.invalid_option([name], allowed_names, "quorum")
+ reports.invalid_options([name], allowed_names, "quorum")
)
continue
@@ -470,7 +470,7 @@ class ConfigFacade(object):
for name, value in sorted(model_options.items()):
if name not in allowed_options:
- report_items.append(reports.invalid_option(
+ report_items.append(reports.invalid_options(
[name],
allowed_options,
"quorum device model",
@@ -570,7 +570,7 @@ class ConfigFacade(object):
if name not in allowed_options:
# model is never allowed in generic options, it is passed
# in its own argument
- report_items.append(reports.invalid_option(
+ report_items.append(reports.invalid_options(
[name],
allowed_options,
"quorum device",
diff --git a/pcs/lib/env.py b/pcs/lib/env.py
index 3d31211..46eb946 100644
--- a/pcs/lib/env.py
+++ b/pcs/lib/env.py
@@ -11,8 +11,10 @@ from pcs.common.node_communicator import (
NodeCommunicatorFactory,
NodeTargetFactory
)
+from pcs.common.tools import Version
from pcs.lib import reports
from pcs.lib.booth.env import BoothEnv
+from pcs.lib.cib.tools import get_cib_crm_feature_set
from pcs.lib.pacemaker.env import PacemakerEnv
from pcs.lib.cluster_conf_facade import ClusterConfFacade
from pcs.lib.communication import qdevice
@@ -95,6 +97,7 @@ class LibraryEnvironment(object):
self._cib_upgrade_reported = False
self._cib_data_tmp_file = None
self.__loaded_cib_diff_source = None
+ self.__loaded_cib_diff_source_feature_set = None
self.__loaded_cib_to_modify = None
self._communicator_factory = NodeCommunicatorFactory(
LibCommunicatorLogger(self.logger, self.report_processor),
@@ -146,6 +149,14 @@ class LibraryEnvironment(object):
reports.cib_upgrade_successful()
)
self._cib_upgrade_reported = True
+ self.__loaded_cib_diff_source_feature_set = (
+ get_cib_crm_feature_set(
+ self.__loaded_cib_to_modify,
+ none_if_missing=True
+ )
+ or
+ Version(0, 0, 0)
+ )
return self.__loaded_cib_to_modify
@property
@@ -178,30 +189,47 @@ class LibraryEnvironment(object):
self._get_wait_timeout(wait)
def push_cib(self, custom_cib=None, wait=False):
- if custom_cib is not None:
- return self.push_cib_full(custom_cib, wait)
- return self.push_cib_diff(wait)
+ """
+ Push the previously loaded CIB instance or a custom CIB
- def push_cib_full(self, custom_cib=None, wait=False):
- if custom_cib is None and self.__loaded_cib_diff_source is None:
+ etree custom_cib -- push a custom CIB instead of a loaded instance
+ (allows pushing an externally provided CIB, replacing the one in
+ the cluster completely)
+ mixed wait -- how many seconds to wait for pacemaker to process the new
+ CIB, or False for not waiting at all
+ """
+ if custom_cib is not None:
+ if self.__loaded_cib_diff_source is not None:
+ raise AssertionError(
+ "CIB has been loaded, cannot push custom CIB"
+ )
+ return self.__push_cib_full(custom_cib, wait)
+ if self.__loaded_cib_diff_source is None:
raise AssertionError("CIB has not been loaded")
- if custom_cib is not None and self.__loaded_cib_diff_source is not None:
- raise AssertionError("CIB has been loaded, cannot push custom CIB")
+ # Push by diff works with crm_feature_set > 3.0.8, see
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1488044 for details. We
+ # only check the version if a CIB has been loaded, otherwise the push
+ # fails anyway. Testing suggests that only the source CIB's
+ # version matters.
+ if self.__loaded_cib_diff_source_feature_set < Version(3, 0, 9):
+ self.report_processor.process(
+ reports.cib_push_forced_full_due_to_crm_feature_set(
+ Version(3, 0, 9),
+ self.__loaded_cib_diff_source_feature_set
+ )
+ )
+ return self.__push_cib_full(self.__loaded_cib_to_modify, wait=wait)
+ return self.__push_cib_diff(wait=wait)
+ def __push_cib_full(self, cib_to_push, wait=False):
cmd_runner = self.cmd_runner()
- cib_to_push = (
- self.__loaded_cib_to_modify if custom_cib is None else custom_cib
- )
self.__do_push_cib(
cmd_runner,
lambda: replace_cib_configuration(cmd_runner, cib_to_push),
wait
)
- def push_cib_diff(self, wait=False):
- if self.__loaded_cib_diff_source is None:
- raise AssertionError("CIB has not been loaded")
-
+ def __push_cib_diff(self, wait=False):
cmd_runner = self.cmd_runner()
self.__do_push_cib(
cmd_runner,
@@ -216,7 +244,6 @@ class LibraryEnvironment(object):
self.__loaded_cib_diff_source,
etree_to_str(self.__loaded_cib_to_modify)
)
-
if cib_diff_xml:
push_cib_diff_xml(cmd_runner, cib_diff_xml)
@@ -225,6 +252,7 @@ class LibraryEnvironment(object):
push_strategy()
self._cib_upgrade_reported = False
self.__loaded_cib_diff_source = None
+ self.__loaded_cib_diff_source_feature_set = None
self.__loaded_cib_to_modify = None
if self.is_cib_live and timeout is not False:
wait_for_idle(cmd_runner, timeout)
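
push_cib now picks its strategy from the crm_feature_set remembered at load
time: a diff push is only trusted from 3.0.9 on (see the referenced bugzilla),
anything older is pushed as a full replace with a warning. Just that decision,
condensed, with a namedtuple standing in for pcs.common.tools.Version:

    from collections import namedtuple

    # stand-in for pcs.common.tools.Version; tuple comparison gives ordering
    Version = namedtuple("Version", ["major", "minor", "revision"])

    def choose_push_strategy(loaded_feature_set):
        """Mirror push_cib's choice between a diff push and a full push."""
        # a CIB with no crm_feature_set at all is treated as 0.0.0
        feature_set = loaded_feature_set or Version(0, 0, 0)
        if feature_set < Version(3, 0, 9):
            # pcs also emits CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET here
            return "full"
        return "diff"

    print(choose_push_strategy(Version(3, 0, 8)))  # full (diff unreliable)
    print(choose_push_strategy(Version(3, 0, 9)))  # diff
    print(choose_push_strategy(None))              # full (no feature set)
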
diff --git a/pcs/lib/external.py b/pcs/lib/external.py
index 4ef5d6b..5507543 100644
--- a/pcs/lib/external.py
+++ b/pcs/lib/external.py
@@ -357,7 +357,7 @@ class CommandRunner(object):
# executables must be specified with full path unless the PATH variable
# is set from outside.
self._env_vars = env_vars if env_vars else dict()
- self._python2 = sys.version[0] == "2"
+ self._python2 = (sys.version_info.major == 2)
@property
def env_vars(self):
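
The one-character fix above matters because sys.version is a display string,
not a number; its first character only equals the major version while majors
stay single-digit. sys.version_info carries the parsed components:

    import sys

    print(sys.version[0])          # first char of e.g. "2.7.15 (default ...)"
    print(sys.version_info.major)  # the actual major version as an int
    print(sys.version_info.major == 2)
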
diff --git a/pcs/lib/pacemaker/live.py b/pcs/lib/pacemaker/live.py
index 8e21a96..2afc9d0 100644
--- a/pcs/lib/pacemaker/live.py
+++ b/pcs/lib/pacemaker/live.py
@@ -163,9 +163,9 @@ def ensure_cib_version(runner, cib, version):
Returns cib which was verified by pacemaker version 'version' or later.
Raises LibraryError on any failure.
- CommandRunner runner
- etree cib cib tree
- tuple version tuple of integers (<major>, <minor>, <revision>)
+ CommandRunner runner -- runner
+ etree cib -- cib tree
+ pcs.common.tools.Version version -- required cib version
"""
current_version = get_pacemaker_version_by_which_cib_was_validated(cib)
if current_version >= version:
diff --git a/pcs/lib/pacemaker/test/test_live.py b/pcs/lib/pacemaker/test/test_live.py
index a5a2eb7..904e349 100644
--- a/pcs/lib/pacemaker/test/test_live.py
+++ b/pcs/lib/pacemaker/test/test_live.py
@@ -20,6 +20,7 @@ from pcs.test.tools.xml import XmlManipulation
from pcs import settings
from pcs.common import report_codes
+from pcs.common.tools import Version
import pcs.lib.pacemaker.live as lib
from pcs.lib.errors import ReportItemSeverity as Severity
from pcs.lib.external import CommandRunner
@@ -355,7 +356,7 @@ class EnsureCibVersionTest(TestCase):
def test_same_version(self, mock_upgrade, mock_get_cib):
self.assertTrue(
lib.ensure_cib_version(
- self.mock_runner, self.cib, (2, 3, 4)
+ self.mock_runner, self.cib, Version(2, 3, 4)
) is None
)
mock_upgrade.assert_not_called()
@@ -364,7 +365,7 @@ class EnsureCibVersionTest(TestCase):
def test_higher_version(self, mock_upgrade, mock_get_cib):
self.assertTrue(
lib.ensure_cib_version(
- self.mock_runner, self.cib, (2, 3, 3)
+ self.mock_runner, self.cib, Version(2, 3, 3)
) is None
)
mock_upgrade.assert_not_called()
@@ -377,7 +378,7 @@ class EnsureCibVersionTest(TestCase):
upgraded_cib,
etree.tostring(
lib.ensure_cib_version(
- self.mock_runner, self.cib, (2, 3, 5)
+ self.mock_runner, self.cib, Version(2, 3, 5)
)
).decode()
)
@@ -391,7 +392,7 @@ class EnsureCibVersionTest(TestCase):
upgraded_cib,
etree.tostring(
lib.ensure_cib_version(
- self.mock_runner, self.cib, (2, 3, 5)
+ self.mock_runner, self.cib, Version(2, 3, 5)
)
).decode()
)
@@ -402,7 +403,7 @@ class EnsureCibVersionTest(TestCase):
mock_get_cib.return_value = etree.tostring(self.cib).decode()
assert_raise_library_error(
lambda: lib.ensure_cib_version(
- self.mock_runner, self.cib, (2, 3, 5)
+ self.mock_runner, self.cib, Version(2, 3, 5)
),
(
Severity.ERROR,
@@ -420,7 +421,7 @@ class EnsureCibVersionTest(TestCase):
mock_get_cib.return_value = "not xml"
assert_raise_library_error(
lambda: lib.ensure_cib_version(
- self.mock_runner, self.cib, (2, 3, 5)
+ self.mock_runner, self.cib, Version(2, 3, 5)
),
(
Severity.ERROR,
diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
index fcb42f1..6ccaaf8 100644
--- a/pcs/lib/reports.py
+++ b/pcs/lib/reports.py
@@ -164,7 +164,7 @@ def required_option_is_missing(
"""
required option has not been specified, command cannot continue
list name is/are required but was not entered
- option_type decsribes the option
+ option_type describes the option
severity report item severity
forceable is this report item forceable? by what category?
"""
@@ -185,8 +185,8 @@ def prerequisite_option_is_missing(
if the option_name is specified, the prerequisite_option must be specified
string option_name -- an option which depends on the prerequisite_option
string prerequisite_name -- the prerequisite option
- string option_type -- decsribes the option
- string prerequisite_type -- decsribes the prerequisite_option
+ string option_type -- describes the option
+ string prerequisite_type -- describes the prerequisite_option
"""
return ReportItem.error(
report_codes.PREREQUISITE_OPTION_IS_MISSING,
@@ -204,7 +204,7 @@ def required_option_of_alternatives_is_missing(
"""
at least one option has to be specified
iterable option_names -- options from which at least one has to be specified
- string option_type -- decsribes the option
+ string option_type -- describes the option
"""
severity = ReportItemSeverity.ERROR
forceable = None
@@ -218,7 +218,7 @@ def required_option_of_alternatives_is_missing(
}
)
-def invalid_option(
+def invalid_options(
option_names, allowed_options, option_type, allowed_option_patterns=None,
severity=ReportItemSeverity.ERROR, forceable=None
):
@@ -233,7 +233,7 @@ def invalid_option(
mixed forceable -- is this report item forceable? by what category?
"""
return ReportItem(
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
severity,
forceable,
info={
@@ -251,7 +251,7 @@ def invalid_userdefined_options(
"""
specified option names defined by a user are not valid
- This is different than invalid_option. In this case, the options are
+ This is different from invalid_options. In this case, the options are
supposed to be defined by a user. This report carries information that the
option names do not meet requirements, i.e. they contain disallowed characters.
Invalid_options is used when the options are predefined by pcs (or
@@ -339,7 +339,7 @@ def mutually_exclusive_options(option_names, option_type):
"""
entered options can not coexist
set option_names contain entered mutually exclusive options
- string option_type decsribes the option
+ string option_type describes the option
"""
return ReportItem.error(
report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
@@ -1180,18 +1180,20 @@ def object_with_id_in_unexpected_context(
)
-def id_not_found(id, id_description, context_type="", context_id=""):
+def id_not_found(id, expected_types, context_type="", context_id=""):
"""
specified id does not exist in CIB, user referenced a nonexisting id
- string id specified id
- string id_description decribe id's role
- string context_id specifies the search area
+
+ string id -- specified id
+ list expected_types -- the expected types (roles) of the given id
+ string context_type -- context_id's role / type
+ string context_id -- specifies the search area
"""
return ReportItem.error(
report_codes.ID_NOT_FOUND,
info={
"id": id,
- "id_description": id_description,
+ "expected_types": sorted(expected_types),
"context_type": context_type,
"context_id": context_id,
}
@@ -1404,6 +1406,21 @@ def cib_diff_error(reason, cib_old, cib_new):
}
)
+def cib_push_forced_full_due_to_crm_feature_set(required_set, current_set):
+ """
+ Pcs fell back to the "push full CIB" approach, so race conditions may occur.
+
+ pcs.common.tools.Version required_set -- crm_feature_set required for diff
+ pcs.common.tools.Version current_set -- actual CIB crm_feature_set
+ """
+ return ReportItem.warning(
+ report_codes.CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET,
+ info={
+ "required_set": str(required_set),
+ "current_set": str(current_set),
+ }
+ )
+
def cluster_state_cannot_load(reason):
"""
cannot load cluster status from crm_mon, crm_mon exited with non-zero code
@@ -2543,14 +2560,14 @@ def unable_to_upgrade_cib_to_required_version(
"""
Unable to upgrade CIB to minimal required schema version.
- current_version -- current version of CIB schema
- required_version -- required version of CIB schema
+ pcs.common.tools.Version current_version -- current version of CIB schema
+ pcs.common.tools.Version required_version -- required version of CIB schema
"""
return ReportItem.error(
report_codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION,
info={
- "required_version": "{0}.{1}.{2}".format(*required_version),
- "current_version": "{0}.{1}.{2}".format(*current_version)
+ "required_version": str(required_version),
+ "current_version": str(current_version)
}
)
@@ -2839,3 +2856,15 @@ def tmp_file_write(file_path, content):
"content": content,
}
)
+
+
+def unable_to_perform_operation_on_any_node():
+ """
+ This report is raised when the
+ pcs.lib.communication.tools.OneByOneStrategyMixin strategy mixin is used
+ for network communication and the operation failed on all available
+ hosts, so it is not possible to continue.
+ """
+ return ReportItem.error(
+ report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE,
+ )
diff --git a/pcs/lib/resource_agent.py b/pcs/lib/resource_agent.py
index 4639477..34b18b9 100644
--- a/pcs/lib/resource_agent.py
+++ b/pcs/lib/resource_agent.py
@@ -487,7 +487,7 @@ class Agent(object):
)
if bad_opts:
- report_list.append(reports.invalid_option(
+ report_list.append(reports.invalid_options(
bad_opts,
sorted([attr["name"] for attr in self.get_parameters()]),
parameters_type,
@@ -836,6 +836,10 @@ class StonithAgent(CrmAgent):
"""
_stonithd_metadata = None
+ @classmethod
+ def clear_stonithd_metadata_cache(cls):
+ cls._stonithd_metadata = None
+
def _prepare_name_parts(self, name):
# pacemaker doesn't support stonith (nor resource) agents with : in type
if ":" in name:
diff --git a/pcs/lib/test/test_env.py b/pcs/lib/test/test_env.py
index 5660cf7..c57997c 100644
--- a/pcs/lib/test/test_env.py
+++ b/pcs/lib/test/test_env.py
@@ -240,24 +240,16 @@ class PushCorosyncConfLiveBase(TestCase):
])
self.corosync_conf_facade.need_stopped_cluster = False
self.corosync_conf_facade.need_qdevice_reload = False
- self.node_label_list = [
- dict(label="node-1"),
- dict(label="node-2"),
- ]
+ self.node_labels = ["node-1", "node-2"]
-@mock.patch("pcs.lib.external.is_systemctl")
+@mock.patch("pcs.lib.external.is_systemctl", lambda: True)
class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
- def test_dont_need_stopped_cluster(self, mock_is_systemctl):
- mock_is_systemctl.return_value = True
+ def test_dont_need_stopped_cluster(self):
(self.config
- .http.add_communication(
- "distribute_corosync_conf",
- self.node_label_list,
- action="remote/set_corosync_conf",
- param_list=[("corosync_conf", self.corosync_conf_text)],
- response_code=200,
- output="Succeeded",
+ .http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ node_labels=self.node_labels
)
.runner.systemctl.is_active("corosync")
.runner.corosync.reload()
@@ -278,34 +270,96 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
fixture.info(report_codes.COROSYNC_CONFIG_RELOADED)
])
- def test_need_stopped_cluster(self, mock_is_systemctl):
- mock_is_systemctl.return_value = True
+ def test_dont_need_stopped_cluster_error(self):
+ (self.config
+ .http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ communication_list=[
+ {
+ "label": "node-1",
+ },
+ {
+ "label": "node-2",
+ "response_code": 400,
+ "output": "Failed"
+ },
+ ]
+ )
+ )
+ env = self.env_assistant.get_env()
+ self.env_assistant.assert_raise_library_error(
+ lambda: env.push_corosync_conf(self.corosync_conf_facade),
+ []
+ )
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-1",
+ ),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node="node-2",
+ command="remote/set_corosync_conf",
+ reason="Failed",
+ ),
+ fixture.error(
+ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node="node-2",
+ ),
+ ])
+
+ def test_dont_need_stopped_cluster_error_skip_offline(self):
+ (self.config
+ .http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ communication_list=[
+ {
+ "label": "node-1",
+ },
+ {
+ "label": "node-2",
+ "response_code": 400,
+ "output": "Failed"
+ },
+ ]
+ )
+ .runner.systemctl.is_active("corosync")
+ .runner.corosync.reload()
+ )
+ self.env_assistant.get_env().push_corosync_conf(
+ self.corosync_conf_facade, skip_offline_nodes=True
+ )
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-1",
+ ),
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node="node-2",
+ command="remote/set_corosync_conf",
+ reason="Failed",
+ ),
+ fixture.warn(
+ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+ node="node-2",
+ ),
+ fixture.info(report_codes.COROSYNC_CONFIG_RELOADED)
+ ])
+
+ def test_need_stopped_cluster(self):
self.corosync_conf_facade.need_stopped_cluster = True
(self.config
- .http.add_communication(
- "status",
- self.node_label_list,
- action="remote/status",
- response_code=200,
- output="""
-{"uptime":"0 days, 05:07:39","corosync":false,"pacemaker":false,"cman":false,\
-"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
-"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":[],\
-"corosync_offline":["node-1","node-2"],"pacemaker_online":[],\
-"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":"cluster_name",\
-"resources":[],"groups":[],"constraints":{},"cluster_settings":{"error":\
-"Unable to get configuration settings"},"node_id":"","node_attr":{},\
-"fence_levels":{},"need_ring1_address":false,"is_cman_with_udpu_transport":\
-false,"acls":{},"username":"hacluster"}
- """,
+ .http.corosync.check_corosync_offline(
+ node_labels=self.node_labels
)
- .http.add_communication(
- "set_corosync_conf",
- self.node_label_list,
- action="remote/set_corosync_conf",
- param_list=[("corosync_conf", self.corosync_conf_text)],
- response_code=200,
- output="Succeeded",
+ .http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ node_labels=self.node_labels
)
.runner.systemctl.is_active("corosync", is_active=False)
)
@@ -333,28 +387,17 @@ false,"acls":{},"username":"hacluster"}
),
])
- def test_need_stopped_cluster_not_stopped(self, mock_is_systemctl):
+ def test_need_stopped_cluster_not_stopped(self):
self.corosync_conf_facade.need_stopped_cluster = True
- mock_is_systemctl.return_value = True
(self.config
- .http.add_communication(
- "status",
- self.node_label_list,
- action="remote/status",
- response_code=200,
- output="""
-{"uptime":"0 days, 06:29:36","corosync":true,"pacemaker":true,"cman":false,\
-"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
-"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":\
-["node-1","node-2"],"corosync_offline":[],"pacemaker_online":["node-1",\
-"node-2"],"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":\
-"cluster_name","resources":[],"groups":[],"constraints":{},"cluster_settings":\
-{"have-watchdog":"false","dc-version":"1.1.16-11.el7-94ff4df",\
-"cluster-infrastructure":"corosync","cluster-name":"cluster_name"},\
-"node_id":"1","node_attr":{},"fence_levels":{},"need_ring1_address":false,\
-"is_cman_with_udpu_transport":false,"acls":{"role":{},"group":{},"user":{},\
-"target":{}},"username":"hacluster"}
- """,
+ .http.corosync.check_corosync_offline(
+ communication_list=[
+ {
+ "label": node,
+ "output": '{"corosync":true}'
+ }
+ for node in self.node_labels
+ ]
)
)
env = self.env_assistant.get_env()
@@ -374,48 +417,21 @@ false,"acls":{},"username":"hacluster"}
),
])
- def test_need_stopped_cluster_not_stopped_skip_offline(
- self, mock_is_systemctl
- ):
- mock_is_systemctl.return_value = True
+ def test_need_stopped_cluster_not_stopped_skip_offline(self):
+ # If we know for sure that corosync is running, skip_offline doesn't
+ # matter.
self.corosync_conf_facade.need_stopped_cluster = True
(self.config
- .http.add_communication(
- "status",
- [
+ .http.corosync.check_corosync_offline(
+ communication_list=[
dict(
label="node-1",
- output="""\
-{"uptime":"0 days, 06:36:00","corosync":true,"pacemaker":true,"cman":false,\
-"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
-"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":\
-["node-1"],"corosync_offline":["node-2"],"pacemaker_online":["node-1"],\
-"pacemaker_offline":["node-2"],"pacemaker_standby":[],"cluster_name":\
-"cluster_name","resources":[],"groups":[],"constraints":{},"cluster_settings":\
-{"have-watchdog":"false","dc-version":"1.1.16-11.el7-94ff4df",\
-"cluster-infrastructure":"corosync","cluster-name":"cluster_name"},\
-"node_id":"1","node_attr":{},"fence_levels":{},"need_ring1_address":false,\
-"is_cman_with_udpu_transport":false,"acls":{"role":{},"group":{},"user":{},\
-"target":{}},"username":"hacluster"}
- """,
+ output='{"corosync":true}',
),
dict(
label="node-2",
- output="""\
-{"uptime":"0 days, 06:35:58","corosync":false,"pacemaker":false,"cman":false,\
-"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
-"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":[],\
-"corosync_offline":["node-1","node-2"],"pacemaker_online":[],\
-"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":"cluster_name",\
-"resources":[],"groups":[],"constraints":{},"cluster_settings":\
-{"error":"Unable to get configuration settings"},"node_id":"","node_attr":{},\
-"fence_levels":{},"need_ring1_address":false,"is_cman_with_udpu_transport":\
-false,"acls":{},"username":"hacluster"}
- """,
),
- ],
- action="remote/status",
- response_code=200,
+ ]
)
)
env = self.env_assistant.get_env()
@@ -437,37 +453,58 @@ false,"acls":{},"username":"hacluster"}
)
])
- def test_need_stopped_cluster_comunnication_failure(
- self, mock_is_systemctl
- ):
- mock_is_systemctl.return_value = True
+ def test_need_stopped_cluster_json_error(self):
self.corosync_conf_facade.need_stopped_cluster = True
(self.config
- .http.add_communication(
- "status",
- [
+ .http.corosync.check_corosync_offline(
+ communication_list=[
+ dict(
+ label="node-1",
+ output="{" # not valid json
+ ),
+ dict(
+ label="node-2",
+ # The expected key (/corosync) is missing; we don't
+ # care about the version 2 status key
+ # (/services/corosync/running)
+ output='{"services":{"corosync":{"running":true}}}'
+ ),
+ ]
+ )
+ )
+ env = self.env_assistant.get_env()
+ self.env_assistant.assert_raise_library_error(
+ lambda: env.push_corosync_conf(self.corosync_conf_facade),
+ []
+ )
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
+ fixture.error(
+ report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node="node-1",
+ ),
+ fixture.error(
+ report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node="node-2",
+ ),
+ ])
+
+ def test_need_stopped_cluster_comunnication_failure(self):
+ self.corosync_conf_facade.need_stopped_cluster = True
+ (self.config
+ .http.corosync.check_corosync_offline(
+ communication_list=[
dict(
label="node-1",
- response_code=200,
- output="""\
-{"uptime":"0 days, 00:11:52","corosync":false,"pacemaker":false,"cman":false,\
-"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
-"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":[],\
-"corosync_offline":["node-1","node-2"],"pacemaker_online":[],\
-"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":"cluster_name",\
-"resources":[],"groups":[],"constraints":{},"cluster_settings":\
-{"error":"Unable to get configuration settings"},"node_id":"","node_attr":{},\
-"fence_levels":{},"need_ring1_address":false,"is_cman_with_udpu_transport":\
-false,"acls":{},"username":"hacluster"}
- """,
),
dict(
label="node-2",
response_code=401,
output="""{"notauthorized":"true"}"""
),
- ],
- action="remote/status",
+ ]
)
)
env = self.env_assistant.get_env()
@@ -493,54 +530,35 @@ false,"acls":{},"username":"hacluster"}
),
])
- def test_need_stopped_cluster_comunnication_failure_skip_offline(
- self, mock_is_systemctl
- ):
- mock_is_systemctl.return_value = True
+ def test_need_stopped_cluster_comunnication_failures_skip_offline(self):
+ # If we don't know if corosync is running, skip_offline matters.
self.corosync_conf_facade.need_stopped_cluster = True
(self.config
- .http.add_communication(
- "status",
- [
+ .http.corosync.check_corosync_offline(
+ communication_list=[
dict(
label="node-1",
- response_code=200,
- output="""\
-{"uptime":"0 days, 00:11:52","corosync":false,"pacemaker":false,"cman":false,\
-"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
-"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":[],\
-"corosync_offline":["node-1","node-2"],"pacemaker_online":[],\
-"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":"cluster_name",\
-"resources":[],"groups":[],"constraints":{},"cluster_settings":\
-{"error":"Unable to get configuration settings"},"node_id":"","node_attr":{},\
-"fence_levels":{},"need_ring1_address":false,"is_cman_with_udpu_transport":\
-false,"acls":{},"username":"hacluster"}
- """,
+ output="{" # not valid json
),
dict(
label="node-2",
response_code=401,
output="""{"notauthorized":"true"}"""
),
- ],
- action="remote/status",
+ ]
)
- .http.add_communication(
- "set_corosync_conf",
- [
+ .http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ communication_list=[
dict(
label="node-1",
- response_code=200,
- output="Succeeded",
),
dict(
label="node-2",
response_code=401,
output="""{"notauthorized":"true"}""",
)
- ],
- action="remote/set_corosync_conf",
- param_list=[("corosync_conf", self.corosync_conf_text)],
+ ]
)
.runner.systemctl.is_active("corosync", is_active=False)
)
@@ -549,8 +567,8 @@ false,"acls":{},"username":"hacluster"}
)
self.env_assistant.assert_reports([
fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
- fixture.info(
- report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
+ fixture.warn(
+ report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
node="node-1",
),
fixture.warn(
@@ -581,39 +599,28 @@ false,"acls":{},"username":"hacluster"}
])
-@mock.patch("pcs.lib.external.is_systemctl")
+@mock.patch("pcs.lib.external.is_systemctl", lambda: True)
class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
- def test_qdevice_reload(self, mock_is_systemctl):
- mock_is_systemctl.return_value = True
+ def test_qdevice_reload(self):
self.corosync_conf_facade.need_qdevice_reload = True
(self.config
- .http.add_communication(
- "set_corosync_conf",
- self.node_label_list,
- action="remote/set_corosync_conf",
- param_list=[("corosync_conf", self.corosync_conf_text)],
- response_code=200,
- output="Succeeded",
+ .http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ node_labels=self.node_labels
)
.runner.systemctl.is_active("corosync", is_active=False)
- .http.add_communication(
- "qdevice_client_stop",
- self.node_label_list,
- action="remote/qdevice_client_stop",
- response_code=200,
- output="corosync-qdevice stopped",
+ .http.corosync.qdevice_client_stop(
+ node_labels=self.node_labels
)
- .http.add_communication(
- "qdevice_client_start",
- self.node_label_list,
- action="remote/qdevice_client_start",
- response_code=200,
- output="corosync-qdevice started",
+ .http.corosync.qdevice_client_start(
+ node_labels=self.node_labels
)
)
+
self.env_assistant.get_env().push_corosync_conf(
self.corosync_conf_facade
)
+
self.env_assistant.assert_reports([
fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
fixture.info(
@@ -651,38 +658,95 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
),
])
- def test_qdevice_reload_failures(self, mock_is_systemctl):
- mock_is_systemctl.return_value = True
+ def test_qdevice_reload_corosync_stopped(self):
self.corosync_conf_facade.need_qdevice_reload = True
(self.config
- .http.add_communication(
- "set_corosync_conf",
- self.node_label_list,
- action="remote/set_corosync_conf",
- param_list=[("corosync_conf", self.corosync_conf_text)],
- response_code=200,
- output="Succeeded",
+ .http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ node_labels=self.node_labels
)
.runner.systemctl.is_active("corosync", is_active=False)
- .http.add_communication(
- "qdevice_client_stop",
- [
+ .http.corosync.qdevice_client_stop(
+ node_labels=self.node_labels
+ )
+ .http.corosync.qdevice_client_start(
+ communication_list=[
+ {
+ "label": label,
+ "output": "corosync is not running, skipping",
+ }
+ for label in self.node_labels
+ ]
+ )
+ )
+
+ self.env_assistant.get_env().push_corosync_conf(
+ self.corosync_conf_facade
+ )
+
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-1",
+ ),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-2",
+ ),
+ fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
+ fixture.info(
+ report_codes.SERVICE_STOP_SUCCESS,
+ node="node-1",
+ service="corosync-qdevice",
+ instance=None,
+ ),
+ fixture.info(
+ report_codes.SERVICE_STOP_SUCCESS,
+ node="node-2",
+ service="corosync-qdevice",
+ instance=None,
+ ),
+ fixture.info(
+ report_codes.SERVICE_START_SKIPPED,
+ node="node-1",
+ service="corosync-qdevice",
+ instance=None,
+ reason="corosync is not running",
+ ),
+ fixture.info(
+ report_codes.SERVICE_START_SKIPPED,
+ node="node-2",
+ service="corosync-qdevice",
+ instance=None,
+ reason="corosync is not running",
+ ),
+ ])
+
+ def test_qdevice_reload_failures(self):
+ # This also tests that failing to stop qdevice on a node doesn't prevent
+ # starting qdevice on the same node.
+ self.corosync_conf_facade.need_qdevice_reload = True
+ (self.config
+ .http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ node_labels=self.node_labels
+ )
+ .runner.systemctl.is_active("corosync", is_active=False)
+ .http.corosync.qdevice_client_stop(
+ communication_list=[
dict(
label="node-1",
- response_code=200,
- output="corosync-qdevice stopped",
),
dict(
label="node-2",
response_code=400,
output="error",
),
- ],
- action="remote/qdevice_client_stop",
+ ]
)
- .http.add_communication(
- "qdevice_client_start",
- [
+ .http.corosync.qdevice_client_start(
+ communication_list=[
dict(
label="node-1",
errno=8,
@@ -691,18 +755,17 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
),
dict(
label="node-2",
- response_code=200,
- output="corosync-qdevice started",
),
- ],
- action="remote/qdevice_client_start",
+ ]
)
)
+
env = self.env_assistant.get_env()
self.env_assistant.assert_raise_library_error(
lambda: env.push_corosync_conf(self.corosync_conf_facade),
[]
)
+
self.env_assistant.assert_reports([
fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
fixture.info(
@@ -742,17 +805,14 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
),
])
- def test_qdevice_reload_failures_skip_offline(self, mock_is_systemctl):
- mock_is_systemctl.return_value = True
+ def test_qdevice_reload_failures_skip_offline(self):
self.corosync_conf_facade.need_qdevice_reload = True
(self.config
- .http.add_communication(
- "set_corosync_conf",
- [
+ .http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ communication_list=[
dict(
label="node-1",
- response_code=200,
- output="Succeeded",
),
dict(
label="node-2",
@@ -760,30 +820,23 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
error_msg="failure",
was_connected=False,
),
- ],
- action="remote/set_corosync_conf",
- param_list=[("corosync_conf", self.corosync_conf_text)],
+ ]
)
.runner.systemctl.is_active("corosync", is_active=False)
- .http.add_communication(
- "qdevice_client_stop",
- [
+ .http.corosync.qdevice_client_stop(
+ communication_list=[
dict(
label="node-1",
- response_code=200,
- output="corosync-qdevice stopped",
),
dict(
label="node-2",
response_code=400,
output="error",
),
- ],
- action="remote/qdevice_client_stop",
+ ]
)
- .http.add_communication(
- "qdevice_client_start",
- [
+ .http.corosync.qdevice_client_start(
+ communication_list=[
dict(
label="node-1",
errno=8,
@@ -792,17 +845,16 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
),
dict(
label="node-2",
- response_code=200,
- output="corosync-qdevice started",
),
- ],
- action="remote/qdevice_client_start",
+ ]
)
)
+
env = self.env_assistant.get_env()
env.push_corosync_conf(
self.corosync_conf_facade, skip_offline_nodes=True
)
+
self.env_assistant.assert_reports([
fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
fixture.info(
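
The tests above shrink because raw http.add_communication calls were replaced
by per-command shortcuts such as http.corosync.set_corosync_conf, which accept
either node_labels (all defaults) or a communication_list (per-node
overrides). Roughly what such a shortcut does with its arguments; the default
field values are assumed here, not quoted from the fixture code:

    def expand_communication_list(node_labels=None, communication_list=None):
        """Expand plain node labels into full per-node response specs."""
        if communication_list is None:
            communication_list = [{"label": label} for label in node_labels]
        defaults = {"response_code": 200, "output": "Succeeded"}
        return [dict(defaults, **entry) for entry in communication_list]

    # the simple case: every node answers with the default success response
    print(expand_communication_list(node_labels=["node-1", "node-2"]))
    # the failure case: only the differing fields need to be spelled out
    print(expand_communication_list(communication_list=[
        {"label": "node-1"},
        {"label": "node-2", "response_code": 400, "output": "Failed"},
    ]))
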
diff --git a/pcs/lib/test/test_env_cib.py b/pcs/lib/test/test_env_cib.py
index 33d4812..2431c92 100644
--- a/pcs/lib/test/test_env_cib.py
+++ b/pcs/lib/test/test_env_cib.py
@@ -4,69 +4,57 @@ from __future__ import (
print_function,
)
-import logging
from functools import partial
from lxml import etree
from pcs.common import report_codes
+from pcs.common.tools import Version
from pcs.lib.env import LibraryEnvironment
-from pcs.lib.errors import ReportItemSeverity as severity
from pcs.test.tools import fixture
-from pcs.test.tools.assertions import assert_xml_equal
+from pcs.test.tools.assertions import assert_xml_equal
from pcs.test.tools.command_env import get_env_tools
-from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.misc import get_test_resource as rc, create_patcher
+from pcs.test.tools.misc import (
+ get_test_resource as rc,
+ create_setup_patch_mixin,
+)
from pcs.test.tools.pcs_unittest import TestCase, mock
from pcs.test.tools.xml import etree_to_str
-patch_env = create_patcher("pcs.lib.env")
-patch_env_object = partial(mock.patch.object, LibraryEnvironment)
-
def mock_tmpfile(filename):
mock_file = mock.MagicMock()
mock_file.name = rc(filename)
return mock_file
-@patch_env_object("push_cib_diff")
-@patch_env_object("push_cib_full")
-class CibPushProxy(TestCase):
- def setUp(self):
- self.env = LibraryEnvironment(
- mock.MagicMock(logging.Logger),
- MockLibraryReportProcessor()
- )
- get_cib_patcher = patch_env_object(
- "get_cib",
- lambda self: "<cib />"
- )
- self.addCleanup(get_cib_patcher.stop)
- get_cib_patcher.start()
-
- def test_push_loaded(self, mock_push_full, mock_push_diff):
- self.env.get_cib()
- self.env.push_cib()
- mock_push_full.assert_not_called()
- mock_push_diff.assert_called_once_with(False)
-
- def test_push_loaded_wait(self, mock_push_full, mock_push_diff):
- self.env.get_cib()
- self.env.push_cib(wait=10)
- mock_push_full.assert_not_called()
- mock_push_diff.assert_called_once_with(10)
-
- def test_push_custom(self, mock_push_full, mock_push_diff):
- self.env.get_cib()
- self.env.push_cib(custom_cib="<cib />")
- mock_push_full.assert_called_once_with("<cib />", False)
- mock_push_diff.assert_not_called()
-
- def test_push_custom_wait(self, mock_push_full, mock_push_diff):
- self.env.get_cib()
- self.env.push_cib(custom_cib="<cib />", wait=10)
- mock_push_full.assert_called_once_with("<cib />", 10)
- mock_push_diff.assert_not_called()
+SetupPatchMixin = create_setup_patch_mixin(
+ partial(mock.patch.object, LibraryEnvironment)
+)
+
+
+class ManageCibAssertionMixin(object):
+ def assert_raises_cib_error(self, callable_obj, message):
+ with self.assertRaises(AssertionError) as context_manager:
+ callable_obj()
+ self.assertEqual(str(context_manager.exception), message)
+
+ def assert_raises_cib_not_loaded(self, callable_obj):
+ self.assert_raises_cib_error(
+ callable_obj,
+ "CIB has not been loaded"
+ )
+
+ def assert_raises_cib_already_loaded(self, callable_obj):
+ self.assert_raises_cib_error(
+ callable_obj,
+ "CIB has already been loaded"
+ )
+
+ def assert_raises_cib_loaded_cannot_custom(self, callable_obj):
+ self.assert_raises_cib_error(
+ callable_obj,
+ "CIB has been loaded, cannot push custom CIB"
+ )
class IsCibLive(TestCase):
@@ -80,154 +68,83 @@ class IsCibLive(TestCase):
self.assertFalse(env_assist.get_env().is_cib_live)
-class WaitSupportWithLiveCib(TestCase):
- wait_timeout = 10
-
+class UpgradeCib(TestCase):
def setUp(self):
self.env_assist, self.config = get_env_tools(test_case=self)
- self.config.runner.cib.load()
- def test_supports_timeout(self):
+ def test_get_and_push_cib_version_upgrade_needed(self):
(self.config
- .runner.pcmk.can_wait()
- .runner.cib.push()
- .runner.pcmk.wait(timeout=self.wait_timeout)
+ .runner.cib.load(name="load_cib_old", filename="cib-empty-2.6.xml")
+ .runner.cib.upgrade()
+ .runner.cib.load(filename="cib-empty-2.8.xml")
)
-
env = self.env_assist.get_env()
- env.get_cib()
- env.push_cib_full(wait=self.wait_timeout)
-
- self.env_assist.assert_reports([])
+ env.get_cib(Version(2, 8, 0))
- def test_does_not_support_timeout_without_pcmk_support(self):
- self.config.runner.pcmk.can_wait(stdout="cannot wait")
-
- env = self.env_assist.get_env()
- env.get_cib()
- self.env_assist.assert_raise_library_error(
- lambda: env.push_cib_full(wait=self.wait_timeout),
- [
- fixture.error(report_codes.WAIT_FOR_IDLE_NOT_SUPPORTED),
- ],
- expected_in_processor=False
+ self.env_assist.assert_reports(
+ [fixture.info(report_codes.CIB_UPGRADE_SUCCESSFUL)]
)
- def test_raises_on_invalid_value(self):
- self.config.runner.pcmk.can_wait()
-
+ def test_get_and_push_cib_version_upgrade_not_needed(self):
+ self.config.runner.cib.load(filename="cib-empty-2.6.xml")
env = self.env_assist.get_env()
- env.get_cib()
- self.env_assist.assert_raise_library_error(
- lambda: env.push_cib_full(wait="abc"),
- [
- fixture.error(
- report_codes.INVALID_TIMEOUT_VALUE,
- timeout="abc"
- ),
- ],
- expected_in_processor=False
- )
-
-
-class WaitSupportWithMockedCib(TestCase):
- def test_does_not_suport_timeout(self):
- env_assist, config = get_env_tools(test_case=self)
- (config
- .env.set_cib_data("<cib/>")
- .runner.cib.load()
- )
-
- env = env_assist.get_env()
- env.get_cib()
- env_assist.assert_raise_library_error(
- lambda: env.push_cib_full(wait=10),
- [
- fixture.error(report_codes.WAIT_FOR_IDLE_NOT_LIVE_CLUSTER),
- ],
- expected_in_processor=False
- )
-
-
-class MangeCibAssertionMixin(object):
- def assert_raises_cib_error(self, callable_obj, message):
- with self.assertRaises(AssertionError) as context_manager:
- callable_obj()
- self.assertEqual(str(context_manager.exception), message)
-
- def assert_raises_cib_not_loaded(self, callable_obj):
- self.assert_raises_cib_error(callable_obj, "CIB has not been loaded")
-
+ env.get_cib(Version(2, 5, 0))
-class CibPushFull(TestCase, MangeCibAssertionMixin):
- custom_cib = "<custom_cib />"
+class GetCib(TestCase, ManageCibAssertionMixin):
def setUp(self):
self.env_assist, self.config = get_env_tools(test_case=self)
- def test_push_custom_without_get(self):
- self.config.runner.cib.push_independent(self.custom_cib)
- self.env_assist.get_env().push_cib_full(etree.XML(self.custom_cib))
-
- def test_push_custom_after_get(self):
- self.config.runner.cib.load()
- env = self.env_assist.get_env()
- env.get_cib()
-
- with self.assertRaises(AssertionError) as context_manager:
- env.push_cib_full(etree.XML(self.custom_cib))
- self.assertEqual(
- str(context_manager.exception),
- "CIB has been loaded, cannot push custom CIB"
- )
-
- def test_push_fails(self):
+ def test_raise_library_error_when_cibadmin_failed(self):
+ stderr = "cibadmin: Connection to local file failed..."
(self.config
- .runner.cib.load()
- .runner.cib.push(stderr="invalid cib", returncode=1)
+ #The value of cib_data is unimportant here. This content is only put
+ #into a tempfile when the runner is not mocked, and is then loaded
+ #from the tempfile by `cibadmin --local --query`. The runner is
+ #mocked in these tests, so the value of cib_data is in fact not used.
+ .env.set_cib_data("whatever")
+ .runner.cib.load(returncode=203, stderr=stderr)
)
- env = self.env_assist.get_env()
- env.get_cib()
+
self.env_assist.assert_raise_library_error(
- env.push_cib_full,
+ self.env_assist.get_env().get_cib,
[
- (
- severity.ERROR,
- report_codes.CIB_PUSH_ERROR,
- {
- "reason": "invalid cib",
- },
- None
- )
+ fixture.error(report_codes.CIB_LOAD_ERROR, reason=stderr)
],
expected_in_processor=False
)
- def test_get_and_push(self):
+ def test_returns_cib_from_cib_data(self):
+ cib_filename = "cib-empty.xml"
(self.config
- .runner.cib.load()
- .runner.cib.push()
+ #The value of cib_data is unimportant here. See the sibling test for details.
+ .env.set_cib_data("whatever")
+ .runner.cib.load(filename=cib_filename)
+ )
+ assert_xml_equal(
+ etree_to_str(self.env_assist.get_env().get_cib()),
+ open(rc(cib_filename)).read()
)
- env = self.env_assist.get_env()
- env.get_cib()
- env.push_cib_full()
- def test_can_get_after_push(self):
- (self.config
- .runner.cib.load()
- .runner.cib.push()
- .runner.cib.load(name="load_cib_2")
- )
+ def test_get_and_property(self):
+ self.config.runner.cib.load()
+ env = self.env_assist.get_env()
+ self.assertEqual(env.get_cib(), env.cib)
+ def test_property_without_get(self):
env = self.env_assist.get_env()
- env.get_cib()
- env.push_cib_full()
# need to use lambda because env.cib is a property
self.assert_raises_cib_not_loaded(lambda: env.cib)
+
+ def test_double_get(self):
+ self.config.runner.cib.load()
+ env = self.env_assist.get_env()
env.get_cib()
+ self.assert_raises_cib_already_loaded(env.get_cib)
-class CibPushDiff(TestCase, MangeCibAssertionMixin):
+class PushLoadedCib(TestCase, ManageCibAssertionMixin):
+ wait_timeout = 10
def setUp(self):
tmpfile_patcher = mock.patch("pcs.lib.pacemaker.live.write_tmpfile")
self.addCleanup(tmpfile_patcher.stop)
@@ -237,59 +154,142 @@ class CibPushDiff(TestCase, MangeCibAssertionMixin):
self.mock_write_tmpfile.side_effect = [
self.tmpfile_old, self.tmpfile_new
]
-
+ self.cib_can_diff = "cib-empty-2.0.xml"
+ self.cib_cannot_diff = "cib-empty-1.2.xml"
self.env_assist, self.config = get_env_tools(test_case=self)
- def config_load_and_push(self, filename="cib-empty.xml"):
+ def config_load_and_push_diff(self):
(self.config
- .runner.cib.load(filename=filename)
+ .runner.cib.load(filename=self.cib_can_diff)
.runner.cib.diff(self.tmpfile_old.name, self.tmpfile_new.name)
.runner.cib.push_diff()
)
- def push_reports(self, strip_old=False):
+ def config_load_and_push(self):
+ (self.config
+ .runner.cib.load(filename=self.cib_cannot_diff)
+ .runner.cib.push()
+ )
+
+ def push_reports(self, cib_old=None, cib_new=None):
# No test changes the CIB between load and push. The point is to test
# loading and pushing, not editing the CIB.
loaded_cib = self.config.calls.get("runner.cib.load").stdout
return [
- (
- severity.DEBUG,
+ fixture.debug(
report_codes.TMP_FILE_WRITE,
- {
- "file_path": self.tmpfile_old.name,
- "content": loaded_cib.strip() if strip_old else loaded_cib,
- },
- None
+ file_path=self.tmpfile_old.name,
+ content=(cib_old if cib_old is not None else loaded_cib)
),
- (
- severity.DEBUG,
+ fixture.debug(
report_codes.TMP_FILE_WRITE,
- {
- "file_path": self.tmpfile_new.name,
- "content": loaded_cib.strip(),
- },
- None
+ file_path=self.tmpfile_new.name,
+ content=(cib_new if cib_new is not None else loaded_cib).strip()
),
]
- def assert_tmps_write_reported(self):
+ def push_full_forced_reports(self, version):
+ return [
+ fixture.warn(
+ report_codes.CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET,
+ current_set=version,
+ required_set="3.0.9"
+ )
+ ]
+
+ def test_get_and_push(self):
+ self.config_load_and_push_diff()
+ env = self.env_assist.get_env()
+
+ env.get_cib()
+ env.push_cib()
self.env_assist.assert_reports(self.push_reports())
+ def test_get_and_push_cannot_diff(self):
+ self.config_load_and_push()
+ env = self.env_assist.get_env()
+
+ env.get_cib()
+ env.push_cib()
+ self.env_assist.assert_reports(
+ self.push_full_forced_reports("3.0.8")
+ )
+
+ def test_modified_cib_features_do_not_matter(self):
+ self.config_load_and_push_diff()
+ env = self.env_assist.get_env()
+
+ cib = env.get_cib()
+ cib.set("crm_feature_set", "3.0.8")
+ env.push_cib()
+ self.env_assist.assert_reports(self.push_reports(
+ cib_new=self.config.calls.get("runner.cib.load").stdout.replace(
+ "3.0.9",
+ "3.0.8"
+ )
+ ))
+
+ def test_push_no_features_goes_with_full(self):
+ (self.config
+ .runner.cib.load_content("<cib />", name="runner.cib.load_content")
+ .runner.cib.push(load_key="runner.cib.load_content")
+ )
+ env = self.env_assist.get_env()
+
+ env.get_cib()
+ env.push_cib()
+ self.env_assist.assert_reports(
+ self.push_full_forced_reports("0.0.0")
+ )
+
+ def test_can_get_after_push(self):
+ self.config_load_and_push_diff()
+ self.config.runner.cib.load(
+ name="load_cib_2",
+ filename=self.cib_can_diff
+ )
+ env = self.env_assist.get_env()
+
+ env.get_cib()
+ env.push_cib()
+ # need to use lambda because env.cib is a property
+ self.assert_raises_cib_not_loaded(lambda: env.cib)
+ env.get_cib()
+ self.env_assist.assert_reports(self.push_reports())
+
+ def test_can_get_after_push_cannot_diff(self):
+ self.config_load_and_push()
+ self.config.runner.cib.load(
+ name="load_cib_2",
+ filename=self.cib_cannot_diff
+ )
+ env = self.env_assist.get_env()
+
+ env.get_cib()
+ env.push_cib()
+ # need to use lambda because env.cib is a property
+ self.assert_raises_cib_not_loaded(lambda: env.cib)
+ env.get_cib()
+ self.env_assist.assert_reports(
+ self.push_full_forced_reports("3.0.8")
+ )
+
+ def test_not_loaded(self):
+ env = self.env_assist.get_env()
+ self.assert_raises_cib_not_loaded(env.push_cib)
+
def test_tmpfile_fails(self):
- self.config.runner.cib.load()
+ self.config.runner.cib.load(filename=self.cib_can_diff)
self.mock_write_tmpfile.side_effect = EnvironmentError("test error")
env = self.env_assist.get_env()
+
env.get_cib()
self.env_assist.assert_raise_library_error(
- env.push_cib_diff,
+ env.push_cib,
[
- (
- severity.ERROR,
+ fixture.error(
report_codes.CIB_SAVE_TMP_ERROR,
- {
- "reason": "test error",
- },
- None
+ reason="test error",
)
],
expected_in_processor=False
@@ -297,7 +297,7 @@ class CibPushDiff(TestCase, MangeCibAssertionMixin):
def test_diff_fails(self):
(self.config
- .runner.cib.load()
+ .runner.cib.load(filename=self.cib_can_diff)
.runner.cib.diff(
self.tmpfile_old.name,
self.tmpfile_new.name,
@@ -308,141 +308,202 @@ class CibPushDiff(TestCase, MangeCibAssertionMixin):
env = self.env_assist.get_env()
env.get_cib()
self.env_assist.assert_raise_library_error(
- env.push_cib_diff,
+ env.push_cib,
[
- (
- severity.ERROR,
+ fixture.error(
report_codes.CIB_DIFF_ERROR,
- {
- "reason": "invalid cib",
- },
- None
+ reason="invalid cib",
)
],
expected_in_processor=False
)
- self.assert_tmps_write_reported()
+ self.env_assist.assert_reports(self.push_reports())
- def test_push_fails(self):
+ def test_push_diff_fails(self):
(self.config
- .runner.cib.load()
+ .runner.cib.load(filename=self.cib_can_diff)
.runner.cib.diff(self.tmpfile_old.name, self.tmpfile_new.name)
.runner.cib.push_diff(stderr="invalid cib", returncode=1)
)
env = self.env_assist.get_env()
env.get_cib()
self.env_assist.assert_raise_library_error(
- env.push_cib_diff,
+ env.push_cib,
[
- (
- severity.ERROR,
+ fixture.error(
report_codes.CIB_PUSH_ERROR,
- {
- "reason": "invalid cib",
- },
- None
+ reason="invalid cib",
)
],
expected_in_processor=False
)
- self.assert_tmps_write_reported()
-
- def test_get_and_push(self):
- self.config_load_and_push()
+ self.env_assist.assert_reports(self.push_reports())
+ def test_push_fails(self):
+ (self.config
+ .runner.cib.load(filename=self.cib_cannot_diff)
+ .runner.cib.push(stderr="invalid cib", returncode=1)
+ )
env = self.env_assist.get_env()
-
env.get_cib()
- env.push_cib_diff()
- self.assert_tmps_write_reported()
-
- def test_can_get_after_push(self):
- self.config_load_and_push()
- self.config.runner.cib.load(name="load_cib_2")
+ self.env_assist.assert_raise_library_error(
+ env.push_cib,
+ [
+ fixture.error(
+ report_codes.CIB_PUSH_ERROR,
+ reason="invalid cib",
+ )
+ ],
+ expected_in_processor=False
+ )
+ self.env_assist.assert_reports(
+ self.push_full_forced_reports("3.0.8")
+ )
+ def test_wait(self):
+ (self.config
+ .runner.cib.load(filename=self.cib_can_diff)
+ .runner.pcmk.can_wait()
+ .runner.cib.diff(self.tmpfile_old.name, self.tmpfile_new.name)
+ .runner.cib.push_diff()
+ .runner.pcmk.wait(timeout=self.wait_timeout)
+ )
env = self.env_assist.get_env()
- env.get_cib()
- env.push_cib_diff()
- # need to use lambda because env.cib is a property
- self.assert_raises_cib_not_loaded(lambda: env.cib)
- env.get_cib()
- self.assert_tmps_write_reported()
-
-class UpgradeCib(TestCase):
- def setUp(self):
- self.env_assist, self.config = get_env_tools(test_case=self)
+ env.get_cib()
+ env.push_cib(wait=self.wait_timeout)
+ self.env_assist.assert_reports(self.push_reports())
- def test_get_and_push_cib_version_upgrade_needed(self):
+ def test_wait_cannot_diff(self):
(self.config
- .runner.cib.load(name="load_cib_old")
- .runner.cib.upgrade()
- .runner.cib.load(filename="cib-empty-2.8.xml")
+ .runner.cib.load(filename=self.cib_cannot_diff)
+ .runner.pcmk.can_wait()
+ .runner.cib.push()
+ .runner.pcmk.wait(timeout=self.wait_timeout)
)
env = self.env_assist.get_env()
- env.get_cib((2, 8, 0))
+ env.get_cib()
+ env.push_cib(wait=self.wait_timeout)
self.env_assist.assert_reports(
- [fixture.info(report_codes.CIB_UPGRADE_SUCCESSFUL)]
+ self.push_full_forced_reports("3.0.8")
)
- def test_get_and_push_cib_version_upgrade_not_needed(self):
- self.config.runner.cib.load(filename="cib-empty-2.6.xml")
+ def test_wait_not_supported(self):
+ (self.config
+ .runner.cib.load(filename=self.cib_can_diff)
+ .runner.pcmk.can_wait(stdout="cannot wait")
+ )
env = self.env_assist.get_env()
- env.get_cib((2, 5, 0))
-
-class ManageCib(TestCase, MangeCibAssertionMixin):
- def setUp(self):
- self.env_assist, self.config = get_env_tools(test_case=self)
+ env.get_cib()
+ self.env_assist.assert_raise_library_error(
+ lambda: env.push_cib(wait=self.wait_timeout),
+ [
+ fixture.error(report_codes.WAIT_FOR_IDLE_NOT_SUPPORTED),
+ ],
+ expected_in_processor=False
+ )
- def test_raise_library_error_when_cibadmin_failed(self):
- stderr = "cibadmin: Connection to local file failed..."
+ def test_wait_raises_on_invalid_value(self):
(self.config
- #Value of cib_data is unimportant here. This content is only put
- #into tempfile when the runner is not mocked. And content is then
- #loaded from tempfile by `cibadmin --local --query`. Runner is
- #mocked in tests so the value of cib_data is not in the fact used.
- .env.set_cib_data("whatever")
- .runner.cib.load(returncode=203, stderr=stderr)
+ .runner.cib.load(filename=self.cib_can_diff)
+ .runner.pcmk.can_wait()
)
+ env = self.env_assist.get_env()
+ env.get_cib()
self.env_assist.assert_raise_library_error(
- self.env_assist.get_env().get_cib,
+ lambda: env.push_cib(wait="abc"),
[
- fixture.error(report_codes.CIB_LOAD_ERROR, reason=stderr)
+ fixture.error(
+ report_codes.INVALID_TIMEOUT_VALUE,
+ timeout="abc"
+ ),
],
expected_in_processor=False
)
- def test_returns_cib_from_cib_data(self):
- cib_filename = "cib-empty.xml"
+
+class PushCustomCib(TestCase, ManageCibAssertionMixin):
+ custom_cib = "<custom_cib />"
+ wait_timeout = 10
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
+ def test_push_without_get(self):
+ self.config.runner.cib.push_independent(self.custom_cib)
+ self.env_assist.get_env().push_cib(etree.XML(self.custom_cib))
+
+ def test_push_after_get(self):
+ self.config.runner.cib.load()
+ env = self.env_assist.get_env()
+
+ env.get_cib()
+ self.assert_raises_cib_loaded_cannot_custom(
+ partial(env.push_cib, etree.XML(self.custom_cib))
+ )
+
+ def test_wait(self):
(self.config
- #Value of cib_data is unimportant here. See details in sibling test.
- .env.set_cib_data("whatever")
- .runner.cib.load(filename=cib_filename)
+ .runner.pcmk.can_wait()
+ .runner.cib.push_independent(self.custom_cib)
+ .runner.pcmk.wait(timeout=self.wait_timeout)
)
- assert_xml_equal(
- etree_to_str(self.env_assist.get_env().get_cib()),
- open(rc(cib_filename)).read()
+ env = self.env_assist.get_env()
+
+ env.push_cib(
+ etree.XML(self.custom_cib),
+ wait=self.wait_timeout
)
- def test_get_and_property(self):
- self.config.runner.cib.load()
+ def test_wait_not_supported(self):
+ self.config.runner.pcmk.can_wait(stdout="cannot wait")
env = self.env_assist.get_env()
- self.assertEqual(env.get_cib(), env.cib)
- def test_property_without_get(self):
- env = self.env_assist.get_env()
- # need to use lambda because env.cib is a property
- self.assert_raises_cib_not_loaded(lambda: env.cib)
+ self.env_assist.assert_raise_library_error(
+ lambda: env.push_cib(
+ etree.XML(self.custom_cib),
+ wait=self.wait_timeout
+ ),
+ [
+ fixture.error(report_codes.WAIT_FOR_IDLE_NOT_SUPPORTED),
+ ],
+ expected_in_processor=False
+ )
- def test_double_get(self):
- self.config.runner.cib.load()
+ def test_wait_raises_on_invalid_value(self):
+ self.config.runner.pcmk.can_wait()
env = self.env_assist.get_env()
- env.get_cib()
- self.assert_raises_cib_error(env.get_cib, "CIB has already been loaded")
- def test_push_without_get(self):
+ self.env_assist.assert_raise_library_error(
+ lambda: env.push_cib(etree.XML(self.custom_cib), wait="abc"),
+ [
+ fixture.error(
+ report_codes.INVALID_TIMEOUT_VALUE,
+ timeout="abc"
+ ),
+ ],
+ expected_in_processor=False
+ )
+
+
+class PushCibMockedWithWait(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
+ def test_wait_not_supported_for_mocked_cib(self):
+ (self.config
+ .env.set_cib_data("<cib/>")
+ .runner.cib.load()
+ )
+
env = self.env_assist.get_env()
- self.assert_raises_cib_not_loaded(env.push_cib_diff)
+ env.get_cib()
+ self.env_assist.assert_raise_library_error(
+ lambda: env.push_cib(wait=10),
+ [
+ fixture.error(report_codes.WAIT_FOR_IDLE_NOT_LIVE_CLUSTER),
+ ],
+ expected_in_processor=False
+ )
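
As an aside, the guard exercised by PushCibMockedWithWait reduces to a simple
rule: waiting for the cluster to settle only makes sense against a live CIB,
never against CIB data injected from a file or string. A minimal sketch of
that rule (the names below are illustrative stand-ins, not the actual pcs
internals):

    class LibraryError(Exception):
        """Simplified stand-in for pcs.lib.errors.LibraryError."""

    def push_cib(cib_xml, wait=False, cib_is_live=True):
        # pcs reports WAIT_FOR_IDLE_NOT_LIVE_CLUSTER in this situation
        if wait is not False and not cib_is_live:
            raise LibraryError("cannot wait for idle: CIB is mocked")
        # ... push the CIB, then optionally wait for the cluster to settle
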
diff --git a/pcs/lib/test/test_nodes_task.py b/pcs/lib/test/test_nodes_task.py
deleted file mode 100644
index 249119b..0000000
--- a/pcs/lib/test/test_nodes_task.py
+++ /dev/null
@@ -1,691 +0,0 @@
-from __future__ import (
- absolute_import,
- division,
- print_function,
-)
-
-import json
-
-from pcs.test.tools.pcs_unittest import TestCase, skip
-
-from pcs.test.tools.assertions import (
- assert_raise_library_error,
- assert_report_item_list_equal,
-)
-from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.pcs_unittest import mock
-from pcs.test.tools.misc import create_patcher
-
-from pcs.common import report_codes
-from pcs.lib.external import NodeCommunicator, NodeAuthenticationException
-from pcs.lib.node import NodeAddresses, NodeAddressesList
-from pcs.lib.errors import ReportItemSeverity as severity
-
-# import pcs.lib.nodes_task as lib
-lib = mock.Mock()
-lib.__name__ = "nodes_task"
-
-patch_nodes_task = create_patcher(lib)
-
- at skip("TODO: rewrite for pcs.lib.communication.corosync.DistributeCorosyncConf")
-class DistributeCorosyncConfTest(TestCase):
- def setUp(self):
- self.mock_reporter = MockLibraryReportProcessor()
- self.mock_communicator = "mock node communicator"
-
- @patch_nodes_task("corosync_live")
- def test_success(self, mock_corosync_live):
- conf_text = "test conf text"
- nodes = ["node1", "node2"]
- node_addrs_list = NodeAddressesList(
- [NodeAddresses(addr) for addr in nodes]
- )
- mock_corosync_live.set_remote_corosync_conf = mock.MagicMock()
-
- lib.distribute_corosync_conf(
- self.mock_communicator,
- self.mock_reporter,
- node_addrs_list,
- conf_text
- )
-
- corosync_live_calls = [
- mock.call.set_remote_corosync_conf(
- "mock node communicator", node_addrs_list[0], conf_text
- ),
- mock.call.set_remote_corosync_conf(
- "mock node communicator", node_addrs_list[1], conf_text
- ),
- ]
- self.assertEqual(
- len(corosync_live_calls),
- len(mock_corosync_live.mock_calls)
- )
- mock_corosync_live.set_remote_corosync_conf.assert_has_calls(
- corosync_live_calls,
- any_order=True
- )
-
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.INFO,
- report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED,
- {}
- ),
- (
- severity.INFO,
- report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
- {"node": nodes[0]}
- ),
- (
- severity.INFO,
- report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
- {"node": nodes[1]}
- ),
- ]
- )
-
- @patch_nodes_task("corosync_live")
- def test_one_node_down(self, mock_corosync_live):
- conf_text = "test conf text"
- nodes = ["node1", "node2"]
- node_addrs_list = NodeAddressesList(
- [NodeAddresses(addr) for addr in nodes]
- )
- mock_corosync_live.set_remote_corosync_conf = mock.MagicMock()
- def raiser(comm, node, conf):
- if node.ring0 == nodes[1]:
- raise NodeAuthenticationException(
- nodes[1], "command", "HTTP error: 401"
- )
- mock_corosync_live.set_remote_corosync_conf.side_effect = raiser
-
- assert_raise_library_error(
- lambda: lib.distribute_corosync_conf(
- self.mock_communicator,
- self.mock_reporter,
- node_addrs_list,
- conf_text
- ),
- (
- severity.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
- {
- "node": nodes[1],
- "command": "command",
- "reason" : "HTTP error: 401",
- },
- report_codes.SKIP_OFFLINE_NODES
- ),
- (
- severity.ERROR,
- report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
- {
- "node": nodes[1],
- },
- report_codes.SKIP_OFFLINE_NODES
- )
- )
-
- corosync_live_calls = [
- mock.call.set_remote_corosync_conf(
- "mock node communicator", nodes[0], conf_text
- ),
- mock.call.set_remote_corosync_conf(
- "mock node communicator", nodes[1], conf_text
- ),
- ]
- self.assertEqual(
- len(corosync_live_calls),
- len(mock_corosync_live.mock_calls)
- )
- mock_corosync_live.set_remote_corosync_conf.assert_has_calls([
- mock.call("mock node communicator", node_addrs_list[0], conf_text),
- mock.call("mock node communicator", node_addrs_list[1], conf_text),
- ], any_order=True)
-
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.INFO,
- report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED,
- {}
- ),
- (
- severity.INFO,
- report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
- {"node": nodes[0]}
- ),
- (
- severity.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
- {
- "node": nodes[1],
- "command": "command",
- "reason" : "HTTP error: 401",
- },
- report_codes.SKIP_OFFLINE_NODES
- ),
- (
- severity.ERROR,
- report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
- {
- "node": nodes[1],
- },
- report_codes.SKIP_OFFLINE_NODES
- )
- ]
- )
-
- @patch_nodes_task("corosync_live")
- def test_one_node_down_forced(self, mock_corosync_live):
- conf_text = "test conf text"
- nodes = ["node1", "node2"]
- node_addrs_list = NodeAddressesList(
- [NodeAddresses(addr) for addr in nodes]
- )
- mock_corosync_live.set_remote_corosync_conf = mock.MagicMock()
- def raiser(comm, node, conf):
- if node.ring0 == nodes[1]:
- raise NodeAuthenticationException(
- nodes[1], "command", "HTTP error: 401"
- )
- mock_corosync_live.set_remote_corosync_conf.side_effect = raiser
-
- lib.distribute_corosync_conf(
- self.mock_communicator,
- self.mock_reporter,
- node_addrs_list,
- conf_text,
- skip_offline_nodes=True
- )
-
- corosync_live_calls = [
- mock.call.set_remote_corosync_conf(
- "mock node communicator", nodes[0], conf_text
- ),
- mock.call.set_remote_corosync_conf(
- "mock node communicator", nodes[1], conf_text
- ),
- ]
- self.assertEqual(
- len(corosync_live_calls),
- len(mock_corosync_live.mock_calls)
- )
- mock_corosync_live.set_remote_corosync_conf.assert_has_calls([
- mock.call("mock node communicator", node_addrs_list[0], conf_text),
- mock.call("mock node communicator", node_addrs_list[1], conf_text),
- ], any_order=True)
-
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.INFO,
- report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED,
- {}
- ),
- (
- severity.INFO,
- report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
- {"node": nodes[0]}
- ),
- (
- severity.WARNING,
- report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
- {
- "node": nodes[1],
- "command": "command",
- "reason" : "HTTP error: 401",
- }
- ),
- (
- severity.WARNING,
- report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
- {
- "node": nodes[1],
- }
- ),
- ]
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.corosync.CheckCorosyncOffline")
-class CheckCorosyncOfflineTest(TestCase):
- def setUp(self):
- self.mock_reporter = MockLibraryReportProcessor()
- self.mock_communicator = mock.MagicMock(NodeCommunicator)
-
- def test_success(self):
- nodes = ["node1", "node2"]
- node_addrs_list = NodeAddressesList(
- [NodeAddresses(addr) for addr in nodes]
- )
- self.mock_communicator.call_node.return_value = '{"corosync": false}'
-
- lib.check_corosync_offline_on_nodes(
- self.mock_communicator,
- self.mock_reporter,
- node_addrs_list
- )
-
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.INFO,
- report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED,
- {}
- ),
- (
- severity.INFO,
- report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
- {"node": nodes[0]}
- ),
- (
- severity.INFO,
- report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
- {"node": nodes[1]}
- ),
- ]
- )
-
- def test_one_node_running(self):
- node_responses = {
- "node1": '{"corosync": false}',
- "node2": '{"corosync": true}',
- }
- node_addrs_list = NodeAddressesList(
- [NodeAddresses(addr) for addr in node_responses.keys()]
- )
-
- self.mock_communicator.call_node.side_effect = (
- lambda node, request, data: node_responses[node.label]
- )
-
-
- assert_raise_library_error(
- lambda: lib.check_corosync_offline_on_nodes(
- self.mock_communicator,
- self.mock_reporter,
- node_addrs_list
- ),
- (
- severity.ERROR,
- report_codes.COROSYNC_RUNNING_ON_NODE,
- {
- "node": "node2",
- }
- )
- )
-
- def test_json_error(self):
- nodes = ["node1", "node2"]
- node_addrs_list = NodeAddressesList(
- [NodeAddresses(addr) for addr in nodes]
- )
- self.mock_communicator.call_node.side_effect = [
- '{}', # missing key
- '{', # not valid json
- ]
-
- assert_raise_library_error(
- lambda: lib.check_corosync_offline_on_nodes(
- self.mock_communicator,
- self.mock_reporter,
- node_addrs_list
- ),
- (
- severity.ERROR,
- report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
- {
- "node": nodes[0],
- },
- report_codes.SKIP_OFFLINE_NODES
- ),
- (
- severity.ERROR,
- report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
- {
- "node": nodes[1],
- },
- report_codes.SKIP_OFFLINE_NODES
- )
- )
-
- def test_node_down(self):
- nodes = ["node1", "node2"]
- node_addrs_list = NodeAddressesList(
- [NodeAddresses(addr) for addr in nodes]
- )
- def side_effect(node, request, data):
- if node.ring0 == nodes[1]:
- raise NodeAuthenticationException(
- nodes[1], "command", "HTTP error: 401"
- )
- return '{"corosync": false}'
- self.mock_communicator.call_node.side_effect = side_effect
-
- assert_raise_library_error(
- lambda: lib.check_corosync_offline_on_nodes(
- self.mock_communicator,
- self.mock_reporter,
- node_addrs_list
- ),
- (
- severity.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
- {
- "node": nodes[1],
- "command": "command",
- "reason" : "HTTP error: 401",
- },
- report_codes.SKIP_OFFLINE_NODES
- ),
- (
- severity.ERROR,
- report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
- {
- "node": nodes[1],
- },
- report_codes.SKIP_OFFLINE_NODES
- )
- )
-
- def test_errors_forced(self):
- nodes = ["node1", "node2"]
- node_addrs_list = NodeAddressesList(
- [NodeAddresses(addr) for addr in nodes]
- )
- def side_effect(node, request, data):
- if node.ring0 == nodes[1]:
- raise NodeAuthenticationException(
- nodes[1], "command", "HTTP error: 401"
- )
- return '{' # invalid json
- self.mock_communicator.call_node.side_effect = side_effect
-
- lib.check_corosync_offline_on_nodes(
- self.mock_communicator,
- self.mock_reporter,
- node_addrs_list,
- skip_offline_nodes=True
- )
-
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.INFO,
- report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED,
- {}
- ),
- (
- severity.WARNING,
- report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
- {
- "node": nodes[0],
- }
- ),
- (
- severity.WARNING,
- report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
- {
- "node": nodes[1],
- "command": "command",
- "reason" : "HTTP error: 401",
- }
- ),
- (
- severity.WARNING,
- report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
- {
- "node": nodes[1],
- }
- )
- ]
- )
-
-@skip(
- "TODO: rewrite for pcs.lib.communication.qdevice.Stop and "
- "pcs.lib.communication.qdevice.Start"
-)
- at patch_nodes_task("qdevice_client.remote_client_stop")
- at patch_nodes_task("qdevice_client.remote_client_start")
-class QdeviceReloadOnNodesTest(TestCase):
- def setUp(self):
- self.mock_reporter = MockLibraryReportProcessor()
- self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-
- def test_success(self, mock_remote_start, mock_remote_stop):
- nodes = ["node1", "node2"]
- node_addrs_list = NodeAddressesList(
- [NodeAddresses(addr) for addr in nodes]
- )
-
- lib.qdevice_reload_on_nodes(
- self.mock_communicator,
- self.mock_reporter,
- node_addrs_list
- )
-
- node_calls = [
- mock.call(
- self.mock_reporter, self.mock_communicator, node_addrs_list[0]
- ),
- mock.call(
- self.mock_reporter, self.mock_communicator, node_addrs_list[1]
- ),
- ]
- self.assertEqual(len(node_calls), len(mock_remote_stop.mock_calls))
- self.assertEqual(len(node_calls), len(mock_remote_start.mock_calls))
- mock_remote_stop.assert_has_calls(node_calls, any_order=True)
- mock_remote_start.assert_has_calls(node_calls, any_order=True)
-
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.INFO,
- report_codes.QDEVICE_CLIENT_RELOAD_STARTED,
- {}
- ),
- ]
- )
-
- def test_fail_doesnt_prevent_start(
- self, mock_remote_start, mock_remote_stop
- ):
- nodes = ["node1", "node2"]
- node_addrs_list = NodeAddressesList(
- [NodeAddresses(addr) for addr in nodes]
- )
- def raiser(reporter, communicator, node):
- if node.ring0 == nodes[1]:
- raise NodeAuthenticationException(
- node.label, "command", "HTTP error: 401"
- )
- mock_remote_start.side_effect = raiser
-
- assert_raise_library_error(
- lambda: lib.qdevice_reload_on_nodes(
- self.mock_communicator,
- self.mock_reporter,
- node_addrs_list
- ),
- # why the same error twice?
- # 1. Tested piece of code calls a function which puts an error
- # into the reporter. The reporter raises an exception. The
- # exception is caught in the tested piece of code, stored, and
- # later put to reporter again.
- # 2. Mock reporter remembers everything that goes through it
- # and by the machanism described in 1 the error goes througt it
- # twice.
- (
- severity.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
- {
- "node": nodes[1],
- "command": "command",
- "reason" : "HTTP error: 401",
- },
- report_codes.SKIP_OFFLINE_NODES
- ),
- (
- severity.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
- {
- "node": nodes[1],
- "command": "command",
- "reason" : "HTTP error: 401",
- },
- report_codes.SKIP_OFFLINE_NODES
- )
- )
-
- node_calls = [
- mock.call(
- self.mock_reporter, self.mock_communicator, node_addrs_list[0]
- ),
- mock.call(
- self.mock_reporter, self.mock_communicator, node_addrs_list[1]
- ),
- ]
- self.assertEqual(len(node_calls), len(mock_remote_stop.mock_calls))
- self.assertEqual(len(node_calls), len(mock_remote_start.mock_calls))
- mock_remote_stop.assert_has_calls(node_calls, any_order=True)
- mock_remote_start.assert_has_calls(node_calls, any_order=True)
-
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.INFO,
- report_codes.QDEVICE_CLIENT_RELOAD_STARTED,
- {}
- ),
- # why the same error twice?
- # 1. Tested piece of code calls a function which puts an error
- # into the reporter. The reporter raises an exception. The
- # exception is caught in the tested piece of code, stored, and
- # later put to reporter again.
- # 2. Mock reporter remembers everything that goes through it
- # and by the machanism described in 1 the error goes througt it
- # twice.
- (
- severity.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
- {
- "node": nodes[1],
- "command": "command",
- "reason" : "HTTP error: 401",
- },
- report_codes.SKIP_OFFLINE_NODES
- ),
- (
- severity.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
- {
- "node": nodes[1],
- "command": "command",
- "reason" : "HTTP error: 401",
- },
- report_codes.SKIP_OFFLINE_NODES
- ),
- ]
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.nodes.GetOnlineTargets")
-class NodeCheckAuthTest(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- node = NodeAddresses("node1")
- lib.node_check_auth(mock_communicator, node)
- mock_communicator.call_node.assert_called_once_with(
- node, "remote/check_auth", "check_auth_only=1"
- )
-
-
-def fixture_invalid_response_format(node_label):
- return (
- severity.ERROR,
- report_codes.INVALID_RESPONSE_FORMAT,
- {
- "node": node_label
- },
- None
- )
-
-def assert_call_cause_reports(call, expected_report_items):
- report_items = []
- call(report_items)
- assert_report_item_list_equal(report_items, expected_report_items)
-
- at skip("TODO: rewrite for pcs.lib.communication.nodes.PrecheckNewNode")
-class CheckCanAddNodeToCluster(TestCase):
- def setUp(self):
- self.node = NodeAddresses("node1")
- self.node_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-
- def assert_result_causes_invalid_format(self, result):
- self.node_communicator.call_node = mock.Mock(
- return_value=json.dumps(result)
- )
- assert_call_cause_reports(
- self.make_call,
- [fixture_invalid_response_format(self.node.label)],
- )
-
- def make_call(self, report_items):
- lib.check_can_add_node_to_cluster(
- self.node_communicator,
- self.node,
- report_items,
- check_response=(
- lambda availability_info, report_items, node_label: None
- )
- )
-
- def test_report_no_dict_in_json_response(self):
- self.assert_result_causes_invalid_format("bad answer")
-
-class OnNodeTest(TestCase):
- def setUp(self):
- self.reporter = MockLibraryReportProcessor()
- self.node = NodeAddresses("node1")
- self.node_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-
- def set_call_result(self, result):
- self.node_communicator.call_node = mock.Mock(
- return_value=json.dumps(result)
- )
-
-@skip(
- "TODO: rewrite for pcs.lib.communication.nodes.RunActionBase and it's "
- "descendants"
-)
-class RunActionOnNode(OnNodeTest):
- def make_call(self):
- return lib.run_actions_on_node(
- self.node_communicator,
- "remote/run_action",
- "actions",
- self.reporter,
- self.node,
- {"action": {"type": "any_mock_type"}}
- )
-
- def test_return_node_action_result(self):
- self.set_call_result({
- "actions": {
- "action": {
- "code": "some_code",
- "message": "some_message",
- }
- }
- })
- result = self.make_call()["action"]
- self.assertEqual(result.code, "some_code")
- self.assertEqual(result.message, "some_message")
diff --git a/pcs/lib/test/test_resource_agent.py b/pcs/lib/test/test_resource_agent.py
index a8be5fc..4ec94e2 100644
--- a/pcs/lib/test/test_resource_agent.py
+++ b/pcs/lib/test/test_resource_agent.py
@@ -1330,7 +1330,7 @@ class AgentMetadataValidateParameters(TestCase):
[
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["invalid_param"],
"option_type": "resource",
@@ -1440,7 +1440,7 @@ class AgentMetadataValidateParameters(TestCase):
),
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["obsoletes"],
"option_type": "resource",
diff --git a/pcs/lib/test/test_validate.py b/pcs/lib/test/test_validate.py
index 3aea4eb..6db3df6 100644
--- a/pcs/lib/test/test_validate.py
+++ b/pcs/lib/test/test_validate.py
@@ -838,7 +838,7 @@ class NamesIn(TestCase):
[
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["x", "y"],
"allowed": ["a", "b", "c"],
@@ -860,7 +860,7 @@ class NamesIn(TestCase):
[
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["x", "y"],
"allowed": ["a", "b", "c"],
@@ -883,7 +883,7 @@ class NamesIn(TestCase):
[
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["x", "y"],
"allowed": ["a", "b", "c"],
@@ -906,7 +906,7 @@ class NamesIn(TestCase):
[
(
severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["x", "y"],
"allowed": ["a", "b", "c"],
@@ -930,7 +930,7 @@ class NamesIn(TestCase):
[
(
severities.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["x", "y"],
"allowed": ["a", "b", "c"],
diff --git a/pcs/lib/validate.py b/pcs/lib/validate.py
index 14a5bd4..995ac0c 100644
--- a/pcs/lib/validate.py
+++ b/pcs/lib/validate.py
@@ -204,17 +204,17 @@ def names_in(
allowed_option_patterns=None
):
"""
- Return a list with report INVALID_OPTION when in name_list is a name that is
+ Return a list with report INVALID_OPTIONS when name_list contains a name that is
not in allowed_name_list.
list allowed_name_list contains names which are valid
list name_list contains names for validation
string option_type describes type of option for reporting purposes
string code_to_allow_extra_names is code for forcing invalid names. If it is
- empty report INVALID_OPTION is non-forceable error. If it is not empty
- report INVALID_OPTION is forceable error or warning.
+ empty, report INVALID_OPTIONS is a non-forceable error. If it is not empty,
+ report INVALID_OPTIONS is a forceable error or warning.
bool allow_extra_names is flag that complements code_to_allow_extra_names
- and determines wheter is report INVALID_OPTION forceable error or
+ and determines whether report INVALID_OPTIONS is a forceable error or
warning.
mixed allowed_option_patterns -- option patterns to be added to a report
"""
@@ -227,7 +227,7 @@ def names_in(
allow_extra_names
)
return [create_report(
- reports.invalid_option,
+ reports.invalid_options,
sorted(invalid_names),
sorted(allowed_name_list),
option_type,
@@ -250,10 +250,10 @@ def value_cond(
description of value type
string option_name_for_report is substituted by the option name if it is None
string code_to_allow_extra_values is code for forcing invalid names. If it
- is empty report INVALID_OPTION is non-forceable error. If it is not
- empty report INVALID_OPTION is forceable error or warning.
+ is empty, report INVALID_OPTION_VALUE is a non-forceable error. If it is
+ not empty, report INVALID_OPTION_VALUE is a forceable error or warning.
bool allow_extra_values is flag that complements code_to_allow_extra_values
- and determines wheter is report INVALID_OPTION forceable error or
+ and determines whether report INVALID_OPTION_VALUE is a forceable error or
warning.
"""
@_if_option_exists(option_name)
@@ -325,10 +325,10 @@ def value_in(
allowed_values -- list of strings, list of possible values
option_name_for_report -- string, it is substituted by the option name if it is None
code_to_allow_extra_values -- string, code for forcing invalid names. If it
- is empty report INVALID_OPTION is non-forceable error. If it is not
- empty report INVALID_OPTION is forceable error or warning.
+ is empty, report INVALID_OPTION_VALUE is a non-forceable error. If it is
+ not empty, report INVALID_OPTION_VALUE is a forceable error or warning.
allow_extra_values -- bool, flag that complements code_to_allow_extra_values
- and determines wheter is report INVALID_OPTION forceable error or
+ and determines whether report INVALID_OPTION_VALUE is a forceable error or
warning.
"""
return value_cond(
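
value_in delegates to value_cond, so both share the semantics these
docstrings describe: emit INVALID_OPTION_VALUE when a value falls outside the
allowed set, downgradable to a warning when forcing is allowed. A simplified
sketch of that rule (not the pcs implementation):

    def value_in_sketch(option_name, allowed_values, option_dict,
                        allow_extra_values=False):
        # Missing options are not this validator's concern.
        if option_name not in option_dict:
            return []
        value = option_dict[option_name]
        if value in allowed_values:
            return []
        return [{
            "code": "INVALID_OPTION_VALUE",
            "severity": "WARNING" if allow_extra_values else "ERROR",
            "option_name": option_name,
            "option_value": value,
            "allowed_values": allowed_values,
        }]
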
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 15454c7..fc85164 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "November 2017" "pcs 0.9.162" "System Administration Utilities"
+.TH PCS "8" "February 2018" "pcs 0.9.163" "System Administration Utilities"
.SH NAME
pcs \- pacemaker/corosync configuration system
.SH SYNOPSIS
@@ -352,11 +352,11 @@ Add/Change options to specified stonith id.
delete <stonith id>
Remove stonith id from configuration.
.TP
-enable <stonith id> [\fB\-\-wait[=n]\fR]
-Allow the cluster to use the stonith device. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the stonith device to start and then return 0 if the stonith device is started, or 1 if the stonith device has not yet started. If 'n' is not specified it defaults to 60 minutes.
+enable <stonith id>... [\fB\-\-wait[=n]\fR]
+Allow the cluster to use the stonith devices. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the stonith devices to start and then return 0 if the stonith devices are started, or 1 if the stonith devices have not yet started. If 'n' is not specified it defaults to 60 minutes.
.TP
-disable <stonith id> [\fB\-\-wait[=n]\fR]
-Attempt to stop the stonith device if it is running and disallow the cluster to use it. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the stonith device to stop and then return 0 if the stonith device is stopped or 1 if the stonith device has not stopped. If 'n' is not specified it defaults to 60 minutes.
+disable <stonith id>... [\fB\-\-wait[=n]\fR]
+Attempt to stop the stonith devices if they are running and disallow the cluster to use them. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the stonith devices to stop and then return 0 if the stonith devices are stopped or 1 if the stonith devices have not stopped. If 'n' is not specified it defaults to 60 minutes.
.TP
cleanup [<stonith id>] [\fB\-\-node\fR <node>]
Make the cluster forget failed operations from history of the stonith device and re\-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If a stonith id is not specified then all resources / stonith devices will be cleaned up. If a node is not specified then resources / stonith devices on all nodes will be cleaned up.
@@ -709,6 +709,9 @@ View current quorum status.
qdevice <device model> [\fB\-\-full\fR] [<cluster name>]
Show runtime status of specified model of quorum device provider. Using \fB\-\-full\fR will give more detailed output. If <cluster name> is specified, only information about the specified cluster will be displayed.
.TP
+booth
+Print current status of booth on the local node.
+.TP
nodes [corosync | both | config]
View current status of nodes from pacemaker. If 'corosync' is specified, view current status of nodes from corosync instead. If 'both' is specified, view current status of nodes from both corosync & pacemaker. If 'config' is specified, print nodes from corosync & pacemaker configuration.
.TP
diff --git a/pcs/pcsd.py b/pcs/pcsd.py
index 657cbc9..3f6b2b6 100644
--- a/pcs/pcsd.py
+++ b/pcs/pcsd.py
@@ -194,12 +194,14 @@ def pcsd_restart_nodes(nodes, exit_after_error=True, async_restart=False):
return
# check if the restart was performed already
+ sleep_seconds = 3
+ total_wait_seconds = 60 * 5
error = False
- for _ in range(5):
+ for _ in range(int(total_wait_seconds / sleep_seconds)):
if not instance_signatures:
# no more nodes to check
break
- time.sleep(2)
+ time.sleep(sleep_seconds)
for node, signature in list(instance_signatures.items()):
retval, output = utils.getPcsdInstanceSignature(node)
if retval == 0 and signature != output:
@@ -210,12 +212,11 @@ def pcsd_restart_nodes(nodes, exit_after_error=True, async_restart=False):
del instance_signatures[node]
utils.err(output, False)
error = True
- # if connection refused or http error occurs the dameon is just
+ # if a connection is refused or an http error occurs, the daemon is just
# restarting so we'll try it again
if instance_signatures:
- for node in sorted(instance_signatures.keys()):
+ for node in sorted(instance_signatures):
utils.err("{0}: Not restarted".format(node), False)
error = True
if error and exit_after_error:
sys.exit(1)
-
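
The net effect of this hunk is a larger polling budget: int(60 * 5 / 3) = 100
iterations of a 3-second sleep (up to five minutes) instead of the previous
five iterations of a 2-second sleep. Reduced to its core, the loop looks like
this (a sketch; check() stands in for utils.getPcsdInstanceSignature):

    import time

    def wait_for_restarts(pending_nodes, check):
        # Poll every sleep_seconds, up to total_wait_seconds overall, and
        # stop early once every node has confirmed its restart.
        sleep_seconds = 3
        total_wait_seconds = 60 * 5
        for _ in range(int(total_wait_seconds / sleep_seconds)):  # 100 rounds
            if not pending_nodes:
                break
            time.sleep(sleep_seconds)
            pending_nodes = [n for n in pending_nodes if not check(n)]
        return pending_nodes  # nodes that never confirmed a restart
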
diff --git a/pcs/quorum.py b/pcs/quorum.py
index 10c2760..51f1b54 100644
--- a/pcs/quorum.py
+++ b/pcs/quorum.py
@@ -16,7 +16,7 @@ from pcs.cli.common.console_report import indent
from pcs.cli.common.errors import CmdLineInputError
from pcs.lib.errors import LibraryError
-def quorum_cmd(lib, argv, modificators):
+def quorum_cmd(lib, argv, modifiers):
if len(argv) < 1:
sub_cmd, argv_next = "config", []
else:
@@ -26,18 +26,18 @@ def quorum_cmd(lib, argv, modificators):
if sub_cmd == "help":
usage.quorum([" ".join(argv_next)] if argv_next else [])
elif sub_cmd == "config":
- quorum_config_cmd(lib, argv_next, modificators)
+ quorum_config_cmd(lib, argv_next, modifiers)
elif sub_cmd == "expected-votes":
- quorum_expected_votes_cmd(lib, argv_next, modificators)
+ quorum_expected_votes_cmd(lib, argv_next, modifiers)
elif sub_cmd == "status":
- quorum_status_cmd(lib, argv_next, modificators)
+ quorum_status_cmd(lib, argv_next, modifiers)
elif sub_cmd == "device":
- quorum_device_cmd(lib, argv_next, modificators)
+ quorum_device_cmd(lib, argv_next, modifiers)
elif sub_cmd == "unblock":
# TODO switch to new architecture
quorum_unblock_cmd(argv_next)
elif sub_cmd == "update":
- quorum_update_cmd(lib, argv_next, modificators)
+ quorum_update_cmd(lib, argv_next, modifiers)
else:
raise CmdLineInputError()
except LibraryError as e:
@@ -45,22 +45,22 @@ def quorum_cmd(lib, argv, modificators):
except CmdLineInputError as e:
utils.exit_on_cmdline_input_errror(e, "quorum", sub_cmd)
-def quorum_device_cmd(lib, argv, modificators):
+def quorum_device_cmd(lib, argv, modifiers):
if len(argv) < 1:
raise CmdLineInputError()
sub_cmd, argv_next = argv[0], argv[1:]
try:
if sub_cmd == "add":
- quorum_device_add_cmd(lib, argv_next, modificators)
+ quorum_device_add_cmd(lib, argv_next, modifiers)
elif sub_cmd == "heuristics":
- quorum_device_heuristics_cmd(lib, argv_next, modificators)
+ quorum_device_heuristics_cmd(lib, argv_next, modifiers)
elif sub_cmd == "remove":
- quorum_device_remove_cmd(lib, argv_next, modificators)
+ quorum_device_remove_cmd(lib, argv_next, modifiers)
elif sub_cmd == "status":
- quorum_device_status_cmd(lib, argv_next, modificators)
+ quorum_device_status_cmd(lib, argv_next, modifiers)
elif sub_cmd == "update":
- quorum_device_update_cmd(lib, argv_next, modificators)
+ quorum_device_update_cmd(lib, argv_next, modifiers)
else:
sub_cmd = ""
raise CmdLineInputError()
@@ -86,7 +86,7 @@ def quorum_device_heuristics_cmd(lib, argv, modifiers):
)
-def quorum_config_cmd(lib, argv, modificators):
+def quorum_config_cmd(lib, argv, modifiers):
if argv:
raise CmdLineInputError()
config = lib.quorum.get_config()
@@ -133,25 +133,25 @@ def quorum_config_to_str(config):
return lines
-def quorum_expected_votes_cmd(lib, argv, modificators):
+def quorum_expected_votes_cmd(lib, argv, modifiers):
if len(argv) != 1:
raise CmdLineInputError()
lib.quorum.set_expected_votes_live(argv[0])
-def quorum_status_cmd(lib, argv, modificators):
+def quorum_status_cmd(lib, argv, modifiers):
if argv:
raise CmdLineInputError()
print(lib.quorum.status())
-def quorum_update_cmd(lib, argv, modificators):
+def quorum_update_cmd(lib, argv, modifiers):
options = parse_args.prepare_options(argv)
if not options:
raise CmdLineInputError()
lib.quorum.set_options(
options,
- skip_offline_nodes=modificators["skip_offline_nodes"],
- force=modificators["force"]
+ skip_offline_nodes=modifiers["skip_offline_nodes"],
+ force=modifiers["force"]
)
def _parse_quorum_device_groups(arg_list):
@@ -172,7 +172,7 @@ def _parse_quorum_device_groups(arg_list):
)
return groups
-def quorum_device_add_cmd(lib, argv, modificators):
+def quorum_device_add_cmd(lib, argv, modifiers):
groups = _parse_quorum_device_groups(argv)
model_and_model_options = groups.get("model", [])
# we expect "model" keyword once, followed by the actual model value
@@ -194,25 +194,25 @@ def quorum_device_add_cmd(lib, argv, modificators):
model_options,
generic_options,
heuristics_options,
- force_model=modificators["force"],
- force_options=modificators["force"],
- skip_offline_nodes=modificators["skip_offline_nodes"]
+ force_model=modifiers["force"],
+ force_options=modifiers["force"],
+ skip_offline_nodes=modifiers["skip_offline_nodes"]
)
-def quorum_device_remove_cmd(lib, argv, modificators):
+def quorum_device_remove_cmd(lib, argv, modifiers):
if argv:
raise CmdLineInputError()
lib.quorum.remove_device(
- skip_offline_nodes=modificators["skip_offline_nodes"]
+ skip_offline_nodes=modifiers["skip_offline_nodes"]
)
-def quorum_device_status_cmd(lib, argv, modificators):
+def quorum_device_status_cmd(lib, argv, modifiers):
if argv:
raise CmdLineInputError()
- print(lib.quorum.status_device(modificators["full"]))
+ print(lib.quorum.status_device(modifiers["full"]))
-def quorum_device_update_cmd(lib, argv, modificators):
+def quorum_device_update_cmd(lib, argv, modifiers):
groups = _parse_quorum_device_groups(argv)
if not groups:
raise CmdLineInputError()
@@ -229,8 +229,8 @@ def quorum_device_update_cmd(lib, argv, modificators):
model_options,
generic_options,
heuristics_options,
- force_options=modificators["force"],
- skip_offline_nodes=modificators["skip_offline_nodes"]
+ force_options=modifiers["force"],
+ skip_offline_nodes=modifiers["skip_offline_nodes"]
)
def quorum_device_heuristics_remove_cmd(lib, argv, modifiers):
diff --git a/pcs/resource.py b/pcs/resource.py
index 082bd9d..75f2b1a 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -70,7 +70,7 @@ def resource_cmd(argv):
sub_cmd, argv_next = argv[0], argv[1:]
lib = utils.get_library_wrapper()
- modifiers = utils.get_modificators()
+ modifiers = utils.get_modifiers()
try:
if sub_cmd == "help":
@@ -476,7 +476,13 @@ def resource_create(lib, argv, modifiers):
def resource_move(argv,clear=False,ban=False):
other_options = []
if len(argv) == 0:
- utils.err ("must specify resource to move/unmove")
+ if clear:
+ msg = "must specify a resource to clear"
+ elif ban:
+ msg = "must specify a resource to ban"
+ else:
+ msg = "must specify a resource to move"
+ utils.err(msg)
resource_id = argv.pop(0)
@@ -568,11 +574,11 @@ def resource_move(argv,clear=False,ban=False):
if "--wait" in utils.pcs_options:
wait_timeout = utils.validate_wait_get_timeout()
+ allowed_nodes = set()
+ banned_nodes = set()
if not clear:
running_on = utils.resource_running_on(resource_id)
was_running = running_on["is_running"]
- allowed_nodes = set()
- banned_nodes = set()
if dest_node and ban: # ban, node specified
banned_nodes = set([dest_node])
elif dest_node: # move, node specified
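
Hoisting allowed_nodes and banned_nodes above the `if not clear:` branch
ensures both sets exist on every code path, presumably so the later --wait
handling can rely on them even for a clear. The selection logic itself,
reduced to a sketch:

    def select_nodes(dest_node, ban=False, clear=False):
        # Both sets must exist even on the `clear` path.
        allowed_nodes = set()
        banned_nodes = set()
        if not clear and dest_node:
            if ban:
                banned_nodes = {dest_node}   # ban: keep the resource off
            else:
                allowed_nodes = {dest_node}  # move: prefer this node
        return allowed_nodes, banned_nodes
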
diff --git a/pcs/settings_default.py b/pcs/settings_default.py
index d6e8f22..0f28a72 100644
--- a/pcs/settings_default.py
+++ b/pcs/settings_default.py
@@ -21,13 +21,14 @@ corosync_qdevice_net_client_certs_dir = os.path.join(
corosync_qdevice_net_client_ca_file_name = "qnetd-cacert.crt"
corosync_authkey_file = os.path.join(corosync_conf_dir, "authkey")
pacemaker_authkey_file = "/etc/pacemaker/authkey"
+pacemaker_authkey_file_mode = 0o600
cluster_conf_file = "/etc/cluster/cluster.conf"
fence_agent_binaries = "/usr/sbin/"
pengine_binary = "/usr/libexec/pacemaker/pengine"
crmd_binary = "/usr/libexec/pacemaker/crmd"
cib_binary = "/usr/libexec/pacemaker/cib"
stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.162"
+pcs_version = "0.9.163"
crm_report = pacemaker_binaries + "crm_report"
crm_verify = pacemaker_binaries + "crm_verify"
crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng'
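
The new pacemaker_authkey_file_mode setting is a numeric mode (0o600: owner
read/write only). One common way such a setting is honoured when writing a
key file is sketched below; whether pcs uses exactly this call sequence is
not visible in this diff:

    import os

    def write_authkey(path, data, mode=0o600):
        # data: bytes. Create (or truncate) the file with the restrictive
        # mode applied at creation time rather than chmod-ing afterwards.
        fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode)
        try:
            os.write(fd, data)
        finally:
            os.close(fd)
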
diff --git a/pcs/snmp/agentx/updater.py b/pcs/snmp/agentx/updater.py
index 7ac20ae..c63f96b 100644
--- a/pcs/snmp/agentx/updater.py
+++ b/pcs/snmp/agentx/updater.py
@@ -82,8 +82,8 @@ def _str_oid_to_oid(sub_tree, str_oid):
sub_tree = _find_oid_in_sub_tree(sub_tree, section)
if sub_tree is None:
raise AssertionError(
- "oid section {0} ({1}) not found in {1} ({2})".format(
- section, str_oid, sub_tree.str_oid
+ "oid section '{0}' ({1}) not found in section '{2}'".format(
+ section, str_oid, oid_list[-1] if len(oid_list) else "."
)
)
oid_list.append(str(sub_tree.oid))
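
The old message was unreachable as written: after checking `sub_tree is
None`, it immediately formatted `sub_tree.str_oid`, so building the intended
AssertionError itself raised AttributeError. The fixed message reports the
last successfully resolved section instead. A short illustration of the
original failure mode:

    sub_tree = None
    try:
        "not found in {0}".format(sub_tree.str_oid)
    except AttributeError as err:
        print(err)  # 'NoneType' object has no attribute 'str_oid'
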
diff --git a/pcs/snmp/pcs_snmp_agent.8 b/pcs/snmp/pcs_snmp_agent.8
index dd6e4cc..99164a5 100644
--- a/pcs/snmp/pcs_snmp_agent.8
+++ b/pcs/snmp/pcs_snmp_agent.8
@@ -1,4 +1,4 @@
-.TH PCS_SNMP_AGENT "8" "November 2017" "pcs-snmp 0.9.162" "System Administration Utilities"
+.TH PCS_SNMP_AGENT "8" "February 2018" "pcs-snmp 0.9.163" "System Administration Utilities"
.SH NAME
pcs_snmp_agent \- a SNMP agent providing data about a corosync/pacemaker cluster
diff --git a/pcs/snmp/pcs_snmp_agent.logrotate b/pcs/snmp/pcs_snmp_agent.logrotate
deleted file mode 100644
index a53c21f..0000000
--- a/pcs/snmp/pcs_snmp_agent.logrotate
+++ /dev/null
@@ -1,10 +0,0 @@
-/var/log/pcs/snmp/pcs_snmp_agent.log {
- rotate 5
- weekly
- missingok
- notifempty
- compress
- delaycompress
- copytruncate
- create 0600 root root
-}
diff --git a/pcs/snmp/pcs_snmp_agent.service b/pcs/snmp/pcs_snmp_agent.service
index 112da73..cecb5af 100644
--- a/pcs/snmp/pcs_snmp_agent.service
+++ b/pcs/snmp/pcs_snmp_agent.service
@@ -4,7 +4,7 @@ Requires=snmpd.service
[Service]
EnvironmentFile=/etc/sysconfig/pcs_snmp_agent
-ExecStart=/usr/lib/pcs/pcs_snmp_agent > /dev/null
+ExecStart=/usr/lib/pcs/pcs_snmp_agent
Type=simple
TimeoutSec=500
diff --git a/pcs/snmp/settings.py b/pcs/snmp/settings.py
index 0559446..5f054ae 100644
--- a/pcs/snmp/settings.py
+++ b/pcs/snmp/settings.py
@@ -4,7 +4,7 @@ from __future__ import (
print_function,
)
-LOG_FILE = "/var/log/pcs/pcs_snmp_agent.log"
+LOG_FILE = "/var/log/pcsd/pcs_snmp_agent.log"
ENTERPRISES_OID = "1.3.6.1.4.1"
PACEMAKER_OID = ENTERPRISES_OID + ".32723"
PCS_OID = PACEMAKER_OID + ".100"
diff --git a/pcs/status.py b/pcs/status.py
index ec10d61..b06967a 100644
--- a/pcs/status.py
+++ b/pcs/status.py
@@ -14,6 +14,7 @@ from pcs import (
)
from pcs.qdevice import qdevice_status_cmd
from pcs.quorum import quorum_status_cmd
+from pcs.cli.booth.command import status as booth_status_cmd
from pcs.cli.common.console_report import indent
from pcs.cli.common.errors import CmdLineInputError
from pcs.lib.errors import LibraryError
@@ -22,53 +23,41 @@ from pcs.lib.pacemaker.values import is_false
from pcs.lib.resource_agent import _STONITH_ACTION_REPLACED_BY
from pcs.lib.sbd import get_sbd_service_name
-def status_cmd(argv):
- if len(argv) == 0:
+def status_cmd(lib, argv, modifiers):
+ if len(argv) < 1:
full_status()
sys.exit(0)
- sub_cmd = argv.pop(0)
- if (sub_cmd == "help"):
- usage.status(argv)
- elif (sub_cmd == "resources"):
- resource.resource_show(argv)
- elif (sub_cmd == "groups"):
- resource.resource_group_list(argv)
- elif (sub_cmd == "cluster"):
- cluster_status(argv)
- elif (sub_cmd == "nodes"):
- nodes_status(argv)
- elif (sub_cmd == "pcsd"):
- cluster_pcsd_status(argv)
- elif (sub_cmd == "xml"):
- xml_status()
- elif (sub_cmd == "corosync"):
- corosync_status()
- elif sub_cmd == "qdevice":
- try:
- qdevice_status_cmd(
- utils.get_library_wrapper(),
- argv,
- utils.get_modificators()
- )
- except LibraryError as e:
- utils.process_library_reports(e.args)
- except CmdLineInputError as e:
- utils.exit_on_cmdline_input_errror(e, "status", sub_cmd)
- elif sub_cmd == "quorum":
- try:
- quorum_status_cmd(
- utils.get_library_wrapper(),
- argv,
- utils.get_modificators()
- )
- except LibraryError as e:
- utils.process_library_reports(e.args)
- except CmdLineInputError as e:
- utils.exit_on_cmdline_input_errror(e, "status", sub_cmd)
- else:
- usage.status()
- sys.exit(1)
+ sub_cmd, argv_next = argv[0], argv[1:]
+ try:
+ if sub_cmd == "help":
+ usage.status(argv_next)
+ elif sub_cmd == "booth":
+ booth_status_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "corosync":
+ corosync_status()
+ elif sub_cmd == "cluster":
+ cluster_status(argv_next)
+ elif sub_cmd == "groups":
+ resource.resource_group_list(argv_next)
+ elif sub_cmd == "nodes":
+ nodes_status(argv_next)
+ elif sub_cmd == "pcsd":
+ cluster_pcsd_status(argv_next)
+ elif sub_cmd == "qdevice":
+ qdevice_status_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "quorum":
+ quorum_status_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "resources":
+ resource.resource_show(argv_next)
+ elif sub_cmd == "xml":
+ xml_status()
+ else:
+ raise CmdLineInputError()
+ except LibraryError as e:
+ utils.process_library_reports(e.args)
+ except CmdLineInputError as e:
+ utils.exit_on_cmdline_input_errror(e, "status", sub_cmd)
def full_status():
if "--hide-inactive" in utils.pcs_options and "--full" in utils.pcs_options:
@@ -125,6 +114,7 @@ def status_stonith_check():
stonith_enabled = True
stonith_devices = []
stonith_devices_id_action = []
+ stonith_devices_id_method_cycle = []
sbd_running = False
cib = utils.get_cib_dom()
@@ -155,6 +145,14 @@ def status_stonith_check():
stonith_devices_id_action.append(
resource.getAttribute("id")
)
+ if (
+ nvpair.getAttribute("name") == "method"
+ and
+ nvpair.getAttribute("value") == "cycle"
+ ):
+ stonith_devices_id_method_cycle.append(
+ resource.getAttribute("id")
+ )
if not utils.usefile:
# check if SBD daemon is running
@@ -171,14 +169,22 @@ def status_stonith_check():
if stonith_devices_id_action:
print(
- "WARNING: following stonith devices have the 'action' attribute"
- " set, it is recommended to set {0} instead: {1}".format(
+ "WARNING: following stonith devices have the 'action' option set, "
+ "it is recommended to set {0} instead: {1}".format(
", ".join(
["'{0}'".format(x) for x in _STONITH_ACTION_REPLACED_BY]
),
", ".join(sorted(stonith_devices_id_action))
)
)
+ if stonith_devices_id_method_cycle:
+ print(
+ "WARNING: following stonith devices have the 'method' option set "
+ "to 'cycle' which is potentially dangerous, please consider using "
+ "'onoff': {0}".format(
+ ", ".join(sorted(stonith_devices_id_method_cycle))
+ )
+ )
# Parse crm_mon for status
def nodes_status(argv):
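
This hunk mirrors the existing 'action' check: while walking the stonith
resources' nvpairs, it also collects the ids of devices whose instance
attributes set method=cycle and prints one aggregate warning. Stripped of the
DOM plumbing, the pattern is:

    def find_cycle_method_devices(stonith_resources):
        # Each entry: {"id": resource_id, "attrs": {name: value, ...}}
        return sorted(
            res["id"]
            for res in stonith_resources
            if res["attrs"].get("method") == "cycle"
        )
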
diff --git a/pcs/stonith.py b/pcs/stonith.py
index 7e4327e..2054734 100644
--- a/pcs/stonith.py
+++ b/pcs/stonith.py
@@ -33,7 +33,7 @@ def stonith_cmd(argv):
sub_cmd, argv_next = argv[0], argv[1:]
lib = utils.get_library_wrapper()
- modifiers = utils.get_modificators()
+ modifiers = utils.get_modifiers()
try:
if sub_cmd == "help":
diff --git a/pcs/test/cib_resource/common.py b/pcs/test/cib_resource/common.py
index e92dd61..86f70a4 100644
--- a/pcs/test/cib_resource/common.py
+++ b/pcs/test/cib_resource/common.py
@@ -20,7 +20,7 @@ class ResourceTest(
TestCase,
get_assert_pcs_effect_mixin(get_cib_resources)
):
- empty_cib = rc("cib-empty-1.2.xml")
+ empty_cib = rc("cib-empty.xml")
temp_cib = rc("temp-cib.xml")
def setUp(self):
diff --git a/pcs/test/cib_resource/test_create.py b/pcs/test/cib_resource/test_create.py
index cfb2e64..93787b0 100644
--- a/pcs/test/cib_resource/test_create.py
+++ b/pcs/test/cib_resource/test_create.py
@@ -985,7 +985,7 @@ class Bundle(ResourceTest):
self.fixture_primitive("R1")
self.assert_pcs_fail(
"resource create R2 ocf:heartbeat:Dummy bundle R1",
- "Error: 'R1' is not bundle\n"
+ "Error: 'R1' is not a bundle\n"
)
def test_bundle_id_does_not_exist(self):
diff --git a/pcs/test/cib_resource/test_manage_unmanage.py b/pcs/test/cib_resource/test_manage_unmanage.py
index 744128a..80435b8 100644
--- a/pcs/test/cib_resource/test_manage_unmanage.py
+++ b/pcs/test/cib_resource/test_manage_unmanage.py
@@ -234,7 +234,7 @@ class ManageUnmanage(
self.assert_pcs_fail(
"resource unmanage A B",
- "Error: resource/clone/master/group/bundle 'B' does not exist\n"
+ "Error: bundle/clone/group/master/resource 'B' does not exist\n"
)
self.assert_resources_xml_in_cib(
"""
@@ -255,7 +255,7 @@ class ManageUnmanage(
self.assert_pcs_fail(
"resource manage A B",
- "Error: resource/clone/master/group/bundle 'B' does not exist\n"
+ "Error: bundle/clone/group/master/resource 'B' does not exist\n"
)
self.assert_resources_xml_in_cib(
"""
diff --git a/pcs/test/resources/cib-empty-1.2.xml b/pcs/test/resources/cib-empty-1.2.xml
index 17b82b0..68d2838 100644
--- a/pcs/test/resources/cib-empty-1.2.xml
+++ b/pcs/test/resources/cib-empty-1.2.xml
@@ -1,4 +1,4 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.0" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.8" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
<configuration>
<crm_config/>
<nodes>
diff --git a/pcs/test/resources/cib-empty-1.2.xml b/pcs/test/resources/cib-empty-2.0.xml
similarity index 100%
copy from pcs/test/resources/cib-empty-1.2.xml
copy to pcs/test/resources/cib-empty-2.0.xml
diff --git a/pcs/test/resources/cib-empty-2.6.xml b/pcs/test/resources/cib-empty-2.6.xml
index fc845f4..34bb3cb 100644
--- a/pcs/test/resources/cib-empty-2.6.xml
+++ b/pcs/test/resources/cib-empty-2.6.xml
@@ -1,4 +1,4 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.6" crm_feature_set="3.0.10" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.6" crm_feature_set="3.0.11" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
<configuration>
<crm_config/>
<nodes>
diff --git a/pcs/test/resources/cib-empty-2.8.xml b/pcs/test/resources/cib-empty-2.8.xml
index e965fb5..5afb0ad 100644
--- a/pcs/test/resources/cib-empty-2.8.xml
+++ b/pcs/test/resources/cib-empty-2.8.xml
@@ -1,4 +1,4 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.8" crm_feature_set="3.0.10" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.8" crm_feature_set="3.0.11" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
<configuration>
<crm_config/>
<nodes>
diff --git a/pcs/test/resources/cib-empty-with3nodes.xml b/pcs/test/resources/cib-empty-with3nodes.xml
index d24c75c..44e5c66 100644
--- a/pcs/test/resources/cib-empty-with3nodes.xml
+++ b/pcs/test/resources/cib-empty-with3nodes.xml
@@ -1,4 +1,4 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.6" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.0" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
<configuration>
<crm_config/>
<nodes>
diff --git a/pcs/test/resources/cib-empty-withnodes.xml b/pcs/test/resources/cib-empty-withnodes.xml
index 7cf3c44..638f2e8 100644
--- a/pcs/test/resources/cib-empty-withnodes.xml
+++ b/pcs/test/resources/cib-empty-withnodes.xml
@@ -1,4 +1,4 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.6" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.0" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
<configuration>
<crm_config/>
<nodes>
diff --git a/pcs/test/resources/cib-empty.xml b/pcs/test/resources/cib-empty.xml
index 04923b7..17b82b0 100644
--- a/pcs/test/resources/cib-empty.xml
+++ b/pcs/test/resources/cib-empty.xml
@@ -1,4 +1,4 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.12" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.0" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
<configuration>
<crm_config/>
<nodes>
diff --git a/pcs/test/resources/cib-large.xml b/pcs/test/resources/cib-large.xml
index 4f7532b..861ba4c 100644
--- a/pcs/test/resources/cib-large.xml
+++ b/pcs/test/resources/cib-large.xml
@@ -1,4 +1,4 @@
-<cib admin_epoch="0" cib-last-written="Thu Aug 23 16:49:17 2012" crm_feature_set="3.0.12" dc-uuid="2" epoch="1308" have-quorum="0" num_updates="1" update-client="crmd" update-origin="rh7-3" validate-with="pacemaker-1.2">
+<cib admin_epoch="0" cib-last-written="Thu Aug 23 16:49:17 2012" crm_feature_set="3.0.9" dc-uuid="2" epoch="1308" have-quorum="0" num_updates="1" update-client="crmd" update-origin="rh7-3" validate-with="pacemaker-2.0">
<configuration>
<crm_config/>
<nodes>
diff --git a/pcs/test/resources/cib-largefile.xml b/pcs/test/resources/cib-largefile.xml
index 1fe3267..4f5a16a 100644
--- a/pcs/test/resources/cib-largefile.xml
+++ b/pcs/test/resources/cib-largefile.xml
@@ -1,4 +1,4 @@
-<cib epoch="148" num_updates="83" admin_epoch="0" validate-with="pacemaker-1.1" cib-last-written="Tue Oct 8 22:58:01 2013" update-origin="east-01" update-client="cibadmin" crm_feature_set="3.0.7" have-quorum="1" dc-uuid="101">
+<cib epoch="148" num_updates="83" admin_epoch="0" validate-with="pacemaker-2.0" cib-last-written="Tue Oct 8 22:58:01 2013" update-origin="east-01" update-client="cibadmin" crm_feature_set="3.0.9" have-quorum="1" dc-uuid="101">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
diff --git a/pcs/test/resources/resource_agent_ocf_pacemaker_remote.xml b/pcs/test/resources/resource_agent_ocf_pacemaker_remote.xml
new file mode 100644
index 0000000..f397b93
--- /dev/null
+++ b/pcs/test/resources/resource_agent_ocf_pacemaker_remote.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="remote" version="0.1">
+<version>1.0</version>
+<shortdesc lang="en">remote resource agent</shortdesc>
+ <parameters>
+ <parameter name="server" unique="1">
+ <longdesc lang="en">
+ Server location to connect to. This can be an ip address or hostname.
+ </longdesc>
+ <shortdesc lang="en">Server location</shortdesc>
+ <content type="string"/>
+ </parameter>
+ <parameter name="port">
+ <longdesc lang="en">
+ tcp port to connect to.
+ </longdesc>
+ <shortdesc lang="en">tcp port</shortdesc>
+ <content type="string" default="3121"/>
+ </parameter>
+ <parameter name="reconnect_interval" unique="0">
+ <longdesc lang="en">
+ Interval in seconds at which Pacemaker will attempt to reconnect to a
+ remote node after an active connection to the remote node has been
+ severed. When this value is nonzero, Pacemaker will retry the connection
+ indefinitely, at the specified interval. As with any time-based actions,
+ this is not guaranteed to be checked more frequently than the value of
+ the cluster-recheck-interval cluster option.
+ </longdesc>
+ <shortdesc lang="en">reconnect interval</shortdesc>
+ <content type="string" default="0"/>
+ </parameter>
+ </parameters>
+ <actions>
+ <action name="start" timeout="60" />
+ <action name="stop" timeout="60" />
+ <action name="reload" timeout="60" />
+ <action name="monitor" timeout="30" />
+ <action name="migrate_to" timeout="60" />
+ <action name="migrate_from" timeout="60" />
+ <action name="meta-data" timeout="5" />
+ </actions>
+</resource-agent>
diff --git a/pcs/test/resources/stonith_agent_fence_simple.xml b/pcs/test/resources/stonith_agent_fence_simple.xml
new file mode 100644
index 0000000..bb86af2
--- /dev/null
+++ b/pcs/test/resources/stonith_agent_fence_simple.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" ?>
+<resource-agent
+ name="fence_simple"
+ shortdesc="Basic fence agent for pcs tests"
+>
+ <longdesc>
+ This is a testing fence agent. Its purpose is to provide a mock of a fence
+ agent which is always available regardless of the configuration of the
+ system the pcs test suite runs on.
+ </longdesc>
+ <vendor-url>https://github.com/ClusterLabs/pcs</vendor-url>
+ <parameters>
+ <parameter name="must-set" unique="0" required="1">
+ <content type="string" />
+ <shortdesc lang="en">An example of a required attribute</shortdesc>
+ </parameter>
+ <parameter name="may-set" unique="0" required="0">
+ <content type="string" />
+ <shortdesc lang="en">An example of an optional attribute</shortdesc>
+ </parameter>
+ </parameters>
+ <actions>
+ <action name="on" automatic="0"/>
+ <action name="off" />
+ <action name="reboot" />
+ <action name="status" />
+ <action name="list" />
+ <action name="list-status" />
+ <action name="monitor" />
+ <action name="metadata" />
+ <action name="validate-all" />
+ </actions>
+</resource-agent>
diff --git a/pcs/test/resources/stonithd_metadata.xml b/pcs/test/resources/stonithd_metadata.xml
new file mode 100644
index 0000000..fc638a2
--- /dev/null
+++ b/pcs/test/resources/stonithd_metadata.xml
@@ -0,0 +1,156 @@
+<?xml version="1.0"?><!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="stonithd">
+ <version>1.0</version>
+ <longdesc lang="en">This is a fake resource that details the instance attributes handled by stonithd.</longdesc>
+ <shortdesc lang="en">Options available for all stonith resources</shortdesc>
+ <parameters>
+ <parameter name="priority" unique="0">
+ <shortdesc lang="en">The priority of the stonith resource. Devices are tried in order of highest priority to lowest.</shortdesc>
+ <content type="integer" default="0"/>
+ </parameter>
+ <parameter name="pcmk_host_argument" unique="0">
+ <shortdesc lang="en">Advanced use only: An alternate parameter to supply instead of 'port'</shortdesc>
+ <longdesc lang="en">Some devices do not support the standard 'port' parameter or may provide additional ones.
+Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced.
+A value of 'none' can be used to tell the cluster not to supply any additional parameters.
+ </longdesc>
+ <content type="string" default="port"/>
+ </parameter>
+ <parameter name="pcmk_host_map" unique="0">
+ <shortdesc lang="en">A mapping of host names to ports numbers for devices that do not support host names.</shortdesc>
+ <longdesc lang="en">Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2</longdesc>
+ <content type="string" default=""/>
+ </parameter>
+ <parameter name="pcmk_host_list" unique="0">
+ <shortdesc lang="en">A list of machines controlled by this device (Optional unless pcmk_host_check=static-list).</shortdesc>
+ <content type="string" default=""/>
+ </parameter>
+ <parameter name="pcmk_host_check" unique="0">
+ <shortdesc lang="en">How to determine which machines are controlled by the device.</shortdesc>
+ <longdesc lang="en">Allowed values: dynamic-list (query the device), static-list (check the pcmk_host_list attribute), none (assume every device can fence every machine)</longdesc>
+ <content type="string" default="dynamic-list"/>
+ </parameter>
+ <parameter name="pcmk_delay_max" unique="0">
+ <shortdesc lang="en">Enable a random delay for stonith actions and specify the maximum of random delay.</shortdesc>
+ <longdesc lang="en">This prevents double fencing when using slow devices such as sbd.
+Use this to enable a random delay for stonith actions.
+The overall delay is derived from this random delay value adding a static delay so that the sum is kept below the maximum delay.</longdesc>
+ <content type="time" default="0s"/>
+ </parameter>
+ <parameter name="pcmk_delay_base" unique="0">
+ <shortdesc lang="en">Enable a base delay for stonith actions and specify base delay value.</shortdesc>
+ <longdesc lang="en">This prevents double fencing when different delays are configured on the nodes.
+Use this to enable a static delay for stonith actions.
+The overall delay is derived from a random delay value adding this static delay so that the sum is kept below the maximum delay.</longdesc>
+ <content type="time" default="0s"/>
+ </parameter>
+ <parameter name="pcmk_action_limit" unique="0">
+ <shortdesc lang="en">The maximum number of actions can be performed in parallel on this device</shortdesc>
+ <longdesc lang="en">Pengine property concurrent-fencing=true needs to be configured first.
+Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited.</longdesc>
+ <content type="integer" default="1"/>
+ </parameter>
+ <parameter name="pcmk_reboot_action" unique="0">
+ <shortdesc lang="en">Advanced use only: An alternate command to run instead of 'reboot'</shortdesc>
+ <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones.
+Use this to specify an alternate, device-specific, command that implements the 'reboot' action.</longdesc>
+ <content type="string" default="reboot"/>
+ </parameter>
+ <parameter name="pcmk_reboot_timeout" unique="0">
+ <shortdesc lang="en">Advanced use only: Specify an alternate timeout to use for reboot actions instead of stonith-timeout</shortdesc>
+ <longdesc lang="en">Some devices need much more/less time to complete than normal.
+Use this to specify an alternate, device-specific, timeout for 'reboot' actions.</longdesc>
+ <content type="time" default="60s"/>
+ </parameter>
+ <parameter name="pcmk_reboot_retries" unique="0">
+ <shortdesc lang="en">Advanced use only: The maximum number of times to retry the 'reboot' command within the timeout period</shortdesc>
+ <longdesc lang="en">Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'reboot' actions before giving up.</longdesc>
+ <content type="integer" default="2"/>
+ </parameter>
+ <parameter name="pcmk_off_action" unique="0">
+ <shortdesc lang="en">Advanced use only: An alternate command to run instead of 'off'</shortdesc>
+ <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones.
+Use this to specify an alternate, device-specific, command that implements the 'off' action.</longdesc>
+ <content type="string" default="off"/>
+ </parameter>
+ <parameter name="pcmk_off_timeout" unique="0">
+ <shortdesc lang="en">Advanced use only: Specify an alternate timeout to use for off actions instead of stonith-timeout</shortdesc>
+ <longdesc lang="en">Some devices need much more/less time to complete than normal.
+Use this to specify an alternate, device-specific, timeout for 'off' actions.</longdesc>
+ <content type="time" default="60s"/>
+ </parameter>
+ <parameter name="pcmk_off_retries" unique="0">
+ <shortdesc lang="en">Advanced use only: The maximum number of times to retry the 'off' command within the timeout period</shortdesc>
+ <longdesc lang="en">Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'off' actions before giving up.</longdesc>
+ <content type="integer" default="2"/>
+ </parameter>
+ <parameter name="pcmk_on_action" unique="0">
+ <shortdesc lang="en">Advanced use only: An alternate command to run instead of 'on'</shortdesc>
+ <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones.
+Use this to specify an alternate, device-specific, command that implements the 'on' action.</longdesc>
+ <content type="string" default="on"/>
+ </parameter>
+ <parameter name="pcmk_on_timeout" unique="0">
+ <shortdesc lang="en">Advanced use only: Specify an alternate timeout to use for on actions instead of stonith-timeout</shortdesc>
+ <longdesc lang="en">Some devices need much more/less time to complete than normal.
+Use this to specify an alternate, device-specific, timeout for 'on' actions.</longdesc>
+ <content type="time" default="60s"/>
+ </parameter>
+ <parameter name="pcmk_on_retries" unique="0">
+ <shortdesc lang="en">Advanced use only: The maximum number of times to retry the 'on' command within the timeout period</shortdesc>
+ <longdesc lang="en">Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'on' actions before giving up.</longdesc>
+ <content type="integer" default="2"/>
+ </parameter>
+ <parameter name="pcmk_list_action" unique="0">
+ <shortdesc lang="en">Advanced use only: An alternate command to run instead of 'list'</shortdesc>
+ <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones.
+Use this to specify an alternate, device-specific, command that implements the 'list' action.</longdesc>
+ <content type="string" default="list"/>
+ </parameter>
+ <parameter name="pcmk_list_timeout" unique="0">
+ <shortdesc lang="en">Advanced use only: Specify an alternate timeout to use for list actions instead of stonith-timeout</shortdesc>
+ <longdesc lang="en">Some devices need much more/less time to complete than normal.
+Use this to specify an alternate, device-specific, timeout for 'list' actions.</longdesc>
+ <content type="time" default="60s"/>
+ </parameter>
+ <parameter name="pcmk_list_retries" unique="0">
+ <shortdesc lang="en">Advanced use only: The maximum number of times to retry the 'list' command within the timeout period</shortdesc>
+ <longdesc lang="en">Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'list' actions before giving up.</longdesc>
+ <content type="integer" default="2"/>
+ </parameter>
+ <parameter name="pcmk_monitor_action" unique="0">
+ <shortdesc lang="en">Advanced use only: An alternate command to run instead of 'monitor'</shortdesc>
+ <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones.
+Use this to specify an alternate, device-specific, command that implements the 'monitor' action.</longdesc>
+ <content type="string" default="monitor"/>
+ </parameter>
+ <parameter name="pcmk_monitor_timeout" unique="0">
+ <shortdesc lang="en">Advanced use only: Specify an alternate timeout to use for monitor actions instead of stonith-timeout</shortdesc>
+ <longdesc lang="en">Some devices need much more/less time to complete than normal.
+Use this to specify an alternate, device-specific, timeout for 'monitor' actions.</longdesc>
+ <content type="time" default="60s"/>
+ </parameter>
+ <parameter name="pcmk_monitor_retries" unique="0">
+ <shortdesc lang="en">Advanced use only: The maximum number of times to retry the 'monitor' command within the timeout period</shortdesc>
+ <longdesc lang="en">Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'monitor' actions before giving up.</longdesc>
+ <content type="integer" default="2"/>
+ </parameter>
+ <parameter name="pcmk_status_action" unique="0">
+ <shortdesc lang="en">Advanced use only: An alternate command to run instead of 'status'</shortdesc>
+ <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones.
+Use this to specify an alternate, device-specific, command that implements the 'status' action.</longdesc>
+ <content type="string" default="status"/>
+ </parameter>
+ <parameter name="pcmk_status_timeout" unique="0">
+ <shortdesc lang="en">Advanced use only: Specify an alternate timeout to use for status actions instead of stonith-timeout</shortdesc>
+ <longdesc lang="en">Some devices need much more/less time to complete than normal.
+Use this to specify an alternate, device-specific, timeout for 'status' actions.</longdesc>
+ <content type="time" default="60s"/>
+ </parameter>
+ <parameter name="pcmk_status_retries" unique="0">
+ <shortdesc lang="en">Advanced use only: The maximum number of times to retry the 'status' command within the timeout period</shortdesc>
+ <longdesc lang="en">Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'status' actions before giving up.</longdesc>
+ <content type="integer" default="2"/>
+ </parameter>
+ </parameters>
+</resource-agent>
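
The metadata above is standard resource-agent XML, so the pcmk_* defaults can be extracted programmatically. A minimal sketch, assuming the XML has been saved to a local file (the file name "stonith_metadata.xml" is illustrative, not part of pcs):

    from lxml import etree

    # Parse the resource-agent metadata and list each parameter with its
    # declared type and default value.
    tree = etree.parse("stonith_metadata.xml")  # hypothetical local copy
    for param in tree.findall(".//parameter"):
        content = param.find("content")
        print(param.get("name"), content.get("type"), content.get("default"))
    # pcmk_delay_max time 0s
    # pcmk_reboot_retries integer 2
    # ...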
diff --git a/pcs/test/suite.py b/pcs/test/suite.py
index cce2d00..809596b 100755
--- a/pcs/test/suite.py
+++ b/pcs/test/suite.py
@@ -4,14 +4,10 @@ from __future__ import (
division,
print_function,
)
-import sys
-import os.path
-
-is_2_7_or_higher = sys.version_info[0] > 2 or sys.version_info[1] > 6
-
-if is_2_7_or_higher:
- import importlib
+import importlib
+import os.path
+import sys
PACKAGE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)
@@ -20,6 +16,7 @@ sys.path.insert(0, PACKAGE_DIR)
from pcs.test.tools import pcs_unittest as unittest
+
def prepare_test_name(test_name):
"""
Sometimes we have a test easily accessible with a fs path format like:
@@ -33,13 +30,9 @@ def prepare_test_name(test_name):
in such a case the extension is removed
"""
candidate = test_name.replace("/", ".")
- if not is_2_7_or_higher:
- return candidate
-
py_extension = ".py"
if not candidate.endswith(py_extension):
return candidate
-
try:
importlib.import_module(candidate)
return candidate
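
The rewrite above drops the Python 2.6 fallback: importlib is now imported unconditionally and a dotted candidate ending in ".py" is probed directly. A standalone sketch of the same normalization idea, under the assumption that an ImportError means the ".py" suffix was a file extension rather than a module name:

    # Sketch: turn a filesystem-style test path into a dotted module name,
    # keeping a trailing ".py" only when the dotted name itself imports
    # (i.e. a module literally named "py" exists at that path).
    import importlib

    def normalize(test_name):
        candidate = test_name.replace("/", ".")
        if not candidate.endswith(".py"):
            return candidate
        try:
            importlib.import_module(candidate)
            return candidate
        except ImportError:
            return candidate[:-len(".py")]

    print(normalize("pcs/test/test_acl.py"))  # -> pcs.test.test_acl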
diff --git a/pcs/test/test_acl.py b/pcs/test/test_acl.py
index 6df6000..a59beb0 100644
--- a/pcs/test/test_acl.py
+++ b/pcs/test/test_acl.py
@@ -17,8 +17,8 @@ from pcs.test.tools.pcs_runner import (
PcsRunner,
)
-old_cib = rc("cib-empty.xml")
-empty_cib = rc("cib-empty-1.2.xml")
+old_cib = rc("cib-empty-1.2.xml")
+empty_cib = rc("cib-empty.xml")
temp_cib = rc("temp-cib.xml")
class ACLTest(unittest.TestCase, AssertPcsMixin):
@@ -80,19 +80,19 @@ class ACLTest(unittest.TestCase, AssertPcsMixin):
ac(o,"")
o, r = pcs("acl user create user1 roleX")
- ac(o, "Error: role 'roleX' does not exist\n")
+ ac(o, "Error: ACL role 'roleX' does not exist\n")
self.assertEqual(1, r)
o, r = pcs("acl user create user1 role1 roleX")
- ac(o, "Error: role 'roleX' does not exist\n")
+ ac(o, "Error: ACL role 'roleX' does not exist\n")
self.assertEqual(1, r)
o, r = pcs("acl group create group1 roleX")
- ac(o, "Error: role 'roleX' does not exist\n")
+ ac(o, "Error: ACL role 'roleX' does not exist\n")
self.assertEqual(1, r)
o, r = pcs("acl group create group1 role1 roleX")
- ac(o, "Error: role 'roleX' does not exist\n")
+ ac(o, "Error: ACL role 'roleX' does not exist\n")
self.assertEqual(1, r)
o, r = pcs("acl")
@@ -164,11 +164,11 @@ Role: role3
o,r = pcs("acl role assign role1 to noexist")
assert r == 1
- ac(o,"Error: user/group 'noexist' does not exist\n")
+ ac(o,"Error: ACL group/ACL user 'noexist' does not exist\n")
o,r = pcs("acl role assign noexist to user1")
assert r == 1
- ac(o,"Error: role 'noexist' does not exist\n")
+ ac(o,"Error: ACL role 'noexist' does not exist\n")
o,r = pcs("acl role assign role3 to user1")
assert r == 0
@@ -184,7 +184,7 @@ Role: role3
o,r = pcs("acl role unassign role3 from noexist")
assert r == 1
- ac(o,"Error: user/group 'noexist' does not exist\n")
+ ac(o,"Error: ACL group/ACL user 'noexist' does not exist\n")
o,r = pcs("acl role unassign role3 from user1")
assert r == 0
@@ -395,7 +395,7 @@ Group: group2
o,r = pcs("acl group delete user1")
assert r == 1
- ac(o,"Error: 'user1' is not an acl group\n")
+ ac(o,"Error: 'user1' is not an ACL group\n")
o,r = pcs("acl")
ac(o, """\
@@ -584,7 +584,7 @@ User: user2
o,r = pcs("acl role delete role2")
assert r == 1
- ac(o,"Error: role 'role2' does not exist\n")
+ ac(o,"Error: ACL role 'role2' does not exist\n")
o,r = pcs("acl role delete role1")
assert r == 0
@@ -640,7 +640,7 @@ User: user2
assert r == 0
o,r = pcs("acl permission delete role4-deny")
- ac(o,"Error: permission 'role4-deny' does not exist\n")
+ ac(o,"Error: ACL permission 'role4-deny' does not exist\n")
assert r == 1
o,r = pcs("acl show")
@@ -825,7 +825,7 @@ Role: role4
self.assert_pcs_success("acl group create group1")
self.assert_pcs_fail(
"acl role assign role1 to user group1",
- "Error: 'group1' is not an acl user\n"
+ "Error: 'group1' is not an ACL user\n"
)
def test_assign_unassign_role_to_user_with_to(self):
@@ -861,7 +861,7 @@ Role: role4
self.assert_pcs_success("acl user create user1")
self.assert_pcs_fail(
"acl role assign role1 to group user1",
- "Error: 'user1' is not an acl group\n"
+ "Error: 'user1' is not an ACL group\n"
)
def test_assign_unassign_role_to_group_with_to(self):
diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
index 01998f0..4e5bce7 100644
--- a/pcs/test/test_cluster.py
+++ b/pcs/test/test_cluster.py
@@ -2761,26 +2761,6 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
assert r == 0
ac(o, "No uidgids configured in cluster.conf\n")
- @skip_unless_pacemaker_version((1, 1, 11), "CIB schema upgrade")
- def testClusterUpgrade(self):
- with open(temp_cib) as myfile:
- data = myfile.read()
- assert data.find("pacemaker-1.2") != -1
- assert data.find("pacemaker-2.") == -1
-
- o,r = pcs("cluster cib-upgrade")
- ac(o,"Cluster CIB has been upgraded to latest version\n")
- assert r == 0
-
- with open(temp_cib) as myfile:
- data = myfile.read()
- assert data.find("pacemaker-1.2") == -1
- assert data.find("pacemaker-2.") != -1
-
- o,r = pcs("cluster cib-upgrade")
- ac(o,"Cluster CIB has been upgraded to latest version\n")
- assert r == 0
-
def test_can_not_setup_cluster_for_unknown_transport_type(self):
if utils.is_rhel6():
return
@@ -2992,6 +2972,33 @@ logging {
""")
+class ClusterUpgradeTest(unittest.TestCase, AssertPcsMixin):
+ def setUp(self):
+ shutil.copy(rc("cib-empty-1.2.xml"), temp_cib)
+ self.pcs_runner = PcsRunner(temp_cib)
+
+ @skip_unless_pacemaker_version((1, 1, 11), "CIB schema upgrade")
+ def testClusterUpgrade(self):
+ with open(temp_cib) as myfile:
+ data = myfile.read()
+ assert data.find("pacemaker-1.2") != -1
+ assert data.find("pacemaker-2.") == -1
+
+ o,r = pcs("cluster cib-upgrade")
+ ac(o,"Cluster CIB has been upgraded to latest version\n")
+ assert r == 0
+
+ with open(temp_cib) as myfile:
+ data = myfile.read()
+ assert data.find("pacemaker-1.2") == -1
+ assert data.find("pacemaker-2.") != -1
+
+ o,r = pcs("cluster cib-upgrade")
+ ac(o,"Cluster CIB has been upgraded to latest version\n")
+ assert r == 0
+
+
+
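
The relocated test asserts on the raw schema string before and after the upgrade. A hedged helper showing the same check via XML parsing; the function is illustrative, not a pcs API:

    from xml.etree import ElementTree

    def cib_schema(cib_path):
        # The CIB root carries its schema in the validate-with attribute,
        # e.g. "pacemaker-1.2" before the upgrade, "pacemaker-2.x" after.
        return ElementTree.parse(cib_path).getroot().get("validate-with")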
class ClusterStartStop(unittest.TestCase, AssertPcsMixin):
def setUp(self):
self.pcs_runner = PcsRunner()
diff --git a/pcs/test/test_cluster_pcmk_remote.py b/pcs/test/test_cluster_pcmk_remote.py
index e5ce410..e797287 100644
--- a/pcs/test/test_cluster_pcmk_remote.py
+++ b/pcs/test/test_cluster_pcmk_remote.py
@@ -177,7 +177,7 @@ class NodeAddGuest(ResourceTest):
def test_fail_when_resource_does_not_exists(self):
self.assert_pcs_fail(
"cluster node add-guest some-host non-existent",
- "Error: primitive 'non-existent' does not exist\n"
+ "Error: resource 'non-existent' does not exist\n"
)
def test_fail_when_option_remote_node_specified(self):
@@ -207,7 +207,7 @@ class NodeAddGuest(ResourceTest):
"cluster node add-guest node-host G a=b",
"Error: invalid guest option 'a', allowed options are:"
" remote-addr, remote-connect-timeout, remote-port\n"
- "Error: primitive 'G' does not exist\n"
+ "Error: resource 'G' does not exist\n"
)
def test_fail_when_disallowed_option_appear(self):
diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
index 9f0bc5d..1c176a4 100644
--- a/pcs/test/test_constraints.py
+++ b/pcs/test/test_constraints.py
@@ -2684,7 +2684,7 @@ class CommonCreateWithSet(ConstraintBaseTest):
def test_refuse_when_resource_does_not_exist(self):
self.assert_pcs_fail(
'constraint ticket set A C setoptions ticket=T',
- ["Error: resource 'C' does not exist"]
+ ["Error: bundle/clone/group/master/resource 'C' does not exist"]
)
class TicketCreateWithSet(ConstraintBaseTest):
@@ -2732,7 +2732,7 @@ class TicketAdd(ConstraintBaseTest):
def test_refuse_noexistent_resource_id(self):
self.assert_pcs_fail(
'constraint ticket add T master AA loss-policy=fence',
- ["Error: resource 'AA' does not exist"]
+ ["Error: bundle/clone/group/master/resource 'AA' does not exist"]
)
def test_refuse_invalid_role(self):
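
The reworded message reflects that the lookup now names every element type it searched. A hedged sketch of such a multi-tag id search; the helper name and the use of plain ElementTree are illustrative only:

    from xml.etree import ElementTree

    # Note: the CIB tag for a plain resource is "primitive", reported to
    # the user as "resource" in the error message.
    TAGS = ("bundle", "clone", "group", "master", "primitive")

    def find_resource_element(cib_xml, element_id):
        cib = ElementTree.fromstring(cib_xml)
        for tag in TAGS:
            found = cib.find(".//{0}[@id='{1}']".format(tag, element_id))
            if found is not None:
                return found
        raise KeyError(
            "bundle/clone/group/master/resource '{0}' does not exist".format(
                element_id
            )
        )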
diff --git a/pcs/test/test_lib_commands_sbd.py b/pcs/test/test_lib_commands_sbd.py
index cc3a8d4..01ee7b3 100644
--- a/pcs/test/test_lib_commands_sbd.py
+++ b/pcs/test/test_lib_commands_sbd.py
@@ -30,10 +30,6 @@ from pcs.lib.node import (
NodeAddresses,
NodeAddressesList,
)
-from pcs.lib.external import (
- NodeCommunicator,
- CommandRunner,
-)
import pcs.lib.commands.sbd as cmd_sbd
@@ -49,23 +45,6 @@ def _assert_equal_list_of_dictionaries_without_order(expected, actual):
raise AssertionError("Expected but not given: {0}".format(item))
-class CommandSbdTest(TestCase):
- def setUp(self):
- self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
- self.mock_log = mock.MagicMock(spec_set=logging.Logger)
- self.mock_env.logger = self.mock_log
- self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- self.mock_env.node_communicator.return_value = self.mock_com
- self.mock_run = mock.MagicMock(spec_set=CommandRunner)
- self.mock_env.cmd_runner.return_value = self.mock_run
- self.mock_rep = MockLibraryReportProcessor()
- self.mock_env.report_processor = self.mock_rep
-
- self.node_list = NodeAddressesList(
- [NodeAddresses("node" + str(i)) for i in range(3)]
- )
-
-
class ValidateSbdOptionsTest(TestCase):
def setUp(self):
self.allowed_sbd_options = sorted([
@@ -93,7 +72,7 @@ class ValidateSbdOptionsTest(TestCase):
[
(
Severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["SBD_UNKNOWN"],
"option_type": None,
@@ -104,7 +83,7 @@ class ValidateSbdOptionsTest(TestCase):
),
(
Severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["another_unknown_option"],
"option_type": None,
@@ -130,7 +109,7 @@ class ValidateSbdOptionsTest(TestCase):
[
(
Severities.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["SBD_UNKNOWN"],
"option_type": None,
@@ -141,7 +120,7 @@ class ValidateSbdOptionsTest(TestCase):
),
(
Severities.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["another_unknown_option"],
"option_type": None,
@@ -169,7 +148,7 @@ class ValidateSbdOptionsTest(TestCase):
[
(
Severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["SBD_WATCHDOG_DEV"],
"option_type": None,
@@ -180,7 +159,7 @@ class ValidateSbdOptionsTest(TestCase):
),
(
Severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["SBD_OPTS"],
"option_type": None,
@@ -191,7 +170,7 @@ class ValidateSbdOptionsTest(TestCase):
),
(
Severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["SBD_DEVICE"],
"option_type": None,
@@ -218,7 +197,7 @@ class ValidateSbdOptionsTest(TestCase):
[
(
Severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["SBD_WATCHDOG_DEV"],
"option_type": None,
@@ -229,7 +208,7 @@ class ValidateSbdOptionsTest(TestCase):
),
(
Severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["SBD_OPTS"],
"option_type": None,
@@ -240,7 +219,7 @@ class ValidateSbdOptionsTest(TestCase):
),
(
Severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["SBD_UNKNOWN"],
"option_type": None,
@@ -268,7 +247,7 @@ class ValidateSbdOptionsTest(TestCase):
[
(
Severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["SBD_WATCHDOG_DEV"],
"option_type": None,
@@ -279,7 +258,7 @@ class ValidateSbdOptionsTest(TestCase):
),
(
Severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["SBD_OPTS"],
"option_type": None,
@@ -290,7 +269,7 @@ class ValidateSbdOptionsTest(TestCase):
),
(
Severities.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["SBD_UNKNOWN"],
"option_type": None,
@@ -301,7 +280,7 @@ class ValidateSbdOptionsTest(TestCase):
),
(
Severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["SBD_PACEMAKER"],
"option_type": None,
@@ -725,7 +704,7 @@ class InitializeBlockDevicesTest(CommonTest):
),
(
Severities.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": sorted(["another_one", "unknown_option"]),
"option_type": "option",
diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py
index f2b4c62..a515b30 100644
--- a/pcs/test/test_lib_corosync_config_facade.py
+++ b/pcs/test/test_lib_corosync_config_facade.py
@@ -579,7 +579,7 @@ quorum {
lambda: facade.set_quorum_options(reporter, options),
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["nonsense1"],
"option_type": "quorum",
@@ -594,7 +594,7 @@ quorum {
),
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["nonsense2"],
"option_type": "quorum",
@@ -1414,7 +1414,7 @@ quorum {
),
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["bad_model_option"],
"option_type": "quorum device model",
@@ -1477,7 +1477,7 @@ quorum {
),
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["bad_generic_option"],
"option_type": "quorum device",
@@ -1488,7 +1488,7 @@ quorum {
),
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["model"],
"option_type": "quorum device",
@@ -1726,7 +1726,7 @@ quorum {
),
(
severity.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["bad_model_option"],
"option_type": "quorum device model",
@@ -1779,7 +1779,7 @@ quorum {
),
(
severity.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["bad_generic_option"],
"option_type": "quorum device",
@@ -2148,7 +2148,7 @@ class UpdateQuorumDeviceTest(TestCase):
),
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["bad_model_option"],
"option_type": "quorum device model",
@@ -2259,7 +2259,7 @@ class UpdateQuorumDeviceTest(TestCase):
),
(
severity.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["bad_model_option"],
"option_type": "quorum device model",
@@ -2380,7 +2380,7 @@ class UpdateQuorumDeviceTest(TestCase):
),
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["bad_generic_option"],
"option_type": "quorum device",
@@ -2391,7 +2391,7 @@ class UpdateQuorumDeviceTest(TestCase):
),
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["model"],
"option_type": "quorum device",
@@ -2440,7 +2440,7 @@ class UpdateQuorumDeviceTest(TestCase):
),
(
severity.ERROR,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["model"],
"option_type": "quorum device",
@@ -2488,7 +2488,7 @@ class UpdateQuorumDeviceTest(TestCase):
[
(
severity.WARNING,
- report_codes.INVALID_OPTION,
+ report_codes.INVALID_OPTIONS,
{
"option_names": ["bad_generic_option"],
"option_type": "quorum device",
diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py
index 7afeb9f..5f66b6d 100644
--- a/pcs/test/test_lib_corosync_live.py
+++ b/pcs/test/test_lib_corosync_live.py
@@ -10,13 +10,12 @@ import os.path
from pcs.test.tools.assertions import assert_raise_library_error
from pcs.test.tools.misc import get_test_resource as rc
-from pcs.test.tools.pcs_unittest import mock, skip
+from pcs.test.tools.pcs_unittest import mock
from pcs import settings
from pcs.common import report_codes
from pcs.lib.errors import ReportItemSeverity as severity
-from pcs.lib.node import NodeAddresses
-from pcs.lib.external import CommandRunner, NodeCommunicator
+from pcs.lib.external import CommandRunner
from pcs.lib.corosync import live as lib
@@ -70,22 +69,6 @@ class GetLocalClusterConfTest(TestCase):
)
)
- at skip("TODO: rewrite for pcs.lib.communication.corosync.DistributeCorosyncConf")
-class SetRemoteCorosyncConfTest(TestCase):
- def test_success(self):
- config = "test {\nconfig: data\n}\n"
- node = NodeAddresses("node1")
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- mock_communicator.call_node.return_value = "dummy return"
-
- lib.set_remote_corosync_conf(mock_communicator, node, config)
-
- mock_communicator.call_node.assert_called_once_with(
- node,
- "remote/set_corosync_conf",
- "corosync_conf=test+%7B%0Aconfig%3A+data%0A%7D%0A"
- )
-
class ReloadConfigTest(TestCase):
def path(self, name):
@@ -210,4 +193,3 @@ class SetExpectedVotesTest(TestCase):
mock_runner.run.assert_called_once_with([
self.path("corosync-quorumtool"), "-e", "3"
])
-
diff --git a/pcs/test/test_lib_corosync_qdevice_net.py b/pcs/test/test_lib_corosync_qdevice_net.py
index 60b67f6..7fea70b 100644
--- a/pcs/test/test_lib_corosync_qdevice_net.py
+++ b/pcs/test/test_lib_corosync_qdevice_net.py
@@ -4,24 +4,19 @@ from __future__ import (
print_function,
)
-from pcs.test.tools.pcs_unittest import TestCase, skip
-
import base64
import os.path
-
-from pcs.test.tools.pcs_unittest import mock
-from pcs.test.tools.assertions import assert_raise_library_error
-from pcs.test.tools.misc import get_test_resource
+from textwrap import dedent
from pcs import settings
from pcs.common import report_codes
from pcs.lib import reports
from pcs.lib.errors import ReportItemSeverity as severity, LibraryError
-from pcs.lib.external import (
- CommandRunner,
- NodeCommunicator,
- NodeCommunicationException,
-)
+from pcs.lib.external import CommandRunner
+
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.misc import get_test_resource
+from pcs.test.tools.pcs_unittest import TestCase, mock
import pcs.lib.corosync.qdevice_net as lib
@@ -221,41 +216,47 @@ class QdeviceConnectedClustersTest(TestCase):
)
def test_one_cluster(self):
- status = """\
-Cluster "rhel72":
- Algorithm: LMS
- Tie-breaker: Node with lowest node ID
- Node ID 2:
- Client address: ::ffff:192.168.122.122:59738
- Configured node list: 1, 2
- Membership node list: 1, 2
- Vote: ACK (ACK)
- Node ID 1:
- Client address: ::ffff:192.168.122.121:43420
- Configured node list: 1, 2
- Membership node list: 1, 2
- Vote: ACK (ACK)
-"""
+ status = dedent(
+ """\
+ Cluster "rhel72":
+ Algorithm: LMS
+ Tie-breaker: Node with lowest node ID
+ Node ID 2:
+ Client address: ::ffff:192.168.122.122:59738
+ Configured node list: 1, 2
+ Membership node list: 1, 2
+ Vote: ACK (ACK)
+ Node ID 1:
+ Client address: ::ffff:192.168.122.121:43420
+ Configured node list: 1, 2
+ Membership node list: 1, 2
+ Vote: ACK (ACK)
+ """
+ )
self.assertEqual(
["rhel72"],
lib.qdevice_connected_clusters(status)
)
def test_more_clusters(self):
- status = """\
-Cluster "rhel72":
-Cluster "rhel73":
-"""
+ status = dedent(
+ """\
+ Cluster "rhel72":
+ Cluster "rhel73":
+ """
+ )
self.assertEqual(
["rhel72", "rhel73"],
lib.qdevice_connected_clusters(status)
)
def test_invalid_status(self):
- status = """\
-Cluster:
- Cluster "rhel72":
-"""
+ status = dedent(
+ """\
+ Cluster:
+ Cluster "rhel72":
+ """
+ )
self.assertEqual(
[],
lib.qdevice_connected_clusters(status)
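
The dedent rewrites let the expected strings be indented with the surrounding code; textwrap.dedent removes the common leading whitespace before the comparison. A quick demonstration:

    from textwrap import dedent

    status = dedent(
        """\
        Cluster "rhel72":
        Cluster "rhel73":
        """
    )
    assert status == 'Cluster "rhel72":\nCluster "rhel73":\n'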
@@ -801,232 +802,19 @@ class ClientImportCertificateAndKeyTest(CertificateTestCase):
_client_cert_tool, "-m", "-c", self.mock_tmpfile.name
])
- at skip("TODO: rewrite for pcs.lib.communication.qdevice_net.GetCaCert")
-class RemoteQdeviceGetCaCertificate(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- expected_result = "abcd".encode("utf-8")
- mock_communicator.call_host.return_value = base64.b64encode(
- expected_result
- )
-
- result = lib.remote_qdevice_get_ca_certificate(
- mock_communicator,
- "qdevice host"
- )
- self.assertEqual(result, expected_result)
-
- mock_communicator.call_host.assert_called_once_with(
- "qdevice host",
- "remote/qdevice_net_get_ca_certificate",
- None
- )
-
- def test_decode_error(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- mock_communicator.call_host.return_value = "error"
-
- assert_raise_library_error(
- lambda: lib.remote_qdevice_get_ca_certificate(
- mock_communicator,
- "qdevice host"
- ),
- (
- severity.ERROR,
- report_codes.INVALID_RESPONSE_FORMAT,
- {
- "node": "qdevice host",
- }
- )
- )
-
- def test_comunication_error(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- mock_communicator.call_host.side_effect = NodeCommunicationException(
- "qdevice host", "command", "reason"
- )
-
- self.assertRaises(
- NodeCommunicationException,
- lambda: lib.remote_qdevice_get_ca_certificate(
- mock_communicator,
- "qdevice host"
- )
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.qdevice_net.ClientSetup")
-class RemoteClientSetupTest(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- node = "node address"
- ca_cert = "CA certificate".encode("utf-8")
-
- lib.remote_client_setup(mock_communicator, node, ca_cert)
-
- mock_communicator.call_node.assert_called_once_with(
- node,
- "remote/qdevice_net_client_init_certificate_storage",
- "ca_certificate={0}".format(
- cert_to_url(ca_cert)
- )
- )
-
- def test_comunication_error(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- mock_communicator.call_node.side_effect = NodeCommunicationException(
- "node address", "command", "reason"
- )
-
- self.assertRaises(
- NodeCommunicationException,
- lambda: lib.remote_client_setup(
- mock_communicator,
- "node address",
- "ca cert".encode("utf-8")
- )
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.qdevice_net.SignCertificate")
-class RemoteSignCertificateRequestTest(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- cert_request = "request".encode("utf-8")
- expected_result = "abcd".encode("utf-8")
- host = "qdevice host"
- cluster_name = "ClusterName"
- mock_communicator.call_host.return_value = base64.b64encode(
- expected_result
- )
-
- result = lib.remote_sign_certificate_request(
- mock_communicator,
- host,
- cert_request,
- cluster_name
- )
- self.assertEqual(result, expected_result)
-
- mock_communicator.call_host.assert_called_once_with(
- host,
- "remote/qdevice_net_sign_node_certificate",
- "certificate_request={0}&cluster_name={1}".format(
- cert_to_url(cert_request),
- cluster_name
- )
- )
-
- def test_decode_error(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- mock_communicator.call_host.return_value = "error"
-
- assert_raise_library_error(
- lambda: lib.remote_sign_certificate_request(
- mock_communicator,
- "qdevice host",
- "cert request".encode("utf-8"),
- "cluster name"
- ),
- (
- severity.ERROR,
- report_codes.INVALID_RESPONSE_FORMAT,
- {
- "node": "qdevice host",
- }
- )
- )
-
- def test_comunication_error(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- mock_communicator.call_host.side_effect = NodeCommunicationException(
- "qdevice host", "command", "reason"
- )
-
- self.assertRaises(
- NodeCommunicationException,
- lambda: lib.remote_sign_certificate_request(
- mock_communicator,
- "qdevice host",
- "cert request".encode("utf-8"),
- "cluster name"
- )
- )
-
-@skip(
- "TODO: rewrite for "
- "pcs.lib.communication.qdevice_net.ClientImportCertificateAndKey"
-)
-class RemoteClientImportCertificateAndKeyTest(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- node = "node address"
- pk12_cert = "pk12 certificate".encode("utf-8")
-
- lib.remote_client_import_certificate_and_key(
- mock_communicator,
- node,
- pk12_cert
- )
-
- mock_communicator.call_node.assert_called_once_with(
- node,
- "remote/qdevice_net_client_import_certificate",
- "certificate={0}".format(
- cert_to_url(pk12_cert)
- )
- )
-
- def test_comunication_error(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- mock_communicator.call_node.side_effect = NodeCommunicationException(
- "node address", "command", "reason"
- )
-
- self.assertRaises(
- NodeCommunicationException,
- lambda: lib.remote_client_import_certificate_and_key(
- mock_communicator,
- "node address",
- "pk12 cert".encode("utf-8")
- )
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.qdevice_net.ClientDestroy")
-class RemoteClientDestroy(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- node = "node address"
-
- lib.remote_client_destroy(mock_communicator, node)
-
- mock_communicator.call_node.assert_called_once_with(
- node,
- "remote/qdevice_net_client_destroy",
- None
- )
-
- def test_comunication_error(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- mock_communicator.call_node.side_effect = NodeCommunicationException(
- "node address", "command", "reason"
- )
-
- self.assertRaises(
- NodeCommunicationException,
- lambda: lib.remote_client_destroy(mock_communicator, "node address")
- )
-
-
class GetOutputCertificateTest(TestCase):
def setUp(self):
self.file_path = get_test_resource("qdevice-certs/qnetd-cacert.crt")
self.file_data = open(self.file_path, "rb").read()
def test_success(self):
- cert_tool_output = """
-some line
-Certificate stored in {0}
-some other line
- """.format(self.file_path)
+ cert_tool_output = dedent(
+ """
+ some line
+ Certificate stored in {0}
+ some other line
+ """.format(self.file_path)
+ )
report_func = mock.MagicMock()
self.assertEqual(
@@ -1036,11 +824,13 @@ some other line
report_func.assert_not_called()
def test_success_request(self):
- cert_tool_output = """
-some line
-Certificate request stored in {0}
-some other line
- """.format(self.file_path)
+ cert_tool_output = dedent(
+ """
+ some line
+ Certificate request stored in {0}
+ some other line
+ """.format(self.file_path)
+ )
report_func = mock.MagicMock()
self.assertEqual(
@@ -1068,11 +858,13 @@ some other line
)
def test_cannot_read_file(self):
- cert_tool_output = """
-some line
-Certificate request stored in {0}.bad
-some other line
- """.format(self.file_path)
+ cert_tool_output = dedent(
+ """
+ some line
+ Certificate request stored in {0}.bad
+ some other line
+ """.format(self.file_path)
+ )
report_func = reports.qdevice_certificate_import_error
assert_raise_library_error(
diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py
index 3f1d9a1..58a87e7 100644
--- a/pcs/test/test_lib_sbd.py
+++ b/pcs/test/test_lib_sbd.py
@@ -4,9 +4,8 @@ from __future__ import (
print_function,
)
-import json
from pcs.test.tools.misc import outdent
-from pcs.test.tools.pcs_unittest import TestCase, mock, skip
+from pcs.test.tools.pcs_unittest import TestCase, mock
from pcs.test.tools.assertions import (
assert_raise_library_error,
@@ -21,13 +20,7 @@ from pcs.lib.errors import (
ReportItemSeverity as Severities,
LibraryError,
)
-from pcs.lib.node import NodeAddresses
-from pcs.lib.external import (
- CommandRunner,
- NodeCommunicationException,
- NodeCommunicator,
- NodeConnectionException,
-)
+from pcs.lib.external import CommandRunner
import pcs.lib.sbd as lib_sbd
from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
@@ -221,674 +214,6 @@ class AtbHasToBeEnabledTest(TestCase):
)
- at skip("TODO: rewrite for pcs.lib.communication.sbd.CheckSbd")
-class CheckSbdTest(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- node = NodeAddresses("node1")
- lib_sbd.check_sbd(
- mock_communicator, node, "/dev/watchdog", ["/dev/sdb1", "/dev/sdc"]
- )
- mock_communicator.call_node.assert_called_once_with(
- node, "remote/check_sbd", "watchdog=%2Fdev%2Fwatchdog&" +\
- "device_list=%5B%22%2Fdev%2Fsdb1%22%2C+%22%2Fdev%2Fsdc%22%5D"
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.sbd.CheckSbd")
- at mock.patch("pcs.lib.sbd.check_sbd")
-class CheckSbdOnNodeTest(TestCase):
- def setUp(self):
- self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- self.mock_rep = MockLibraryReportProcessor()
- self.node = NodeAddresses("node1")
- self.device_list = ["/dev/sdb1", "/dev/sdc"]
-
- def test_success(self, mock_check_sbd):
- mock_check_sbd.return_value = json.dumps({
- "sbd": {
- "installed": True
- },
- "watchdog": {
- "exist": True
- }
- })
- # if no exception was raised, it's fine
- lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog",
- self.device_list
- )
- mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog", self.device_list
- )
- assert_report_item_list_equal(
- self.mock_rep.report_item_list,
- [(
- Severities.INFO,
- report_codes.SBD_CHECK_SUCCESS,
- {"node": self.node.label}
- )]
- )
-
- def test_unable_to_connect(self, mock_check_sbd):
- mock_check_sbd.side_effect = NodeConnectionException(
- self.node.label, "command", "reason"
- )
- self.assertRaises(
- NodeCommunicationException,
- lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog",
- self.device_list
- )
- )
- mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog", self.device_list
- )
- self.assertEqual(0, len(self.mock_rep.report_item_list))
-
- def test_data_parsing_error(self, mock_check_sbd):
- mock_check_sbd.return_value = "invalid JSON"
- assert_raise_library_error(
- lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog",
- self.device_list
- ),
- (
- Severities.ERROR,
- report_codes.INVALID_RESPONSE_FORMAT,
- {"node": self.node.label}
- )
- )
- mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog", self.device_list
- )
- self.assertEqual(0, len(self.mock_rep.report_item_list))
-
- def test_sbd_not_installed(self, mock_check_sbd):
- mock_check_sbd.return_value = json.dumps({
- "sbd": {
- "installed": False
- },
- "watchdog": {
- "exist": True
- }
- })
- assert_raise_library_error(
- lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog",
- self.device_list
- ),
- (
- Severities.ERROR,
- report_codes.SBD_NOT_INSTALLED,
- {"node": self.node.label}
- )
- )
- mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog", self.device_list
- )
- self.assertEqual(0, len(self.mock_rep.report_item_list))
-
- def test_watchdog_does_not_exist(self, mock_check_sbd):
- mock_check_sbd.return_value = json.dumps({
- "sbd": {
- "installed": True
- },
- "watchdog": {
- "exist": False
- }
- })
- assert_raise_library_error(
- lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog",
- self.device_list
- ),
- (
- Severities.ERROR,
- report_codes.WATCHDOG_NOT_FOUND,
- {"node": self.node.label}
- )
- )
- mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog", self.device_list
- )
- self.assertEqual(0, len(self.mock_rep.report_item_list))
-
- def test_watchdog_does_not_exist_and_sbd_not_installed(
- self, mock_check_sbd
- ):
- mock_check_sbd.return_value = json.dumps({
- "sbd": {
- "installed": False
- },
- "watchdog": {
- "exist": False
- }
- })
- assert_raise_library_error(
- lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog",
- self.device_list
- ),
- (
- Severities.ERROR,
- report_codes.WATCHDOG_NOT_FOUND,
- {"node": self.node.label}
- ),
- (
- Severities.ERROR,
- report_codes.SBD_NOT_INSTALLED,
- {"node": self.node.label}
- )
- )
- mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog", self.device_list
- )
- self.assertEqual(0, len(self.mock_rep.report_item_list))
-
- def test_one_device_does_not_exist(self, mock_check_sbd):
- mock_check_sbd.return_value = json.dumps({
- "sbd": {
- "installed": True
- },
- "watchdog": {
- "exist": True
- },
- "device_list": [
- {
- "path": "/dev/sbd",
- "exist": True,
- "block_device": True,
- },
- {
- "path": "/dev/sdc",
- "exist": False,
- "block_device": False,
- }
- ]
- })
- assert_raise_library_error(
- lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog",
- self.device_list
- ),
- (
- Severities.ERROR,
- report_codes.SBD_DEVICE_DOES_NOT_EXIST,
- {
- "device": "/dev/sdc",
- "node": self.node.label,
- }
- )
- )
- mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog", self.device_list
- )
- self.assertEqual(0, len(self.mock_rep.report_item_list))
-
- def test_devices_issues(self, mock_check_sbd):
- mock_check_sbd.return_value = json.dumps({
- "sbd": {
- "installed": True
- },
- "watchdog": {
- "exist": True
- },
- "device_list": [
- {
- "path": "/dev/sdb",
- "exist": True,
- "block_device": False,
- },
- {
- "path": "/dev/sdc",
- "exist": False,
- "block_device": False,
- }
- ]
- })
- assert_raise_library_error(
- lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog",
- self.device_list
- ),
- (
- Severities.ERROR,
- report_codes.SBD_DEVICE_DOES_NOT_EXIST,
- {
- "device": "/dev/sdc",
- "node": self.node.label,
- }
- ),
- (
- Severities.ERROR,
- report_codes.SBD_DEVICE_IS_NOT_BLOCK_DEVICE,
- {
- "device": "/dev/sdb",
- "node": self.node.label,
- }
- )
- )
- mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog", self.device_list
- )
- self.assertEqual(0, len(self.mock_rep.report_item_list))
-
- def test_invalid_response_format(self, mock_check_sbd):
- mock_check_sbd.return_value = json.dumps({
- "not_sbd": {
- "installed": False
- },
- "watchdog": {
- "exist": False
- }
- })
- assert_raise_library_error(
- lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog",
- self.device_list
- ),
- (
- Severities.ERROR,
- report_codes.INVALID_RESPONSE_FORMAT,
- {"node": self.node.label}
- )
- )
- mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog", self.device_list
- )
- self.assertEqual(0, len(self.mock_rep.report_item_list))
-
- at skip("TODO: rewrite for pcs.lib.communication.sbd.CheckSbd")
- at mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
-class CheckSbdOnAllNodesTest(TestCase):
- def test_success(self, mock_func):
- mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- mock_rep = MockLibraryReportProcessor()
- node_list = [NodeAddresses("node" + str(i)) for i in range(2)]
- data = {
- node_list[0]: {
- "watchdog": "/dev/watchdog1",
- "device_list": ["/dev/sdb", "/dev/vda1"],
- },
- node_list[1]: {
- "watchdog": "/dev/watchdog2",
- "device_list": ["/dev/sda2"],
- }
- }
- lib_sbd.check_sbd_on_all_nodes(mock_rep, mock_com, data)
- items = sorted(data.items())
- mock_func.assert_called_once_with(
- lib_sbd.check_sbd_on_node,
- [
- (
- [
- mock_rep, mock_com, node, data["watchdog"],
- data["device_list"]
- ],
- {}
- )
- for node, data in items
- ]
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.sbd.SetSbdConfig")
-class SetSbdConfigTest(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- node = NodeAddresses("node1")
- cfg = """# This file has been generated by pcs.
-SBD_OPTS="-n node1"
-SBD_WATCHDOG_DEV=/dev/watchdog
-SBD_WATCHDOG_TIMEOUT=0
-"""
- lib_sbd.set_sbd_config(mock_communicator, node, cfg)
- cfg_url_encoded = "%23+This+file+has+been+generated+by+" \
- "pcs.%0ASBD_OPTS%3D%22-n+node1%22%0ASBD_WATCHDOG_DEV%3D%2Fdev%2" \
- "Fwatchdog%0ASBD_WATCHDOG_TIMEOUT%3D0%0A"
- mock_communicator.call_node.assert_called_once_with(
- node, "remote/set_sbd_config", "config=" + cfg_url_encoded
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.sbd.SetSbdConfig")
- at mock.patch("pcs.lib.sbd.set_sbd_config")
-class SetSbdConfigOnNodeTest(TestCase):
- def setUp(self):
- self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- self.mock_rep = MockLibraryReportProcessor()
- self.node = NodeAddresses("node1")
-
- def test_success(self, mock_set_sbd_cfg):
- cfg_in = {
- "SBD_WATCHDOG_DEV": "/dev/watchdog",
- "SBD_WATCHDOG_TIMEOUT": "0"
- }
- cfg_out = """# This file has been generated by pcs.
-SBD_OPTS="-n node1"
-SBD_WATCHDOG_DEV=/my/watchdog
-SBD_WATCHDOG_TIMEOUT=0
-"""
- lib_sbd.set_sbd_config_on_node(
- self.mock_rep, self.mock_com, self.node, cfg_in, "/my/watchdog"
- )
- mock_set_sbd_cfg.assert_called_once_with(
- self.mock_com, self.node, cfg_out
- )
- assert_report_item_list_equal(
- self.mock_rep.report_item_list,
- [(
- Severities.INFO,
- report_codes.SBD_CONFIG_ACCEPTED_BY_NODE,
- {"node": self.node.label}
- )]
- )
-
- def test_with_devices(self, mock_set_sbd_cfg):
- cfg_in = {
- "SBD_WATCHDOG_DEV": "/dev/watchdog",
- "SBD_WATCHDOG_TIMEOUT": "0",
- }
- cfg_out = """# This file has been generated by pcs.
-SBD_DEVICE="/dev/sdb;/dev/vda"
-SBD_OPTS="-n node1"
-SBD_WATCHDOG_DEV=/my/watchdog
-SBD_WATCHDOG_TIMEOUT=0
-"""
- lib_sbd.set_sbd_config_on_node(
- self.mock_rep, self.mock_com, self.node, cfg_in, "/my/watchdog",
- ["/dev/sdb", "/dev/vda"]
- )
- mock_set_sbd_cfg.assert_called_once_with(
- self.mock_com, self.node, cfg_out
- )
- assert_report_item_list_equal(
- self.mock_rep.report_item_list,
- [(
- Severities.INFO,
- report_codes.SBD_CONFIG_ACCEPTED_BY_NODE,
- {"node": self.node.label}
- )]
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.sbd.SetSbdConfig")
- at mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
-class SetSbdConfigOnAllNodesTest(TestCase):
- def test_success(self, mock_func):
- mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- mock_rep = MockLibraryReportProcessor()
- watchdog_dict = dict([
- (NodeAddresses("node" + str(i)), "/dev/watchdog" + str(i))
- for i in range(5)
- ])
- device_dict = dict([
- (NodeAddresses("node" + str(i)), ["/dev/sdb" + str(i)])
- for i in range(5)
- ])
- node_list = list(watchdog_dict.keys())
- config = {
- "opt1": "val1",
- "opt2": "val2"
- }
- lib_sbd.set_sbd_config_on_all_nodes(
- mock_rep, mock_com, node_list, config, watchdog_dict, device_dict
- )
- mock_func.assert_called_once_with(
- lib_sbd.set_sbd_config_on_node,
- [
- (
- [
- mock_rep, mock_com, node, config, watchdog_dict[node],
- device_dict[node]
- ], {}
- )
- for node in node_list
- ]
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.sbd.EnableSbdService")
-class EnableSbdServiceTest(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- node = NodeAddresses("node1")
- lib_sbd.enable_sbd_service(mock_communicator, node)
- mock_communicator.call_node.assert_called_once_with(
- node, "remote/sbd_enable", None
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.sbd.EnableSbdService")
-class EnableSbdServiceOnNodeTest(TestCase):
- def setUp(self):
- self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- self.mock_rep = MockLibraryReportProcessor()
- self.node = NodeAddresses("node1")
-
- @mock.patch("pcs.lib.sbd.enable_sbd_service")
- def test_success(self, mock_enable_sbd):
- lib_sbd.enable_sbd_service_on_node(
- self.mock_rep, self.mock_com, self.node
- )
- mock_enable_sbd.assert_called_once_with(self.mock_com, self.node)
- assert_report_item_list_equal(
- self.mock_rep.report_item_list,
- [(
- Severities.INFO,
- report_codes.SERVICE_ENABLE_SUCCESS,
- {
- "service": "sbd",
- "node": self.node.label
- }
- )]
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.sbd.EnableSbdService")
- at mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
-class EnableSbdServiceOnAllNodes(TestCase):
- def test_success(self, mock_func):
- mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- mock_rep = MockLibraryReportProcessor()
- node_list = [NodeAddresses("node" + str(i)) for i in range(5)]
- lib_sbd.enable_sbd_service_on_all_nodes(mock_rep, mock_com, node_list)
- mock_func.assert_called_once_with(
- lib_sbd.enable_sbd_service_on_node,
- [([mock_rep, mock_com, node], {}) for node in node_list]
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.sbd.DisableSbdService")
-class DisableSbdServiceTest(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- node = NodeAddresses("node1")
- lib_sbd.disable_sbd_service(mock_communicator, node)
- mock_communicator.call_node.assert_called_once_with(
- node, "remote/sbd_disable", None
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.sbd.DisableSbdService")
-class DisableSbdServiceOnNodeTest(TestCase):
- def setUp(self):
- self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- self.mock_rep = MockLibraryReportProcessor()
- self.node = NodeAddresses("node1")
-
- @mock.patch("pcs.lib.sbd.disable_sbd_service")
- def test_success(self, mock_disable_sbd):
- lib_sbd.disable_sbd_service_on_node(
- self.mock_rep, self.mock_com, self.node
- )
- mock_disable_sbd.assert_called_once_with(self.mock_com, self.node)
- assert_report_item_list_equal(
- self.mock_rep.report_item_list,
- [(
- Severities.INFO,
- report_codes.SERVICE_DISABLE_SUCCESS,
- {
- "service": "sbd",
- "node": self.node.label
- }
- )]
- )
-
- at skip("TODO: rewrite for pcs.lib.communication.sbd.DisableSbdService")
- at mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
-class DisableSbdServiceOnAllNodes(TestCase):
- def test_success(self, mock_func):
- mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- mock_rep = MockLibraryReportProcessor()
- node_list = [NodeAddresses("node" + str(i)) for i in range(5)]
- lib_sbd.disable_sbd_service_on_all_nodes(mock_rep, mock_com, node_list)
- mock_func.assert_called_once_with(
- lib_sbd.disable_sbd_service_on_node,
- [([mock_rep, mock_com, node], {}) for node in node_list]
- )
-
-@skip(
- "TODO: rewrite for "
- "pcs.lib.communication.sbd.SetStonithWatchdogTimeoutToZero"
-)
-class SetStonithWatchdogTimeoutToZeroTest(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- node = NodeAddresses("node1")
- lib_sbd.set_stonith_watchdog_timeout_to_zero(mock_communicator, node)
- mock_communicator.call_node.assert_called_once_with(
- node, "remote/set_stonith_watchdog_timeout_to_zero", None
- )
-
-@skip(
- "TODO: rewrite for "
- "pcs.lib.communication.sbd.SetStonithWatchdogTimeoutToZero"
-)
- at mock.patch("pcs.lib.sbd.set_stonith_watchdog_timeout_to_zero")
-class SetStonithWatchdogTimeoutToZeroOnAllNodesTest(TestCase):
- def setUp(self):
- self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- self.node_list = [NodeAddresses("node" + str(i)) for i in range(5)]
-
- def test_success(self, mock_func):
- lib_sbd.set_stonith_watchdog_timeout_to_zero_on_all_nodes(
- self.mock_com, self.node_list
- )
- func_calls = [mock.call(self.mock_com, node) for node in self.node_list]
- self.assertEqual(mock_func.call_count, len(func_calls))
- mock_func.assert_has_calls(func_calls)
-
- def test_communication_error(self, mock_func):
- def raiser(_, node):
- if node == self.node_list[1]:
- raise NodeConnectionException(
- self.node_list[1], "command", "reason"
- )
- elif node == self.node_list[4]:
- raise NodeCommunicationException(
- self.node_list[4], "command", "reason"
- )
-
- mock_func.side_effect = raiser
- assert_raise_library_error(
- lambda: lib_sbd.set_stonith_watchdog_timeout_to_zero_on_all_nodes(
- self.mock_com, self.node_list
- ),
- (
- Severities.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
- {
- "node": self.node_list[1],
- "command": "command",
- "reason": "reason"
- }
- ),
- (
- Severities.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR,
- {
- "node": self.node_list[4],
- "command": "command",
- "reason": "reason"
- }
- )
- )
- func_calls = [mock.call(self.mock_com, node) for node in self.node_list]
- self.assertEqual(mock_func.call_count, len(func_calls))
- mock_func.assert_has_calls(func_calls)
-
-@skip(
- "TODO: rewrite for pcs.lib.communication.sbd.RemoveStonithWatchdogTimeout"
-)
-class RemoveStonithWatchdogTimeoutTest(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- node = NodeAddresses("node1")
- lib_sbd.remove_stonith_watchdog_timeout(mock_communicator, node)
- mock_communicator.call_node.assert_called_once_with(
- node, "remote/remove_stonith_watchdog_timeout", None
- )
-
-@skip(
- "TODO: rewrite for pcs.lib.communication.sbd.RemoveStonithWatchdogTimeout"
-)
- at mock.patch("pcs.lib.sbd.remove_stonith_watchdog_timeout")
-class RemoveStonithWatchdogTimeoutOnAllNodesTest(TestCase):
- def setUp(self):
- self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
- self.node_list = [NodeAddresses("node" + str(i)) for i in range(5)]
-
- def test_success(self, mock_func):
- lib_sbd.remove_stonith_watchdog_timeout_on_all_nodes(
- self.mock_com, self.node_list
- )
- func_calls = [mock.call(self.mock_com, node) for node in self.node_list]
- self.assertEqual(mock_func.call_count, len(func_calls))
- mock_func.assert_has_calls(func_calls)
-
- def test_communication_error(self, mock_func):
- def raiser(_, node):
- if node == self.node_list[1]:
- raise NodeConnectionException(
- self.node_list[1], "command", "reason"
- )
- elif node == self.node_list[4]:
- raise NodeCommunicationException(
- self.node_list[4], "command", "reason"
- )
-
- mock_func.side_effect = raiser
- assert_raise_library_error(
- lambda: lib_sbd.remove_stonith_watchdog_timeout_on_all_nodes(
- self.mock_com, self.node_list
- ),
- (
- Severities.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
- {
- "node": self.node_list[1],
- "command": "command",
- "reason": "reason"
- }
- ),
- (
- Severities.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR,
- {
- "node": self.node_list[4],
- "command": "command",
- "reason": "reason"
- }
- )
- )
- func_calls = [mock.call(self.mock_com, node) for node in self.node_list]
- self.assertEqual(mock_func.call_count, len(func_calls))
- mock_func.assert_has_calls(func_calls)
-
- at skip("TODO: rewrite for pcs.lib.communication.sbd.GetSbdStatus")
-class GetSbdConfigTest(TestCase):
- def test_success(self):
- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
- node = NodeAddresses("node1")
- lib_sbd.get_sbd_config(mock_communicator, node)
- mock_communicator.call_node.assert_called_once_with(
- node, "remote/get_sbd_config", None
- )
-
-
@mock.patch("pcs.lib.external.is_systemctl")
class GetSbdServiceNameTest(TestCase):
def test_systemctl(self, mock_is_systemctl):
diff --git a/pcs/test/test_misc.py b/pcs/test/test_misc.py
new file mode 100644
index 0000000..a8b0b43
--- /dev/null
+++ b/pcs/test/test_misc.py
@@ -0,0 +1,38 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import shutil
+
+from pcs.test.tools.assertions import AssertPcsMixin
+from pcs.test.tools.misc import (
+ get_test_resource as rc,
+ outdent,
+)
+from pcs.test.tools.pcs_runner import PcsRunner
+from pcs.test.tools.pcs_unittest import TestCase
+
+temp_cib = rc("temp-cib.xml")
+
+class OldCibPushTest(TestCase, AssertPcsMixin):
+ def setUp(self):
+ shutil.copy(rc("cib-empty-1.2.xml"), temp_cib)
+ self.pcs_runner = PcsRunner(temp_cib)
+
+ def test_warning_old_push(self):
+ self.assert_pcs_success(
+ "resource create dummy ocf:pacemaker:Dummy --no-default-ops",
+ "Warning: Replacing the whole CIB instead of applying a diff, "
+ "a race condition may happen if the CIB is pushed more than "
+ "once simultaneously. To fix this, upgrade pacemaker to get "
+ "crm_feature_set at least 3.0.9, current is 3.0.8.\n"
+ )
+ self.assert_pcs_success(
+ "resource --full",
+ outdent("""\
+ Resource: dummy (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """)
+ )
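
The new test pins the warning pcs prints when pacemaker's crm_feature_set is below 3.0.9 and the whole CIB must be replaced instead of pushing a diff. A hedged sketch of that version gate; the function name is illustrative, not the actual pcs helper:

    def can_push_cib_diff(crm_feature_set):
        # crm_feature_set looks like "3.0.8"; diff-based push needs >= 3.0.9
        return tuple(int(x) for x in crm_feature_set.split(".")) >= (3, 0, 9)

    assert not can_push_cib_diff("3.0.8")
    assert can_push_cib_diff("3.0.9")
    assert can_push_cib_diff("3.1.0")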
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index bd596f6..5e8b0b8 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -2591,6 +2591,19 @@ Ticket Constraints:
ac(output, "")
self.assertEqual(0, returnVal)
+ self.assert_pcs_fail_regardless_of_force(
+ "resource move",
+ "Error: must specify a resource to move\n"
+ )
+ self.assert_pcs_fail_regardless_of_force(
+ "resource ban",
+ "Error: must specify a resource to ban\n"
+ )
+ self.assert_pcs_fail_regardless_of_force(
+ "resource clear",
+ "Error: must specify a resource to clear\n"
+ )
+
output, returnVal = pcs(
temp_cib,
"resource create --no-default-ops dummy ocf:heartbeat:Dummy"
@@ -3444,11 +3457,11 @@ Error: Cannot remove more than one resource from cloned group
# bad resource name
o,r = pcs(temp_cib, "resource enable NoExist")
- ac(o,"Error: resource/clone/master/group/bundle 'NoExist' does not exist\n")
+ ac(o,"Error: bundle/clone/group/master/resource 'NoExist' does not exist\n")
assert r == 1
o,r = pcs(temp_cib, "resource disable NoExist")
- ac(o,"Error: resource/clone/master/group/bundle 'NoExist' does not exist\n")
+ ac(o,"Error: bundle/clone/group/master/resource 'NoExist' does not exist\n")
assert r == 1
# cloned group
@@ -3955,7 +3968,7 @@ Error: Cannot remove more than one resource from cloned group
self.assert_pcs_fail_regardless_of_force(
"resource enable dummy3 dummyX",
- "Error: resource/clone/master/group/bundle 'dummyX' does not exist\n"
+ "Error: bundle/clone/group/master/resource 'dummyX' does not exist\n"
)
self.assert_pcs_success(
"resource show --full",
@@ -3975,7 +3988,7 @@ Error: Cannot remove more than one resource from cloned group
self.assert_pcs_fail_regardless_of_force(
"resource disable dummy1 dummyX",
- "Error: resource/clone/master/group/bundle 'dummyX' does not exist\n"
+ "Error: bundle/clone/group/master/resource 'dummyX' does not exist\n"
)
self.assert_pcs_success(
"resource show --full",
@@ -4683,7 +4696,7 @@ Error: Value of utilization attribute must be integer: 'test=int'
class ResourcesReferencedFromAclTest(unittest.TestCase, AssertPcsMixin):
def setUp(self):
- shutil.copy(rc('cib-empty-1.2.xml'), temp_cib)
+ shutil.copy(empty_cib, temp_cib)
self.pcs_runner = PcsRunner(temp_cib)
def test_remove_referenced_primitive_resource(self):
@@ -4798,7 +4811,7 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
class ResourceRemoveWithTicketTest(unittest.TestCase, AssertPcsMixin):
def setUp(self):
- shutil.copy(rc('cib-empty-1.2.xml'), temp_cib)
+ shutil.copy(empty_cib, temp_cib)
self.pcs_runner = PcsRunner(temp_cib)
def test_remove_ticket(self):
@@ -5103,7 +5116,7 @@ class BundleMiscCommands(BundleCommon):
class ResourceUpdateSpcialChecks(unittest.TestCase, AssertPcsMixin):
def setUp(self):
- shutil.copy(rc('cib-empty-1.2.xml'), temp_cib)
+ shutil.copy(empty_cib, temp_cib)
self.pcs_runner = PcsRunner(temp_cib)
def test_update_fail_on_pacemaker_guest_attempt(self):
diff --git a/pcs/test/test_status.py b/pcs/test/test_status.py
index b412b91..1a4fb70 100644
--- a/pcs/test/test_status.py
+++ b/pcs/test/test_status.py
@@ -21,45 +21,64 @@ class StonithWarningTest(TestCase, AssertPcsMixin):
shutil.copy(self.empty_cib, self.temp_cib)
self.pcs_runner = PcsRunner(self.temp_cib)
- def fixture_stonith(self, action=False):
+ def fixture_stonith_action(self):
self.assert_pcs_success(
- "stonith create S fence_apc ipaddr=i login=l {0} --force".format(
- "action=reboot" if action else ""
- ),
+ "stonith create Sa fence_apc ipaddr=i login=l action=reboot --force",
"Warning: stonith option 'action' is deprecated and should not be"
" used, use pcmk_off_action, pcmk_reboot_action instead\n"
- if action
- else ""
+ )
+
+ def fixture_stonith_cycle(self):
+ self.assert_pcs_success(
+ "stonith create Sc fence_ipmilan method=cycle"
)
def fixture_resource(self):
self.assert_pcs_success(
- "resource create dummy ocf:pacemaker:Dummy action=reboot --force",
- "Warning: invalid resource option 'action', allowed options are: "
- "envfile, fail_start_on, fake, op_sleep, passwd, state,"
- " trace_file, trace_ra\n"
+ "resource create dummy ocf:pacemaker:Dummy action=reboot "
+ "method=cycle --force"
+ ,
+ "Warning: invalid resource options: 'action', 'method', allowed "
+ "options are: envfile, fail_start_on, fake, op_sleep, passwd, "
+ "state, trace_file, trace_ra\n"
)
def test_warning_stonith_action(self):
- self.fixture_stonith(action=True)
+ self.fixture_stonith_action()
+ self.fixture_resource()
self.assert_pcs_success(
"status",
stdout_start=dedent("""\
Cluster name: test99
- WARNING: following stonith devices have the 'action' attribute set, it is recommended to set 'pcmk_off_action', 'pcmk_reboot_action' instead: S
+ WARNING: following stonith devices have the 'action' option set, it is recommended to set 'pcmk_off_action', 'pcmk_reboot_action' instead: Sa
Stack: unknown
Current DC: NONE
""")
)
- def test_action_ignored_for_non_stonith_resources(self):
- self.fixture_stonith(action=False)
+ def test_warning_stonith_method_cycle(self):
+ self.fixture_stonith_cycle()
self.fixture_resource()
+ self.assert_pcs_success(
+ "status",
+ stdout_start=dedent("""\
+ Cluster name: test99
+ WARNING: following stonith devices have the 'method' option set to 'cycle' which is potentially dangerous, please consider using 'onoff': Sc
+ Stack: unknown
+ Current DC: NONE
+ """)
+ )
+ def test_stonith_warnings(self):
+ self.fixture_stonith_action()
+ self.fixture_stonith_cycle()
+ self.fixture_resource()
self.assert_pcs_success(
"status",
stdout_start=dedent("""\
Cluster name: test99
+ WARNING: following stonith devices have the 'action' option set, it is recommended to set 'pcmk_off_action', 'pcmk_reboot_action' instead: Sa
+ WARNING: following stonith devices have the 'method' option set to 'cycle' which is potentially dangerous, please consider using 'onoff': Sc
Stack: unknown
Current DC: NONE
""")
diff --git a/pcs/test/tools/assertions.py b/pcs/test/tools/assertions.py
index d909d96..d9193e4 100644
--- a/pcs/test/tools/assertions.py
+++ b/pcs/test/tools/assertions.py
@@ -9,6 +9,7 @@ import doctest
from lxml.doctestcompare import LXMLOutputChecker
from lxml.etree import LXML_VERSION
import re
+from pcs.lib.errors import ReportItemSeverity
from pcs.lib.errors import LibraryError
@@ -245,6 +246,36 @@ def assert_xml_equal(expected_xml, got_xml, context_explanation=""):
)
)
+SEVERITY_SHORTCUTS = {
+ ReportItemSeverity.INFO: "I",
+ ReportItemSeverity.WARNING: "W",
+ ReportItemSeverity.ERROR: "E",
+ ReportItemSeverity.DEBUG: "D",
+}
+
+def _format_report_item_info(info):
+ return ", ".join([
+ "{0}:{1}".format(key, repr(value)) for key, value in info.items()
+ ])
+
+def _expected_report_item_format(report_item_expectation):
+ return "{0} {1} {{{2}}} ! {3}".format(
+ SEVERITY_SHORTCUTS.get(
+ report_item_expectation[0], report_item_expectation[0]
+ ),
+ report_item_expectation[1],
+ _format_report_item_info(report_item_expectation[2]),
+ report_item_expectation[3]
+ )
+
+def _format_report_item(report_item):
+ return _expected_report_item_format((
+ report_item.severity,
+ report_item.code,
+ report_item.info,
+ report_item.forceable
+ ))
+
def assert_report_item_equal(real_report_item, report_item_info):
if not __report_item_equal(real_report_item, report_item_info):
raise AssertionError(
@@ -256,22 +287,61 @@ def assert_report_item_equal(real_report_item, report_item_info):
report_item_info[2],
None if len(report_item_info) < 4 else report_item_info[3]
)),
- repr((
- real_report_item.severity,
- real_report_item.code,
- real_report_item.info,
- real_report_item.forceable
- ))
+ _format_report_item(real_report_item)
)
)
+def _unexpected_report_given(
+ all_expected_report_info_list,
+ expected_report_info_list, real_report_item, real_report_item_list
+):
+ return AssertionError(
+ (
+ "\n Unexpected real report given:"
+ "\n =============================\n {0}\n"
+ "\n remaining expected reports ({1}) are:"
+ "\n ------------------------------------\n {2}\n"
+ "\n all expected reports ({3}) are:"
+ "\n ------------------------------\n {4}\n"
+ "\n all real reports ({5}):"
+ "\n ---------------------\n {6}"
+ )
+ .format(
+ _format_report_item(real_report_item),
+ len(expected_report_info_list),
+ "\n ".join(map(
+ _expected_report_item_format, expected_report_info_list
+ )) if expected_report_info_list
+ else "No other report is expected!"
+ ,
+ len(all_expected_report_info_list),
+ "\n ".join(map(
+ _expected_report_item_format, all_expected_report_info_list
+ )) if all_expected_report_info_list
+ else "No report is expected!"
+ ,
+ len(real_report_item_list),
+ "\n ".join(map(_format_report_item, real_report_item_list)),
+ )
+ )
+
def assert_report_item_list_equal(
real_report_item_list, expected_report_info_list, hint=""
):
+ all_expected_report_info_list = expected_report_info_list[:]
for real_report_item in real_report_item_list:
- expected_report_info_list.remove(
- __find_report_info(expected_report_info_list, real_report_item)
+ found_report_info = __find_report_info(
+ expected_report_info_list,
+ real_report_item
)
+ if found_report_info is None:
+ raise _unexpected_report_given(
+ all_expected_report_info_list,
+ expected_report_info_list,
+ real_report_item,
+ real_report_item_list,
+ )
+ expected_report_info_list.remove(found_report_info)
if expected_report_info_list:
def format_items(item_type, item_list):
caption = "{0} ReportItems({1})".format(item_type, len(item_list))
@@ -300,20 +370,7 @@ def __find_report_info(expected_report_info_list, real_report_item):
for report_info in expected_report_info_list:
if __report_item_equal(real_report_item, report_info):
return report_info
- raise AssertionError(
- "Unexpected report given: \n{0} \nexpected reports are: \n{1}"
- .format(
- repr((
- real_report_item.severity,
- real_report_item.code,
- real_report_item.info,
- real_report_item.forceable
- )),
- "\n".join(map(repr, expected_report_info_list))
- if expected_report_info_list
- else " No other report is expected!"
- )
- )
+ return None
def __report_item_equal(real_report_item, report_item_info):
return (
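For orientation: the expected report items handled above are plain tuples of (severity, code, info, forceable), normally built with the helpers from pcs/test/tools/fixture.py. A minimal sketch of the assertion, with placeholder report codes and info keys (report_processor stands for the test's mocked report processor):

    from pcs.test.tools import fixture
    from pcs.test.tools.assertions import assert_report_item_list_equal

    assert_report_item_list_equal(
        report_processor.report_item_list,
        [
            fixture.info("SOME_REPORT_CODE", node="node-1"),
            fixture.error("OTHER_REPORT_CODE", force_code="FORCE_CODE", id="dummy"),
        ],
    )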
diff --git a/pcs/test/tools/case_analysis.py b/pcs/test/tools/case_analysis.py
new file mode 100644
index 0000000..9117d08
--- /dev/null
+++ b/pcs/test/tools/case_analysis.py
@@ -0,0 +1,29 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+def _list2reason(test, exc_list):
+ if exc_list and exc_list[-1][0] is test:
+ return exc_list[-1][1]
+
+def test_failed(test):
+ # Borrowed from
+ # https://stackoverflow.com/questions/4414234/getting-pythons-unittest-results-in-a-teardown-method/39606065#39606065
+ # for Python versions 2.7 to 3.6
+ if hasattr(test, '_outcome'): # Python 3.4+
+ # these 2 methods have no side effects
+ result = test.defaultTestResult()
+ test._feedErrorsToResult(result, test._outcome.errors)
+ else: # Python 3.2 - 3.3 or 3.0 - 3.1 and 2.7
+ result = getattr(
+ test,
+ '_outcomeForDoCleanups', test._resultForDoCleanups
+ )
+
+ return (
+ _list2reason(test, result.errors)
+ or
+ _list2reason(test, result.failures)
+ )
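test_failed is intended for cleanup callbacks that should stay quiet once the test body itself has failed; EnvAssistant.cleanup in the next file uses it exactly that way. A minimal sketch with a hypothetical test class:

    from pcs.test.tools.case_analysis import test_failed
    from pcs.test.tools.pcs_unittest import TestCase

    class ExampleTest(TestCase):
        def setUp(self):
            self.addCleanup(self.check_everything_consumed)

        def check_everything_consumed(self):
            if test_failed(self):
                # the test body already failed, do not add more noise
                return
            # ...secondary consistency assertions would go here...
            pass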
diff --git a/pcs/test/tools/command_env/assistant.py b/pcs/test/tools/command_env/assistant.py
index a3fef28..916246b 100644
--- a/pcs/test/tools/command_env/assistant.py
+++ b/pcs/test/tools/command_env/assistant.py
@@ -5,12 +5,25 @@ from __future__ import (
)
import logging
+import os
+import os.path
+import sys
from functools import partial
from pcs.lib.env import LibraryEnvironment
from pcs.test.tools.assertions import assert_raise_library_error, prepare_diff
+from pcs.test.tools.case_analysis import test_failed
+from pcs.test.tools.command_env import spy
from pcs.test.tools.command_env.calls import Queue as CallQueue
from pcs.test.tools.command_env.config import Config
+from pcs.test.tools.command_env.mock_fs import(
+ get_fs_mock,
+ is_fs_call_in,
+)
+from pcs.test.tools.command_env.mock_get_local_corosync_conf import(
+ get_get_local_corosync_conf
+)
+from pcs.test.tools.command_env.mock_node_communicator import NodeCommunicator
from pcs.test.tools.command_env.mock_push_cib import(
get_push_cib,
is_push_cib_call_in,
@@ -19,14 +32,10 @@ from pcs.test.tools.command_env.mock_push_corosync_conf import(
get_push_corosync_conf,
is_push_corosync_conf_call_in,
)
+from pcs.test.tools import fixture
from pcs.test.tools.command_env.mock_runner import Runner
-from pcs.test.tools.command_env.mock_get_local_corosync_conf import(
- get_get_local_corosync_conf
-)
-from pcs.test.tools.command_env.mock_node_communicator import NodeCommunicator
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.test.tools.pcs_unittest import mock
-from pcs.test.tools.command_env import spy
patch_lib_env = partial(mock.patch.object, LibraryEnvironment)
@@ -67,6 +76,31 @@ def patch_env(call_queue, config, init_env):
)
]
+ if is_fs_call_in(call_queue):
+ fs_mock = get_fs_mock(call_queue)
+ builtin = (
+ ("__builtin__" if sys.version_info[0] == 2 else "builtins")+".{0}"
+ ).format
+
+ patcher_list.extend([
+ mock.patch(
+ builtin("open"),
+ fs_mock("open", open)
+ ),
+ mock.patch(
+ "os.path.exists",
+ fs_mock("os.path.exists", os.path.exists)
+ ),
+ mock.patch(
+ "os.chmod",
+ fs_mock("os.chmod", os.chmod)
+ ),
+ mock.patch(
+ "os.chown",
+ fs_mock("os.chown", os.chown)
+ ),
+ ])
+
# It is not always desirable to patch these methods. Some tests may patch
# only the internals (runner etc.). So these methods are only patched when
# it is explicitly configured.
@@ -113,16 +147,23 @@ class EnvAssistant(object):
self.__original_mocked_corosync_conf = None
if test_case:
- test_case.addCleanup(self.cleanup)
+ test_case.addCleanup(lambda: self.cleanup(test_case))
@property
def config(self):
return self.__config
- def cleanup(self):
+ def cleanup(self, current_test):
if self.__unpatch:
self.__unpatch()
+ if test_failed(current_test):
+ # We have already got the message that the main test failed. There
+ # is a high probability that something remains in the reports or in
+ # the call queue etc., but that is only a consequence of the main
+ # test failure and we do not want to make the report confusing.
+ return
+
if not self.__reports_asserted:
self.__assert_environment_created()
if not self.__config.spy:
@@ -178,7 +219,8 @@ class EnvAssistant(object):
"tokens": self.__config.spy.auth_tokens,
"ports": self.__config.spy.ports or {},
}) if self.__config.spy else None
- )
+ ),
+ booth=self.__config.env.booth,
)
self.__unpatch = patch_env(self.__call_queue, self.__config, self._env)
# If pushing corosync.conf has not been patched in the
@@ -190,16 +232,25 @@ class EnvAssistant(object):
)
return self._env
- def assert_reports(self, reports):
+ def assert_reports(self, expected_reports):
self.__reports_asserted = True
self.__assert_environment_created()
self._env.report_processor.assert_reports(
- reports + self.__extra_reports
+ (
+ expected_reports.reports
+ if isinstance(expected_reports, fixture.ReportStore)
+ else expected_reports
+ )
+ +
+ self.__extra_reports
)
def assert_raise_library_error(
- self, command, reports, expected_in_processor=None
+ self, command, reports=None, expected_in_processor=None
):
+ if reports is None:
+ reports = []
+
if not isinstance(reports, list):
raise self.__list_of_reports_expected(reports)
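assert_reports thus accepts either a plain list of expected report tuples or a fixture.ReportStore (introduced in pcs/test/tools/fixture.py below). A hedged sketch of both forms, with placeholder report codes:

    from pcs.test.tools import fixture

    # a plain list of expected reports
    env_assist.assert_reports([
        fixture.info("SOME_REPORT_CODE", node="node-1"),
    ])

    # or a named, reusable ReportStore
    env_assist.assert_reports(
        fixture.ReportStore().info("first", "SOME_REPORT_CODE", node="node-1")
    )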
diff --git a/pcs/test/tools/command_env/config.py b/pcs/test/tools/command_env/config.py
index fa1fd46..d3f1561 100644
--- a/pcs/test/tools/command_env/config.py
+++ b/pcs/test/tools/command_env/config.py
@@ -4,11 +4,14 @@ from __future__ import (
print_function,
)
+import inspect
+
from pcs.test.tools.command_env.calls import CallListBuilder
from pcs.test.tools.command_env.config_env import EnvConfig
from pcs.test.tools.command_env.config_corosync_conf import CorosyncConf
from pcs.test.tools.command_env.config_runner import RunnerConfig
from pcs.test.tools.command_env.config_http import HttpConfig
+from pcs.test.tools.command_env.config_fs import FsConfig
class Spy(object):
def __init__(self, auth_tokens, ports=None):
@@ -19,22 +22,32 @@ class Config(object):
def __init__(self):
self.__calls = CallListBuilder()
self.runner = self.__wrap_helper(
- RunnerConfig(
- self.__calls,
- self.__wrap_helper,
- )
+ RunnerConfig(self.__calls, self.__wrap_helper)
)
self.env = self.__wrap_helper(EnvConfig(self.__calls))
- self.http = self.__wrap_helper(HttpConfig(self.__calls))
+ self.http = self.__wrap_helper(
+ HttpConfig(self.__calls, self.__wrap_helper)
+ )
self.corosync_conf = self.__wrap_helper(CorosyncConf(self.__calls))
+ self.fs = self.__wrap_helper(FsConfig(self.__calls))
self.spy = None
+ def add_extension(self, name, Extension):
+ if hasattr(self, name):
+ raise AssertionError(
+ "Config (integration tests) has the extension '{0}' already."
+ .format(name)
+ )
+ setattr(self, name, self.__wrap_helper(
+ Extension(self.__calls, self.__wrap_helper, self)
+ ))
+
+
def set_spy(self, auth_tokens, ports=None):
self.spy = Spy(auth_tokens, ports)
return self
-
@property
def calls(self):
return self.__calls
@@ -65,7 +78,7 @@ class Config(object):
object helper -- helper for creating call configuration
"""
- for name, attr in helper.__class__.__dict__.items():
+ for name, attr in inspect.getmembers(helper.__class__):
if not name.startswith("_") and hasattr(attr, "__call__"):
self.__wrap_method(helper, name, attr)
return helper
diff --git a/pcs/test/tools/command_env/config_env.py b/pcs/test/tools/command_env/config_env.py
index 5aaec15..b2d7a97 100644
--- a/pcs/test/tools/command_env/config_env.py
+++ b/pcs/test/tools/command_env/config_env.py
@@ -17,6 +17,7 @@ class EnvConfig(object):
self.__cib_data = None
self.__cib_tempfile = None
self.__corosync_conf_data = None
+ self.__booth = None
def set_cib_data(self, cib_data, cib_tempfile="/fake/tmp/file"):
@@ -32,6 +33,13 @@ class EnvConfig(object):
def cib_tempfile(self):
return self.__cib_tempfile
+ def set_booth(self, booth):
+ self.__booth = booth
+
+ @property
+ def booth(self):
+ return self.__booth
+
def set_corosync_conf_data(self, corosync_conf_data):
self.__corosync_conf_data = corosync_conf_data
diff --git a/pcs/test/tools/command_env/config_fs.py b/pcs/test/tools/command_env/config_fs.py
new file mode 100644
index 0000000..61e3b34
--- /dev/null
+++ b/pcs/test/tools/command_env/config_fs.py
@@ -0,0 +1,65 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.command_env.mock_fs import Call as FsCall
+
+
+class FsConfig(object):
+ def __init__(self, call_collection):
+ self.__calls = call_collection
+
+ def open(
+ self, path, return_value=None, side_effect=None, name="fs.open",
+ mode="r", before=None, instead=None
+ ):
+ call = FsCall(
+ "open",
+ call_kwargs={"name": path, "mode": mode},
+ # TODO use mock_open here. Allow using simply "read_data",
+ # "side_effect" etc. It depends on future use cases...
+ return_value=return_value,
+ side_effect=side_effect,
+ )
+ self.__calls.place(name, call, before, instead)
+
+ def exists(
+ self, path, return_value="", name="fs.exists", before=None, instead=None
+ ):
+ call = FsCall(
+ "os.path.exists",
+ call_kwargs={"path": path},
+ return_value=return_value,
+ )
+ self.__calls.place(name, call, before, instead)
+
+ def chmod(
+ self, path, mode, side_effect=None, name="os.chmod", before=None,
+ instead=None,
+ ):
+ call = FsCall(
+ "os.chmod",
+ call_kwargs=dict(
+ fd=path,
+ mode=mode,
+ ),
+ side_effect=side_effect,
+ )
+ self.__calls.place(name, call, before, instead)
+
+ def chown(
+ self, path, uid, gid, side_effect=None, name="os.chown", before=None,
+ instead=None,
+ ):
+ call = FsCall(
+ "os.chown",
+ call_kwargs=dict(
+ fd=path,
+ uid=uid,
+ gid=gid,
+ ),
+ side_effect=side_effect,
+ )
+ self.__calls.place(name, call, before, instead)
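A hedged usage sketch of FsConfig (paths and file content are illustrative); the return_value for open is typically a mock file object, e.g. one produced by mock.mock_open as the TODO above suggests:

    from pcs.test.tools.pcs_unittest import mock

    config.fs.exists("/etc/booth/booth.conf", return_value=True)
    config.fs.open(
        "/etc/booth/booth.conf",
        return_value=mock.mock_open(read_data="site = 1.1.1.1\n")(),
    )
    config.fs.chmod("/etc/booth/booth.key", 0o600)
    config.fs.chown("/etc/booth/booth.key", uid=0, gid=0)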
diff --git a/pcs/test/tools/command_env/config_http.py b/pcs/test/tools/command_env/config_http.py
index b5c610b..a9fff91 100644
--- a/pcs/test/tools/command_env/config_http.py
+++ b/pcs/test/tools/command_env/config_http.py
@@ -4,123 +4,197 @@ from __future__ import (
print_function,
)
-from pcs.common import pcs_pycurl as pycurl
-from pcs.common.node_communicator import(
- RequestTarget,
- RequestData,
- Request,
- Response,
-)
+import json
+import pprint
+
+from pcs.test.tools.command_env.config_http_booth import BoothShortcuts
+from pcs.test.tools.command_env.config_http_corosync import CorosyncShortcuts
+from pcs.test.tools.command_env.config_http_host import HostShortcuts
+from pcs.test.tools.command_env.config_http_pcmk import PcmkShortcuts
+from pcs.test.tools.command_env.config_http_sbd import SbdShortcuts
from pcs.test.tools.command_env.mock_node_communicator import(
- AddRequestCall,
- StartLoopCall,
+ place_communication,
+ place_requests,
+ place_responses,
+)
+from pcs.test.tools.command_env.mock_node_communicator import (
+ place_multinode_call
)
-from pcs.test.tools.custom_mock import MockCurlSimple
+
+def _mutual_exclusive(param_names, **kwargs):
+ entered = {
+ key: value for key, value in kwargs.items()
+ if key in param_names and value is not None
+ }
+ if len(entered) != 1:
+ raise AssertionError(
+ "Exactly one of '{0}' must be specified, \nwas specified:\n{1}"
+ .format(
+ "', '".join(param_names),
+ pprint.pformat(entered) if entered else " nothing",
+ )
+ )
class HttpConfig(object):
- def __init__(self, call_collection):
+ def __init__(self, call_collection, wrap_helper):
self.__calls = call_collection
- def __communication_to_response(
- self, label, address_list, action, param_list, port, token,
- response_code, output, debug_output, was_connected, errno,
- error_msg
+ self.booth = wrap_helper(BoothShortcuts(self.__calls))
+ self.corosync = wrap_helper(CorosyncShortcuts(self.__calls))
+ self.host = wrap_helper(HostShortcuts(self.__calls))
+ self.pcmk = wrap_helper(PcmkShortcuts(self.__calls))
+ self.sbd = wrap_helper(SbdShortcuts(self.__calls))
+
+ def add_communication(self, name, communication_list, **kwargs):
+ """
+ Create a generic call for network communication.
+ string name -- key of the call
+ list of dict communication_list -- see
+ pcs.test.tools.command_env.mock_node_communicator.create_communication
+ **kwargs -- see
+ pcs.test.tools.command_env.mock_node_communicator.create_communication
+ """
+ place_communication(self.__calls, name, communication_list, **kwargs)
+
+ def add_requests(self, request_list, name):
+ place_requests(self.__calls, name, request_list)
+
+ def start_loop(self, response_list, name):
+ place_responses(self.__calls, name, response_list)
+
+ def put_file(
+ self, communication_list, name="http.common.put_file",
+ results=None, files=None, **kwargs
):
- return Response(
- MockCurlSimple(
- info={pycurl.RESPONSE_CODE: response_code},
- output=output.encode("utf-8"),
- debug_output=debug_output.encode("utf-8"),
- request=Request(
- RequestTarget(label, address_list, port, token),
- RequestData(action, param_list),
- )
- ),
- was_connected=was_connected,
- errno=6,
- error_msg= error_msg,
+ """
+ Example:
+ config.http.put_file(
+ communication_list=[dict(label="node")],
+ files={
+ "pacemaker_remote authkey": {
+ "type": "pcmk_remote_authkey",
+ "data": base64.b64encode(pcmk_authkey_content),
+ "rewrite_existing": True
+ }
+ },
+ results={
+ "pacemaker_remote authkey": {
+ "code": "written",
+ "message": "",
+ }
+ }
+ )
+ """
+
+ _mutual_exclusive(["output", "results"], results=results, **kwargs)
+ _mutual_exclusive(["files", "param_list"], files=files, **kwargs)
+
+ if results:
+ kwargs["output"]=json.dumps({"files": results})
+
+ if files:
+ kwargs["param_list"] = [("data_json", json.dumps(files))]
+
+
+ self.place_multinode_call(
+ name,
+ communication_list=communication_list,
+ action="remote/put_file",
+ **kwargs
)
- def add_communication(
- self, name, communication_list,
- action="", param_list=None, port=None, token=None,
- response_code=None, output="", debug_output="", was_connected=True,
- errno=0, error_msg_template=None
+ def remove_file(
+ self, communication_list, name="http.common.remove_file",
+ results=None, files=None, **kwargs
):
"""
- list of dict communication_list -- is setting for one request - response
- it accepts keys:
- label -- required, see RequestTarget
- action -- pcsd url, see RequestData
- param_list -- list of pairs, see RequestData
- port -- see RequestTarget
- token=None -- see RequestTarget
- response_code -- http response code
- output -- http response output
- debug_output -- pycurl debug output
- was_connected -- see Response
- errno -- see Response
- error_msg -- see Response
- if some key is not present, it is put here from common values - rest
- args of this fuction(except name, communication_list,
- error_msg_template)
- string error_msg_template -- template, the keys for format function will
- be taken from appropriate item of communication_list
- string action -- pcsd url, see RequestData
- list of pairs (tuple) param_list -- see RequestData
- string port -- see RequestTarget
- string token=None -- see RequestTarget
- string response_code -- http response code
- string output -- http response output
- string debug_output -- pycurl debug output
- bool was_connected -- see Response
- int errno -- see Response
- string error_msg -- see Response
- """
- response_list = []
-
- common = dict(
- action=action,
- param_list=param_list if param_list else [],
- port=port,
- token=token,
- response_code=response_code,
- output=output,
- debug_output=debug_output,
- was_connected=was_connected,
- errno=errno,
+ Example:
+ config.http.remove_file(
+ communication_list=[dict(label="node")],
+ files={
+ "pacemaker_remote authkey": {
+ "type": "pcmk_remote_authkey",
+ }
+ },
+ results={
+ "pacemaker_remote authkey": {
+ "code": "deleted",
+ "message": "",
+ }
+ }
)
- for communication in communication_list:
- if "address_list" not in communication:
- communication["address_list"] = [communication["label"]]
+ """
- full = common.copy()
- full.update(communication)
+ _mutual_exclusive(["output", "results"], results=results, **kwargs)
+ _mutual_exclusive(["files", "param_list"], files=files, **kwargs)
- if "error_msg" not in full:
- full["error_msg"] = (
- "" if not error_msg_template
- else error_msg_template.format(**full)
- )
+ if results:
+ kwargs["output"]=json.dumps({"files": results})
+ if files:
+ kwargs["param_list"] = [("data_json", json.dumps(files))]
- response_list.append(
- self.__communication_to_response(**full)
- )
- request_list = [response.request for response in response_list]
+ self.place_multinode_call(
+ name,
+ communication_list=communication_list,
+ action="remote/remove_file",
+ **kwargs
+ )
+
+ def manage_services(
+ self, communication_list, name="http.common.manage_services",
+ results=None, action_map=None, **kwargs
+ ):
+ """
+ Example:
+ config.http.manage_services(
+ communication_list=[dict(label=label)],
+ action_map={
+ "pacemaker_remote enable": {
+ "type": "service_command",
+ "service": "pacemaker_remote",
+ "command": "enable",
+ },
+ "pacemaker_remote start": {
+ "type": "service_command",
+ "service": "pacemaker_remote",
+ "command": "start",
+ },
+ },
+ results={
+ "pacemaker_remote enable": {
+ "code": "success",
+ "message": "",
+ },
+ "pacemaker_remote start": {
+ "code": "success",
+ "message": "",
+ }
+ }
+ )
+ """
+ _mutual_exclusive(["output", "results"], results=results, **kwargs)
+ _mutual_exclusive(
+ ["action_map", "param_list"],
+ action_map=action_map,
+ **kwargs
+ )
- #TODO #when multiple add_request needed there should be:
- # * unique name for each add_request
- # * find start_loop by name and replace it with the new one that will
- # have merged responses
- self.add_requests(request_list, name="{0}_requests".format(name))
- self.start_loop(response_list, name="{0}_responses".format(name))
+ if results:
+ kwargs["output"]=json.dumps({"actions": results})
+ if action_map:
+ kwargs["param_list"] = [("data_json", json.dumps(action_map))]
- def add_requests(self, request_list, name):
- self.__calls.place(name, AddRequestCall(request_list))
- def start_loop(self, response_list, name):
- self.__calls.place(name, StartLoopCall(response_list))
+ self.place_multinode_call(
+ name,
+ communication_list=communication_list,
+ action="remote/manage_services",
+ **kwargs
+ )
+
+ def place_multinode_call(self, *args, **kwargs):
+ place_multinode_call(self.__calls, *args, **kwargs)
diff --git a/pcs/test/tools/command_env/config_http_booth.py b/pcs/test/tools/command_env/config_http_booth.py
new file mode 100644
index 0000000..27f017d
--- /dev/null
+++ b/pcs/test/tools/command_env/config_http_booth.py
@@ -0,0 +1,73 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import json
+import base64
+
+from pcs.test.tools.command_env.mock_node_communicator import (
+ place_multinode_call
+)
+
+class BoothShortcuts(object):
+ def __init__(self, calls):
+ self.__calls = calls
+
+ def send_config(
+ self, booth_name, config,
+ authfile=None,
+ authfile_data=None,
+ node_labels=None,
+ communication_list=None,
+ name="http.booth.send_config"
+ ):
+ data = {
+ "config": {
+ "name": "{}.conf".format(booth_name),
+ "data": config,
+ }
+ }
+ if authfile and authfile_data:
+ data["authfile"] = {
+ "name": authfile,
+ "data": base64.b64encode(authfile_data).decode("utf-8"),
+ }
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/booth_set_config",
+ param_list=[("data_json", json.dumps(data))]
+ )
+
+ def get_config(
+ self, booth_name,
+ config_data=None,
+ authfile=None,
+ authfile_data=None,
+ node_labels=None,
+ communication_list=None,
+ name="http.booth.get_config"
+ ):
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/booth_get_config",
+ param_list=[("name", booth_name)],
+ output=json.dumps({
+ "config": {
+ "data": config_data,
+ },
+ "authfile": {
+ "name": authfile,
+ "data":
+ base64.b64encode(authfile_data).decode("utf-8")
+ if authfile_data else None,
+ },
+ }),
+ )
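A hedged sketch of these shortcuts in a booth test (booth name, config text and key data are illustrative):

    config.http.booth.send_config(
        "booth",
        "site = 1.1.1.1\n",
        authfile="booth.key",
        authfile_data=b"key data",
        node_labels=["node-1", "node-2"],
    )
    config.http.booth.get_config(
        "booth",
        config_data="site = 1.1.1.1\n",
        node_labels=["node-1"],
    )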
diff --git a/pcs/test/tools/command_env/config_http_corosync.py b/pcs/test/tools/command_env/config_http_corosync.py
new file mode 100644
index 0000000..2c07302
--- /dev/null
+++ b/pcs/test/tools/command_env/config_http_corosync.py
@@ -0,0 +1,135 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.command_env.mock_node_communicator import (
+ place_multinode_call
+)
+
+class CorosyncShortcuts(object):
+ def __init__(self, calls):
+ self.__calls = calls
+
+ def check_corosync_offline(
+ self, node_labels=None, communication_list=None,
+ name="http.corosync.check_corosync_offline"
+ ):
+ """
+ Create a call for checking that corosync is offline
+
+ string name -- the key of this call
+ list node_labels -- create success responses from these nodes
+ list communication_list -- create custom responses
+ """
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/status",
+ output='{"corosync":false}'
+ )
+
+ def qdevice_client_enable(
+ self, name="http.corosync.qdevice_client_enable",
+ node_labels=None, communication_list=None
+ ):
+ """
+ Create a call for enabling qdevice service
+
+ string name -- the key of this call
+ list node_labels -- create success responses from these nodes
+ list communication_list -- create custom responses
+ """
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/qdevice_client_enable",
+ output="corosync-qdevice enabled",
+ )
+
+ def qdevice_client_disable(
+ self, name="http.corosync.qdevice_client_disable",
+ node_labels=None, communication_list=None
+ ):
+ """
+ Create a call for disabling qdevice service
+
+ string name -- the key of this call
+ list node_labels -- create success responses from these nodes
+ list communication_list -- create custom responses
+ """
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/qdevice_client_disable",
+ output="corosync-qdevice disabled",
+ )
+
+ def qdevice_client_start(
+ self, name="http.corosync.qdevice_client_start",
+ node_labels=None, communication_list=None
+ ):
+ """
+ Create a call for starting qdevice service
+
+ string name -- the key of this call
+ list node_labels -- create success responses from these nodes
+ list communication_list -- create custom responses
+ """
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/qdevice_client_start",
+ output="corosync-qdevice started",
+ )
+
+ def qdevice_client_stop(
+ self, name="http.corosync.qdevice_client_stop",
+ node_labels=None, communication_list=None
+ ):
+ """
+ Create a call for stopping qdevice service
+
+ string name -- the key of this call
+ list node_labels -- create success responses from these nodes
+ list communication_list -- create custom responses
+ """
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/qdevice_client_stop",
+ output="corosync-qdevice stopped",
+ )
+
+ def set_corosync_conf(
+ self, corosync_conf, node_labels=None, communication_list=None,
+ name="http.corosync.set_corosync_conf"
+ ):
+ """
+ Create a call for sending corosync.conf text
+
+ string corosync_conf -- corosync.conf text to be sent
+ list node_labels -- create success responses from these nodes
+ list communication_list -- create custom responses
+ string name -- the key of this call
+ """
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/set_corosync_conf",
+ param_list=[("corosync_conf", corosync_conf)],
+ output="Succeeded",
+ )
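These shortcuts are driven either by node_labels (all nodes answer successfully) or by a communication_list with per-node overrides; a hedged sketch:

    # every node answers successfully
    config.http.corosync.qdevice_client_enable(
        node_labels=["node-1", "node-2"]
    )

    # one node fails; the response details are illustrative
    config.http.corosync.check_corosync_offline(
        communication_list=[
            dict(label="node-1"),
            dict(label="node-2", response_code=400, output="error"),
        ]
    )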
diff --git a/pcs/test/tools/command_env/config_http_host.py b/pcs/test/tools/command_env/config_http_host.py
new file mode 100644
index 0000000..f8475d9
--- /dev/null
+++ b/pcs/test/tools/command_env/config_http_host.py
@@ -0,0 +1,34 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.command_env.mock_node_communicator import (
+ place_multinode_call
+)
+
+class HostShortcuts(object):
+ def __init__(self, calls):
+ self.__calls = calls
+
+ def check_auth(
+ self, node_labels=None, communication_list=None,
+ name="http.host.check_auth"
+ ):
+ """
+ Create a call for checking authentication on hosts
+
+ node_labels list -- create success responses from these nodes
+ communication_list list -- create custom responses
+ name string -- the key of this call
+ """
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/check_auth",
+ output='{"success":true}',
+ param_list=[("check_auth_only", 1)],
+ )
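A hedged sketch of check_auth, once with plain success and once with a custom failure response (the failure payload is illustrative):

    config.http.host.check_auth(node_labels=["node-1"])

    config.http.host.check_auth(
        communication_list=[
            dict(label="node-2", response_code=401, output='{"success":false}'),
        ]
    )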
diff --git a/pcs/test/tools/command_env/config_http_pcmk.py b/pcs/test/tools/command_env/config_http_pcmk.py
new file mode 100644
index 0000000..0c4e3d1
--- /dev/null
+++ b/pcs/test/tools/command_env/config_http_pcmk.py
@@ -0,0 +1,44 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.command_env.mock_node_communicator import (
+ place_multinode_call
+)
+
+class PcmkShortcuts(object):
+ def __init__(self, calls):
+ self.__calls = calls
+
+ def set_stonith_watchdog_timeout_to_zero(
+ self, node_labels=None, communication_list=None,
+ name="http.pcmk.set_stonith_watchdog_timeout_to_zero"
+ ):
+ """
+ Create a call for setting stonith-watchdog-timeout to zero on hosts
+
+ node_labels list -- create success responses from these nodes
+ communication_list list -- create custom responses
+ name string -- the key of this call
+ """
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/set_stonith_watchdog_timeout_to_zero"
+ )
+
+ def remove_stonith_watchdog_timeout(
+ self, node_labels=None, communication_list=None,
+ name="http.pcmk.remove_stonith_watchdog_timeout"
+ ):
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/remove_stonith_watchdog_timeout"
+ )
diff --git a/pcs/test/tools/command_env/config_http_sbd.py b/pcs/test/tools/command_env/config_http_sbd.py
new file mode 100644
index 0000000..aebb3f2
--- /dev/null
+++ b/pcs/test/tools/command_env/config_http_sbd.py
@@ -0,0 +1,84 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.command_env.mock_node_communicator import (
+ place_multinode_call
+)
+
+class SbdShortcuts(object):
+ def __init__(self, calls):
+ self.__calls = calls
+
+ def enable_sbd(
+ self, node_labels=None, communication_list=None,
+ name="http.sbd.enable_sbd"
+ ):
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/sbd_enable"
+ )
+
+ def disable_sbd(
+ self, node_labels=None, communication_list=None,
+ name="http.sbd.disable_sbd"
+ ):
+ """
+ Create a call for disabling sbd on nodes
+
+ node_labels list -- create success responses from these nodes
+ communication_list list -- create custom responses
+ name string -- the key of this call
+ """
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/sbd_disable"
+ )
+
+ def check_sbd(
+ self, watchdog=None, device_list=(), node_labels=None,
+ communication_list=None, name="http.sbd.check_sbd"
+ ):
+ place_multinode_call(
+ self.__calls,
+ name,
+ node_labels,
+ communication_list,
+ action="remote/check_sbd",
+ )
+
+ def set_sbd_config(
+ self, config_generator=None, node_labels=None, communication_list=None,
+ name="http.sbd.set_sbd_config"
+ ):
+ if bool(config_generator) == bool(communication_list):
+ raise AssertionError(
+ "Exactly one of 'config_generator', 'communication_list' "
+ "must be specified"
+ )
+ if config_generator and not node_labels:
+ raise AssertionError(
+ "'node_labels' has to be defined if 'config_generator' is used"
+ )
+ if communication_list is None:
+ communication_list = [
+ dict(
+ param_list=[("config", config_generator(node))],
+ label=node,
+ ) for node in node_labels
+ ]
+ place_multinode_call(
+ self.__calls,
+ name,
+ None,
+ communication_list,
+ action="remote/set_sbd_config",
+ )
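set_sbd_config takes a config_generator callback so every node can receive its own config text; a hedged sketch (the config content is illustrative):

    config.http.sbd.set_sbd_config(
        config_generator=lambda node: (
            "SBD_WATCHDOG_DEV=/dev/watchdog\n"
            "SBD_OPTS=\"-n {0}\"\n".format(node)
        ),
        node_labels=["node-1", "node-2"],
    )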
diff --git a/pcs/test/tools/command_env/config_runner_cib.py b/pcs/test/tools/command_env/config_runner_cib.py
index 797e2a8..cd16af8 100644
--- a/pcs/test/tools/command_env/config_runner_cib.py
+++ b/pcs/test/tools/command_env/config_runner_cib.py
@@ -74,6 +74,33 @@ class CibShortcuts(object):
self.__calls.place(name, call, before=before)
+ def load_content(
+ self,
+ cib,
+ returncode=0,
+ stderr=None,
+ name="runner.cib.load_content",
+ instead=None,
+ before=None,
+ ):
+ """
+ Create call for loading CIB specified by its full content
+
+ string cib -- CIB data (stdout of the loading process)
+ string stderr -- error returned from the loading process
+ int returncode -- exit code of the loading process
+ string name -- key of the call
+ string instead -- key of call instead of which this new call is to be
+ placed
+ string before -- key of call before which this new call is to be placed
+ """
+ command = "cibadmin --local --query"
+ if returncode != 0:
+ call = RunnerCall(command, stderr=stderr, returncode=returncode)
+ else:
+ call = RunnerCall(command, stdout=cib)
+ self.__calls.place(name, call, before=before, instead=instead)
+
def push(
self,
modifiers=None,
diff --git a/pcs/test/tools/command_env/config_runner_pcmk.py b/pcs/test/tools/command_env/config_runner_pcmk.py
index 6499ef8..2dc1c44 100644
--- a/pcs/test/tools/command_env/config_runner_pcmk.py
+++ b/pcs/test/tools/command_env/config_runner_pcmk.py
@@ -3,6 +3,7 @@ from __future__ import (
division,
print_function,
)
+import os
from lxml import etree
@@ -14,6 +15,35 @@ from pcs.test.tools.xml import etree_to_str
DEFAULT_WAIT_TIMEOUT = 10
WAIT_TIMEOUT_EXPIRED_RETURNCODE = 62
+AGENT_FILENAME_MAP = {
+ "ocf:heartbeat:Dummy": "resource_agent_ocf_heartbeat_dummy.xml",
+ "ocf:pacemaker:remote": "resource_agent_ocf_pacemaker_remote.xml",
+}
+
+def fixture_state_resources_xml(
+ resource_id="A", resource_agent="ocf::heartbeat:Dummy", role="Started",
+ failed="false", node_name="node1"
+):
+ return(
+ """
+ <resources>
+ <resource
+ id="{resource_id}"
+ resource_agent="{resource_agent}"
+ role="{role}"
+ failed="{failed}"
+ >
+ <node name="{node_name}" id="1" cached="false"/>
+ </resource>
+ </resources>
+ """.format(
+ resource_id=resource_id,
+ resource_agent=resource_agent,
+ role=role,
+ failed=failed,
+ node_name=node_name,
+ )
+ )
class PcmkShortcuts(object):
def __init__(self, calls):
@@ -23,7 +53,7 @@ class PcmkShortcuts(object):
def load_state(
self, name="runner.pcmk.load_state", filename="crm_mon.minimal.xml",
- resources=None
+ resources=None, raw_resources=None
):
"""
Create call for loading pacemaker state.
@@ -32,7 +62,15 @@ class PcmkShortcuts(object):
string filename -- points to file with the status in the content
string resources -- xml - resources section, will be put to state
"""
+ if resources and raw_resources is not None:
+ raise AssertionError(
+ "Cannot use 'resources' and 'raw_resources' together"
+ )
+
state = etree.fromstring(open(rc(filename)).read())
+ if raw_resources is not None:
+ resources = fixture_state_resources_xml(**raw_resources)
+
if resources:
state.append(complete_state_resources(etree.fromstring(resources)))
@@ -48,7 +86,7 @@ class PcmkShortcuts(object):
self,
name="runner.pcmk.load_agent",
agent_name="ocf:heartbeat:Dummy",
- agent_filename="resource_agent_ocf_heartbeat_dummy.xml",
+ agent_filename=None,
instead=None,
):
"""
@@ -61,15 +99,68 @@ class PcmkShortcuts(object):
string instead -- key of call instead of which this new call is to be
placed
"""
+
+ if agent_filename:
+ agent_metadata_filename = agent_filename
+ elif agent_name in AGENT_FILENAME_MAP:
+ agent_metadata_filename = AGENT_FILENAME_MAP[agent_name]
+ else:
+ raise AssertionError((
+ "Filename with metadata of agent '{0}' not specified.\n"
+ "Please specify file with metadata for agent:\n"
+ " a) explicitly for this test:"
+ " config.runner.pcmk.load_agent(agent_name='{0}',"
+ " filename='FILENAME_HERE.xml')\n"
+ " b) implicitly for agent '{0}' in 'AGENT_FILENAME_MAP' in"
+ " '{1}'\n"
+ "Place agent metadata into '{2}FILENAME_HERE.xml'"
+ ).format(agent_name, os.path.realpath(__file__), rc("")))
+
self.__calls.place(
name,
RunnerCall(
"crm_resource --show-metadata {0}".format(agent_name),
- stdout=open(rc(agent_filename)).read()
+ stdout=open(rc(agent_metadata_filename)).read()
),
instead=instead,
)
+ def load_stonithd_metadata(
+ self,
+ name="runner.pcmk.load_stonithd_metadata",
+ stdout=None,
+ stderr="",
+ returncode=0,
+ instead=None,
+ before=None,
+ ):
+ """
+ Create a call for loading stonithd metadata - additional fence options
+
+ string name -- the key of this call
+ string stdout -- stonithd stdout, default metadata if None
+ string stderr -- stonithd stderr
+ int returncode -- stonithd returncode
+ string instead -- the key of a call instead of which this new call is to
+ be placed
+ string before -- the key of a call before which this new call is to be
+ placed
+ """
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "/usr/libexec/pacemaker/stonithd metadata",
+ stdout=(
+ stdout if stdout is not None
+ else open(rc("stonithd_metadata.xml")).read()
+ ),
+ stderr=stderr,
+ returncode=returncode
+ ),
+ before=before,
+ instead=instead,
+ )
+
def resource_cleanup(
self,
name="runner.pcmk.cleanup",
@@ -153,7 +244,10 @@ class PcmkShortcuts(object):
before=before
)
- def verify(self, name="verify", cib_tempfile=None, stderr=None, verbose=False):
+ def verify(
+ self, name="runner.pcmk.verify", cib_tempfile=None, stderr=None,
+ verbose=False
+ ):
"""
Create call that checks that wait for idle is supported
@@ -172,3 +266,9 @@ class PcmkShortcuts(object):
returncode=(0 if stderr is None else 55),
),
)
+
+ def remove_node(self, node_name, name="runner.pcmk.remove_node"):
+ self.__calls.place(
+ name,
+ RunnerCall("crm_node --force --remove {0}".format(node_name)),
+ )
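With raw_resources the resources section of the state is generated by fixture_state_resources_xml above instead of being passed in as raw xml; a hedged sketch:

    config.runner.pcmk.load_state(
        raw_resources=dict(
            resource_id="dummy",
            resource_agent="ocf::pacemaker:Dummy",
            node_name="node-1",
        )
    )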
diff --git a/pcs/test/tools/command_env/config_runner_systemctl.py b/pcs/test/tools/command_env/config_runner_systemctl.py
index f3b10a1..a9ffffd 100644
--- a/pcs/test/tools/command_env/config_runner_systemctl.py
+++ b/pcs/test/tools/command_env/config_runner_systemctl.py
@@ -4,6 +4,8 @@ from __future__ import (
print_function,
)
+from textwrap import dedent
+
from pcs import settings
from pcs.test.tools.command_env.mock_runner import Call as RunnerCall
@@ -33,3 +35,80 @@ class SystemctlShortcuts(object):
**args
)
)
+
+ def is_enabled(
+ self, service, is_enabled=True,
+ name="runner_systemctl.is_enabled", before=None, instead=None
+ ):
+ args = dict(
+ stdout="disabled\n",
+ returncode=1,
+ )
+ if is_enabled:
+ args = dict(
+ stdout="enabled\n",
+ returncode=0,
+ )
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "{bin_path} is-enabled {service}.service".format(
+ bin_path=settings.systemctl_binary,
+ service=service,
+ ),
+ **args
+ ),
+ before=before,
+ instead=instead
+ )
+
+ def list_unit_files(
+ self, unit_file_states,
+ name="runner_systemctl.list_unit_files", before=None, instead=None
+ ):
+ if not unit_file_states:
+ output = dedent(
+ """\
+ UNIT FILE STATE
+
+ 0 unit files listed.
+ """
+ )
+ else:
+ unit_len = max(len(x) for x in unit_file_states) + len(".service")
+ state_len = max(len(x) for x in unit_file_states.values())
+ pattern = "{unit:<{unit_len}} {state:<{state_len}}"
+ lines = (
+ [
+ pattern.format(
+ unit="UNIT FILE", unit_len=unit_len,
+ state="STATE", state_len=state_len
+ )
+ ]
+ +
+ [
+ pattern.format(
+ unit="{0}.service".format(unit), unit_len=unit_len,
+ state=state, state_len=state_len
+ )
+ for unit, state in unit_file_states.items()
+ ]
+ +
+ [
+ "",
+ "{0} unit files listed.".format(len(unit_file_states))
+ ]
+ )
+ output = "\n".join(lines)
+
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "{bin_path} list-unit-files --full".format(
+ bin_path=settings.systemctl_binary
+ ),
+ stdout=output
+ ),
+ before=before,
+ instead=instead
+ )
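list_unit_files renders the given dict into the tabular output of 'systemctl list-unit-files --full'; a hedged sketch:

    config.runner.systemctl.list_unit_files({
        "sbd": "enabled",
        "corosync": "disabled",
    })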
diff --git a/pcs/test/tools/command_env/mock_fs.py b/pcs/test/tools/command_env/mock_fs.py
new file mode 100644
index 0000000..e93d254
--- /dev/null
+++ b/pcs/test/tools/command_env/mock_fs.py
@@ -0,0 +1,135 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import os.path
+import site
+
+
+CALL_TYPE_FS = "CALL_TYPE_FS"
+
+_FUNC_ARGS = {
+ "open": ["name", "mode", "buffering"],
+ "os.path.exists": ["path"],
+ "os.chmod": ["fd", "mode"],
+ "os.chown": ["fd", "uid", "gid"],
+}
+
+def _ensure_consistent_args(func_name, call_args, call_kwargs):
+ if len(call_args) > len(_FUNC_ARGS[func_name]):
+ raise AssertionError(
+ "{0}() too many positional arguments ({1} > {2})".format(
+ func_name,
+ len(call_args),
+ len(_FUNC_ARGS[func_name]),
+ )
+ )
+
+ param_intersection = (
+ set(_FUNC_ARGS[func_name][:len(call_args)])
+ .intersection(call_kwargs.keys())
+ )
+ if(param_intersection):
+ raise TypeError(
+ "{0}() got multiple values for keyword argument(s) '{1}'".format(
+ func_name,
+ "', '".join(param_intersection)
+ )
+ )
+
+def _get_all_args_as_kwargs(func_name, call_args, call_kwargs):
+ _ensure_consistent_args(func_name, call_args, call_kwargs)
+ kwargs = call_kwargs.copy()
+ for i, arg in enumerate(call_args):
+ kwargs[_FUNC_ARGS[func_name][i]] = arg
+ return kwargs
+
+class Call(object):
+ type = CALL_TYPE_FS
+
+ def __init__(
+ self, func_name, return_value=None, side_effect=None, call_kwargs=None
+ ):
+ """
+ string func_name -- name of the mocked function, a key of _FUNC_ARGS
+ dict call_kwargs -- expected arguments of the call
+ return_value -- value the mocked call returns
+ side_effect -- exception to be raised by the mocked call instead
+ """
+ call_kwargs = call_kwargs if call_kwargs else {}
+
+ #TODO side effect with return_value - mutually exclusive
+
+ self.type = CALL_TYPE_FS
+ self.func_name = func_name
+ self.call_kwargs = call_kwargs
+ self.return_value = return_value
+ self.side_effect = side_effect
+
+ def finish(self):
+ if self.side_effect:
+ if isinstance(self.side_effect, Exception):
+ #pylint: disable=raising-bad-type
+ raise self.side_effect
+ raise AssertionError(
+ "side_effect other than instance of exception not supported yet"
+ )
+
+ return self.return_value
+
+ def __repr__(self):
+ return str("<Fs '{0}' kwargs={1}>").format(
+ self.func_name,
+ self.call_kwargs,
+ )
+
+ def __ne__(self, other):
+ return (
+ self.func_name != other.func_name
+ or
+ self.call_kwargs != other.call_kwargs
+ )
+
+def get_fs_mock(call_queue):
+ package_dir_list = site.getsitepackages()
+ package_dir_list.append(os.path.realpath(
+ os.path.dirname(os.path.abspath(__file__))+"/../../.."
+ ))
+ def get_fs_call(func_name, original_call):
+ def call_fs(*args, **kwargs):
+ # Standard python unittest tries to open some python code (e.g. it
+ # caches the test file when the test raises AssertionError). This
+ # happens before the cleanup is called, so at that moment the
+ # function open is still mocked.
+ # Pcs commands should not open files inside a python package, so an
+ # attempt to open a file inside the pcs package almost certainly
+ # comes from outside of a library command and we provide the
+ # original function.
+ if func_name == "open":
+ for python_package_dir in package_dir_list:
+ if args[0].startswith(python_package_dir):
+ return original_call(*args, **kwargs)
+
+ real_call = Call(
+ func_name,
+ call_kwargs=_get_all_args_as_kwargs(func_name, args, kwargs)
+ )
+ dummy_i, expected_call = call_queue.take(
+ CALL_TYPE_FS,
+ repr(real_call)
+ )
+
+ if expected_call != real_call:
+ raise call_queue.error_with_context(
+ "\n expected: '{0}'\n but was: '{1}'".format(
+ expected_call,
+ real_call
+ )
+ )
+
+ return expected_call.finish()
+ return call_fs
+ return get_fs_call
+
+def is_fs_call_in(call_queue):
+ return call_queue.has_type(CALL_TYPE_FS)
diff --git a/pcs/test/tools/command_env/mock_node_communicator.py b/pcs/test/tools/command_env/mock_node_communicator.py
index f96872d..d45cf10 100644
--- a/pcs/test/tools/command_env/mock_node_communicator.py
+++ b/pcs/test/tools/command_env/mock_node_communicator.py
@@ -4,6 +4,8 @@ from __future__ import (
print_function,
)
+import json
+
try:
# python 2
from urlparse import parse_qs
@@ -11,6 +13,15 @@ except ImportError:
# python 3
from urllib.parse import parse_qs
+from pcs.common import pcs_pycurl as pycurl
+from pcs.common.node_communicator import(
+ RequestTarget,
+ RequestData,
+ Request,
+ Response,
+)
+from pcs.test.tools.custom_mock import MockCurlSimple
+
CALL_TYPE_HTTP_ADD_REQUESTS = "CALL_TYPE_HTTP_ADD_REQUESTS"
CALL_TYPE_HTTP_START_LOOP = "CALL_TYPE_HTTP_START_LOOP"
@@ -22,7 +33,7 @@ def log_request(request):
]
if request.target.address_list != [request.target.label]:
- label_data.append(("addres_list", request.target.address_list))
+ label_data.append(("address_list", request.target.address_list))
if request.target.port != "2224":
label_data.append(("port", request.target.port))
@@ -39,7 +50,7 @@ def log_response(response, indent=0):
]
if response.request.target.address_list != [response.request.target.label]:
- label_data.append(("addres_list", response.request.target.address_list))
+ label_data.append(("address_list", response.request.target.address_list))
label_data.append(("was_connected", response.was_connected))
@@ -75,7 +86,6 @@ def different_request_lists(expected_request_list, request_list):
)
)
-
def bad_request_list_content(errors):
return AssertionError(
"Method add_request of NodeCommunicator get different requests"
@@ -94,6 +104,144 @@ def bad_request_list_content(errors):
)
)
+def _communication_to_response(
+ label, address_list, action, param_list, port, token, response_code,
+ output, debug_output, was_connected, errno, error_msg
+):
+ return Response(
+ MockCurlSimple(
+ info={pycurl.RESPONSE_CODE: response_code},
+ output=output,
+ debug_output=debug_output,
+ request=Request(
+ RequestTarget(label, address_list, port, token),
+ RequestData(action, param_list),
+ )
+ ),
+ was_connected=was_connected,
+ errno=errno,
+ error_msg=error_msg,
+ )
+
+def create_communication(
+ communication_list, action="", param_list=None, port=None, token=None,
+ response_code=200, output="", debug_output="", was_connected=True,
+ errno=0, error_msg_template=None
+):
+ """
+ list of dict communication_list -- is setting for one request - response
+ it accepts keys:
+ label -- required, see RequestTarget
+ action -- pcsd url, see RequestData
+ param_list -- list of pairs, see RequestData
+ port -- see RequestTarget
+ token=None -- see RequestTarget
+ response_code -- http response code
+ output -- http response output
+ debug_output -- pycurl debug output
+ was_connected -- see Response
+ errno -- see Response
+ error_msg -- see Response
+ if some key is not present, it is filled in from the common values -
+ the rest of the args of this function (except name,
+ communication_list, error_msg_template)
+ string error_msg_template -- template, the keys for format function will
+ be taken from appropriate item of communication_list
+ string action -- pcsd url, see RequestData
+ list of pairs (tuple) param_list -- see RequestData
+ string port -- see RequestTarget
+ string token=None -- see RequestTarget
+ string response_code -- http response code
+ string output -- http response output
+ string debug_output -- pycurl debug output
+ bool was_connected -- see Response
+ int errno -- see Response
+ string error_msg -- see Response
+ """
+ response_list = []
+
+ common = dict(
+ action=action,
+ param_list=param_list if param_list else (),
+ port=port,
+ token=token,
+ response_code=response_code,
+ output=output,
+ debug_output=debug_output,
+ was_connected=was_connected,
+ errno=errno,
+ )
+ for communication in communication_list:
+ if "address_list" not in communication:
+ communication["address_list"] = [communication["label"]]
+
+ full = common.copy()
+ full.update(communication)
+
+ if "error_msg" not in full:
+ full["error_msg"] = (
+ "" if not error_msg_template
+ else error_msg_template.format(**full)
+ )
+ response_list.append(
+ _communication_to_response(**full)
+ )
+
+ request_list = [response.request for response in response_list]
+
+ return request_list, response_list
+
+def place_multinode_call(
+ calls, name, node_labels=None, communication_list=None, **kwargs
+):
+ """
+ Shortcut for adding a call sending the same request to one or more nodes
+
+ CallListBuilder calls -- list of expected calls
+ string name -- the key of this call
+ list node_labels -- create success responses from these nodes
+ list communication_list -- use these custom responses
+ **kwargs -- see __module__.create_communication
+ """
+ if (
+ (not node_labels and not communication_list)
+ or
+ (node_labels and communication_list)
+ ):
+ raise AssertionError(
+ "Exactly one of 'node_labels', 'communication_list' "
+ "must be specified"
+ )
+ communication_list = communication_list or [
+ {"label": label} for label in node_labels
+ ]
+ place_communication(calls, name, communication_list, **kwargs)
+
+
+def place_requests(calls, name, request_list):
+ calls.place(name, AddRequestCall(request_list))
+
+
+def place_responses(calls, name, response_list):
+ calls.place(name, StartLoopCall(response_list))
+
+
+def place_communication(calls, name, communication_list, **kwargs):
+ if isinstance(communication_list[0], dict):
+ communication_list = [communication_list]
+
+ request_list = []
+ response_list = []
+ for com_list in communication_list:
+ req_list, res_list = create_communication(com_list, **kwargs)
+ request_list.append(req_list)
+ response_list.extend(res_list)
+
+ place_requests(calls, "{0}_requests".format(name), request_list[0])
+ place_responses(calls, "{0}_responses".format(name), response_list)
+ for i, req_list in enumerate(request_list[1:], start=1):
+ place_requests(calls, "{0}_requests_{1}".format(name, i), req_list)
+
class AddRequestCall(object):
type = CALL_TYPE_HTTP_ADD_REQUESTS
@@ -127,6 +275,34 @@ class StartLoopCall(object):
def __repr__(self):
return str("<HttpStartLoop '{0}'>").format(self.response_list)
+def _compare_request_data(expected, real):
+ if expected == real:
+ return True
+
+ # If data is in json format it is not possible to compare it as string.
+ # Because python 3 does not keep key order of dict. So if is response
+ # builded by json.dumps(some_dict) the result string can vary.
+
+ # Let's try known use: [('data_json', 'some_json_here')]
+ # It means only one pair "data_json" + json string: everything else is False
+
+ if len(expected) != 1:
+ return False
+
+ if len(real) != 1:
+ return False
+
+ if expected[0][0] != real[0][0] or expected[0][0] != "data_json":
+ return False
+
+ try:
+ expected_data = json.loads(expected[0][1])
+ real_data = json.loads(real[0][1])
+ return expected_data == real_data
+ except ValueError:
+ return False
+
+
class NodeCommunicator(object):
def __init__(self, call_queue=None):
self.__call_queue = call_queue
@@ -171,10 +347,13 @@ class NodeCommunicator(object):
real_request.target.port
)
- if expected_request.data != real_request.data:
+ if not _compare_request_data(
+ expected_request._data.structured_data,
+ real_request._data.structured_data
+ ):
diff["data"] = (
- parse_qs(expected_request.data),
- parse_qs(real_request.data)
+ expected_request._data.structured_data,
+ real_request._data.structured_data,
)
if diff:
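In tests these helpers are normally reached through the config.http shortcuts; a hedged sketch of a generic call with per-node overrides (the action name is illustrative):

    config.http.add_communication(
        "http.custom",
        [
            dict(label="node-1"),
            dict(
                label="node-2",
                was_connected=False,
                errno=6,
                error_msg="Failed connect to node-2:2224;",
            ),
        ],
        action="remote/some_action",
        response_code=200,
    )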
diff --git a/pcs/test/tools/command_env/mock_runner.py b/pcs/test/tools/command_env/mock_runner.py
index 10787f1..393e499 100644
--- a/pcs/test/tools/command_env/mock_runner.py
+++ b/pcs/test/tools/command_env/mock_runner.py
@@ -60,6 +60,7 @@ COMMAND_COMPLETIONS = {
),
"crm_diff": path.join(settings.pacemaker_binaries, "crm_diff"),
"crm_mon": path.join(settings.pacemaker_binaries, "crm_mon"),
+ "crm_node": path.join(settings.pacemaker_binaries, "crm_node"),
"crm_resource": path.join(settings.pacemaker_binaries, "crm_resource"),
"crm_verify": path.join(settings.pacemaker_binaries, "crm_verify"),
"sbd": settings.sbd_binary,
diff --git a/pcs/test/tools/command_env/tools.py b/pcs/test/tools/command_env/tools.py
index 2331d41..d4d4ca8 100644
--- a/pcs/test/tools/command_env/tools.py
+++ b/pcs/test/tools/command_env/tools.py
@@ -18,12 +18,18 @@ def get_env_tools(
default_wait_timeout=DEFAULT_WAIT_TIMEOUT,
default_wait_error_returncode=WAIT_TIMEOUT_EXPIRED_RETURNCODE,
exception_reports_in_processor_by_default=True,
+ local_extensions=None,
+ booth_env=None
):
"""
Shortcut for preparing EnvAssistant and Config
TestCase test_case -- corresponding test_case is used to register a cleanup
method - to assert that everything is finished
+ dict local_extensions -- key is the name of a local extension, value
+ is a class that will be used for the local extension, so it will be
+ possible to use something like this in a config:
+ config.my_local_extension.my_local_call_shortcut()
"""
env_assistant = EnvAssistant(
@@ -38,4 +44,11 @@ def get_env_tools(
runner.pcmk.default_wait_timeout = default_wait_timeout
runner.pcmk.default_wait_error_returncode = default_wait_error_returncode
+ if local_extensions:
+ for name, ExtensionClass in local_extensions.items():
+ env_assistant.config.add_extension(
+ name,
+ ExtensionClass,
+ )
+
return env_assistant, env_assistant.config
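A hedged sketch of a local extension (the class and the shortcut it provides are hypothetical, and the shortcuts it composes are assumed to exist in the test tools); the extension class receives the call list, the wrap helper and the config itself, matching Config.add_extension above:

    class LocalShortcuts(object):
        def __init__(self, calls, wrap_helper, config):
            self.config = config

        def load_cib_and_state(self):
            # compose existing shortcuts into one test-specific step
            self.config.runner.cib.load()
            self.config.runner.pcmk.load_state()

    env_assist, config = get_env_tools(
        test_case=self,
        local_extensions={"local": LocalShortcuts},
    )
    config.local.load_cib_and_state()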
diff --git a/pcs/test/tools/custom_mock.py b/pcs/test/tools/custom_mock.py
index 1d545dc..c05a5a4 100644
--- a/pcs/test/tools/custom_mock.py
+++ b/pcs/test/tools/custom_mock.py
@@ -108,9 +108,14 @@ class MockCurl(object):
class MockCurlSimple(object):
def __init__(self, info=None, output=b"", debug_output=b"", request=None):
self.output_buffer = io.BytesIO()
- self.output_buffer.write(output)
+ self.output_buffer.write(
+ output if isinstance(output, bytes) else output.encode("utf-8")
+ )
self.debug_buffer = io.BytesIO()
- self.debug_buffer.write(debug_output)
+ self.debug_buffer.write(
+ debug_output if isinstance(debug_output, bytes)
+ else debug_output.encode("utf-8")
+ )
self.request_obj = request
self._info = info if info else {}
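(With this change the buffers accept either bytes or text; a brief
illustrative sketch:)

# Both forms are now equivalent; text is encoded as UTF-8 internally.
MockCurlSimple(output=b"status: ok")
MockCurlSimple(output=u"status: ok")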
diff --git a/pcs/test/tools/fixture.py b/pcs/test/tools/fixture.py
index fe8954b..a81e9ad 100644
--- a/pcs/test/tools/fixture.py
+++ b/pcs/test/tools/fixture.py
@@ -49,6 +49,11 @@ def _default_element_attributes(element, default_attributes):
if name not in element.attrib:
element.attrib[name] = value
+def report_variation(report, **info):
+ updated_info = report[2].copy()
+ updated_info.update(info)
+ return report[0], report[1], updated_info, report[3]
+
def debug(code, **kwargs):
return severities.DEBUG, code, kwargs, None
@@ -61,6 +66,95 @@ def error(code, force_code=None, **kwargs):
def info(code, **kwargs):
return severities.INFO, code, kwargs, None
+class ReportStore(object):
+ def __init__(self, names=None, reports=None):
+ if not names:
+ names = []
+
+ duplicate_names = set([n for n in names if names.count(n) > 1])
+ if duplicate_names:
+ raise AssertionError(
+ "Duplicate names are not allowed in ReportStore. "
+ " Found duplications:\n '{0}'".format(
+ "'\n '".join(duplicate_names)
+ )
+ )
+
+ self.__names = names
+ self.__reports = reports or []
+ if len(self.__names) != len(self.__reports):
+ raise AssertionError("Same count reports as names required")
+
+ @property
+ def reports(self):
+ return list(self.__reports)
+
+ def adapt(self, name, **info):
+ index = self.__names.index(name)
+ return ReportStore(self.__names, [
+ report if i != index else report_variation(report, **info)
+ for i, report in enumerate(self.__reports)
+ ])
+
+ def adapt_multi(self, name_list, **info):
+ names, reports = zip(*[
+ (
+ name,
+ report_variation(self[name], **info) if name in name_list
+ else self[name]
+ ) for name in self.__names
+ ])
+ return ReportStore(list(names), list(reports))
+
+ def info(self, name, code, **kwargs):
+ return self.__append(name, info(code, **kwargs))
+
+ def warn(self, name, code, **kwargs):
+ return self.__append(name, warn(code, **kwargs))
+
+ def error(self, name, code, force_code=None, **kwargs):
+ return self.__append(name, error(code, force_code=force_code, **kwargs))
+
+ def as_warn(self, name, as_name):
+ report = self[name]
+ return self.__append(as_name, warn(report[1], **report[2]))
+
+ def copy(self, name, as_name, **info):
+ return self.__append(as_name, report_variation(self[name], **info))
+
+ def remove(self, *name_list):
+ names, reports = zip(*[
+ (name, self[name]) for name in self.__names
+ if name not in name_list
+ ])
+ return ReportStore(list(names), list(reports))
+
+ def select(self, *name_list):
+ names, reports = zip(*[(name, self[name]) for name in name_list])
+ return ReportStore(list(names), list(reports))
+
+ def only(self, name, **info):
+ return ReportStore([name], [report_variation(self[name], **info)])
+
+ def __getitem__(self, spec):
+ if not isinstance(spec, slice):
+ return self.__reports[self.__names.index(spec)]
+
+ assert spec.step is None, "Step is not supported in slicing"
+ start = None if spec.start is None else self.__names.index(spec.start)
+ stop = None if spec.stop is None else self.__names.index(spec.stop)
+
+ return ReportStore(self.__names[start:stop], self.__reports[start:stop])
+
+ def __add__(self, other):
+ return ReportStore(
+ self.__names + other.__names,
+ self.__reports + other.__reports,
+ )
+
+ def __append(self, name, report):
+ return ReportStore(self.__names + [name], self.__reports + [report])
+
def report_not_found(res_id, context_type=""):
return (
severities.ERROR,
@@ -69,7 +163,9 @@ def report_not_found(res_id, context_type=""):
"context_type": context_type,
"context_id": "",
"id": res_id,
- "id_description": "resource/clone/master/group/bundle",
+ "expected_types": [
+ "bundle", "clone", "group", "master", "primitive"
+ ],
},
None
)
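(To show how ReportStore is meant to be chained, a small sketch - the names
and report codes below are placeholders, not real pcs report codes:)

reports = (
    ReportStore()
    .info("searching", "SOME_INFO_CODE", service="sbd")
    .warn("degraded", "SOME_WARN_CODE", service="sbd")
    .error("failed", "SOME_ERROR_CODE", node="node1")
)

reports["degraded"]                     # look a report up by its name
reports.adapt("failed", node="node2")   # new store, one report's info tweaked
reports["searching":"failed"]           # name-based slicing (stop excluded)
reports.remove("degraded").reports      # plain list with one report dropped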
diff --git a/pcs/test/tools/fixture_cib.py b/pcs/test/tools/fixture_cib.py
index cd1c7b1..5e7beee 100644
--- a/pcs/test/tools/fixture_cib.py
+++ b/pcs/test/tools/fixture_cib.py
@@ -89,6 +89,18 @@ def replace_all(replacements):
_replace(_find_in(cib_tree, xpath), _xml_to_element(new_content))
return replace
+def append_all(append_map):
+ """
+ Return a function that appends new elements as children of specified
+ (xpath) elements
+ dict append_map -- each key is an xpath pointing to a target element (the
+ append target), each value is the content to append
+ """
+ def append(cib_tree):
+ for xpath, new_content in append_map.items():
+ _find_in(cib_tree, xpath).append(_xml_to_element(new_content))
+ return append
+
+
#Possible modifier shortcuts are defined here.
#Keep in mind that every key will be a named parameter in the config function
#(see modifier_shortcuts param in some of pcs.test.tools.command_env.config_*
@@ -106,6 +118,7 @@ def replace_all(replacements):
MODIFIER_GENERATORS = {
"remove": remove,
"replace": replace_all,
+ "append": append_all,
"resources": lambda xml: replace_all({"./configuration/resources": xml}),
"optional_in_conf": lambda xml: put_or_replace("./configuration", xml),
#common modifier `put_or_replace` makes no sense - see explanation inside
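(A hedged example of the new "append" shortcut; the config call and xml are
illustrative and assume a modifier-shortcut-aware call such as
runner.cib.load:)

# Illustrative: append one more primitive under the resources element.
config.runner.cib.load(append={
    "./configuration/resources":
        '<primitive class="ocf" id="D2" provider="heartbeat" type="Dummy"/>',
})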
diff --git a/pcs/test/tools/pcs_unittest.py b/pcs/test/tools/pcs_unittest.py
index 20f4f0c..45a73d9 100644
--- a/pcs/test/tools/pcs_unittest.py
+++ b/pcs/test/tools/pcs_unittest.py
@@ -2,18 +2,12 @@ import sys
#The unittest package has no mock module before Python 3.3. In Python 3 the
#mock module is not imported by * because it is not imported in
#unittest/__init__.py
-major, minor = sys.version_info[:2]
-if major == 2 and minor == 6:
- #we use features that are missing before 2.7 (like test skipping,
- #assertRaises as context manager...) so we need unittest2
- from unittest2 import *
+major = sys.version_info.major
+from unittest import *
+try:
+ import unittest.mock as mock
+except ImportError:
import mock
-else:
- from unittest import *
- try:
- import unittest.mock as mock
- except ImportError:
- import mock
#backport of assert_not_called (new in version 3.5)
if not hasattr(mock.Mock, "assert_not_called"):
@@ -179,4 +173,4 @@ def ensure_raise_from_iterable_side_effect():
mock.Mock.__call__ = create_new_call(mock.Mock.__call__, inPy3k=(major==3))
ensure_raise_from_iterable_side_effect()
-del major, minor, sys
+del major, sys
diff --git a/pcs/usage.py b/pcs/usage.py
index 090a150..ffd2259 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -873,18 +873,19 @@ Commands:
delete <stonith id>
Remove stonith id from configuration.
- enable <stonith id> [--wait[=n]]
- Allow the cluster to use the stonith device. If --wait is specified, pcs
- will wait up to 'n' seconds for the stonith device to start and then
- return 0 if the stonith device is started, or 1 if the stonith device
- has not yet started. If 'n' is not specified it defaults to 60 minutes.
-
- disable <stonith id> [--wait[=n]]
- Attempt to stop the stonith device if it is running and disallow the
- cluster to use it. If --wait is specified, pcs will wait up to 'n'
- seconds for the stonith device to stop and then return 0 if the stonith
- device is stopped or 1 if the stonith device has not stopped. If 'n' is
- not specified it defaults to 60 minutes.
+ enable <stonith id>... [--wait[=n]]
+ Allow the cluster to use the stonith devices. If --wait is specified,
+ pcs will wait up to 'n' seconds for the stonith devices to start and
+ then return 0 if the stonith devices are started, or 1 if the stonith
+ devices have not yet started. If 'n' is not specified it defaults to 60
+ minutes.
+
+ disable <stonith id>... [--wait[=n]]
+ Attempt to stop the stonith devices if they are running and disallow
+ the cluster to use them. If --wait is specified, pcs will wait up to
+ 'n' seconds for the stonith devices to stop and then return 0 if the
+ stonith devices are stopped or 1 if the stonith devices have not
+ stopped. If 'n' is not specified it defaults to 60 minutes.
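(For example, with the plural form several devices are handled in one call;
the device ids are hypothetical:)

    pcs stonith enable fence-node1 fence-node2 --wait=30
    pcs stonith disable fence-node1 fence-node2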
cleanup [<stonith id>] [--node <node>]
Make the cluster forget failed operations from history of the stonith
@@ -1309,6 +1310,9 @@ Commands:
--full will give more detailed output. If <cluster name> is specified,
only information about the specified cluster will be displayed.
+ booth
+ Print current status of booth on the local node.
+
nodes [corosync | both | config]
View current status of nodes from pacemaker. If 'corosync' is
specified, view current status of nodes from corosync instead. If
diff --git a/pcs/utils.py b/pcs/utils.py
index 5b60823..b589a2a 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -88,7 +88,7 @@ except ImportError:
from urllib.parse import urlencode as urllib_urlencode
-PYTHON2 = sys.version[0] == "2"
+PYTHON2 = (sys.version_info.major == 2)
# usefile & filename variables are set in pcs module
usefile = False
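(The PYTHON2 check above moves from string indexing to the structured
version tuple; a standalone sketch of why:)

import sys

# sys.version is a free-form string such as "2.7.14 (default, ...)"; its
# first character is a str and the test would silently break on a
# hypothetical major version >= 10. sys.version_info.major is an int:
PYTHON2 = (sys.version_info.major == 2)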
@@ -973,7 +973,9 @@ def touch_cib_file(filename):
try:
write_empty_cib(filename)
except EnvironmentError as e:
- err("Unable to write to file: '{0}': '{1}'".format(filename, str(e)))
+ err(
+ "Unable to write to file: '{0}': '{1}'".format(filename, str(e))
+ )
# Run command, with environment and return (output, retval)
# DEPRECATED, please use lib.external.CommandRunner via utils.cmd_runner()
@@ -1449,11 +1451,7 @@ def validate_constraint_resource(dom, resource_id):
return True, "", resource_id
if "--force" in pcs_options:
- return (
- True,
- "",
- clone_el.getAttribute("id") if clone_el else resource_id
- )
+ return True, "", clone_el.getAttribute("id")
if clone_el.tagName == "clone":
return (
@@ -2831,7 +2829,7 @@ def get_library_wrapper():
return Library(get_cli_env(), get_middleware_factory())
-def get_modificators():
+def get_modifiers():
#please keep in mind that this is not the final implementation
#besides missing support of other possible options, cases may arise that
#cannot be solved using a dict - for example "wait" - maybe there will be
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
index c47b1c4..e2f219c 100644
--- a/pcsd/bootstrap.rb
+++ b/pcsd/bootstrap.rb
@@ -49,7 +49,7 @@ def get_pcs_path()
end
end
-PCS_VERSION = '0.9.162'
+PCS_VERSION = '0.9.163'
# unique instance signature, allows detection of daemon restarts
DAEMON_INSTANCE_SIGNATURE = Digest::SHA2.hexdigest("#{Time.now} #{rand()}")
COROSYNC = COROSYNC_BINARIES + "corosync"
diff --git a/pcsd/cfgsync.rb b/pcsd/cfgsync.rb
index 14b5573..55934c0 100644
--- a/pcsd/cfgsync.rb
+++ b/pcsd/cfgsync.rb
@@ -730,7 +730,7 @@ module Cfgsync
def self.save_sync_new_version(
config, nodes, cluster_name, fetch_on_conflict, tokens={}, ports={}
)
- if not cluster_name or cluster_name.empty?
+ if not cluster_name or cluster_name.empty? or not nodes or nodes.empty?
# we run on a standalone host, no config syncing
config.version += 1
config.save()
@@ -786,7 +786,7 @@ module Cfgsync
with_new_tokens.tokens.update(new_tokens)
with_new_tokens.ports.update(new_ports)
config_new = PcsdTokens.from_text(with_new_tokens.text)
- if not cluster_name or cluster_name.empty?
+ if not cluster_name or cluster_name.empty? or not nodes or nodes.empty?
# we run on a standalone host, no config syncing
config_new.version += 1
config_new.save()
@@ -795,7 +795,7 @@ module Cfgsync
# we run in a cluster so we need to sync the config
publisher = ConfigPublisher.new(
PCSAuth.getSuperuserAuth(), [config_new], nodes, cluster_name,
- new_tokens
+ new_tokens, new_ports
)
old_configs, node_responses = publisher.publish()
if not old_configs.include?(config_new.class.name)
diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb
index 21092c5..3675719 100644
--- a/pcsd/cluster_entity.rb
+++ b/pcsd/cluster_entity.rb
@@ -516,6 +516,28 @@ module ClusterEntity
@utilization << ClusterEntity::NvPair.from_dom(e)
}
@stonith = @_class == 'stonith'
+ if @stonith
+ @instance_attr.each{ |attr|
+ if attr.name == 'action'
+ @warning_list << {
+ :message => (
+ 'This fence-device has the "action" option set, it is ' +
+ 'recommended to set "pcmk_off_action", "pcmk_reboot_action" ' +
+ 'instead'
+ )
+ }
+ end
+ if attr.name == 'method' and attr.value == 'cycle'
+ @warning_list << {
+ :message => (
+ 'This fence-device has the "method" option set to "cycle" ' +
+ 'which is potentially dangerous, please consider using ' +
+ '"onoff"'
+ )
+ }
+ end
+ }
+ end
if @id and rsc_status
@crm_status = rsc_status[@id] || []
end
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 68d2e7e..07be9b1 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -1247,11 +1247,10 @@ def pcs_auth(auth_user, nodes, username, password, force=false, local=true)
auth_responses.each { |node, response|
if 'ok' == response['status']
new_tokens[node] = response['token']
- ports[node] = nodes[node]
+ ports[node] = nodes[node] || PCSD_DEFAULT_PORT
end
}
if not new_tokens.empty?
- cluster_nodes = get_corosync_nodes()
tokens_cfg = Cfgsync::PcsdTokens.from_file()
# only tokens used in pcsd-to-pcsd communication can and need to be synced
# those are accessible only when running under root account
@@ -1262,10 +1261,10 @@ def pcs_auth(auth_user, nodes, username, password, force=false, local=true)
)
return auth_responses, sync_successful, sync_failed_nodes, sync_responses
end
+ cluster_nodes = get_corosync_nodes()
sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
tokens_cfg, new_tokens, cluster_nodes, $cluster_name, ports
)
- sync_failed_nodes = []
sync_not_supported_nodes = []
sync_responses.each { |node, response|
if 'not_supported' == response['status']
@@ -1808,7 +1807,7 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
:message => 'Unable to connect to the cluster.'
}
end
- status[:status] == 'unknown'
+ status[:status] = 'unknown'
return status
end
diff --git a/pcsd/pcsd.8 b/pcsd/pcsd.8
index a824a48..6b63259 100644
--- a/pcsd/pcsd.8
+++ b/pcsd/pcsd.8
@@ -1,9 +1,9 @@
-.TH PCSD "8" "November 2017" "pcs 0.9.162" "System Administration Utilities"
+.TH PCSD "8" "February 2018" "pcs 0.9.163" "System Administration Utilities"
.SH NAME
pcsd \- pacemaker/corosync configuration system daemon
.SH DESCRIPTION
-Daemon for controlling and configuring pacakamer/corosync clusters via pcs.
+Daemon for controlling and configuring pacemaker/corosync clusters via pcs.
.SH ENVIRONMENT
diff --git a/pcsd/pcsd.logrotate b/pcsd/pcsd.logrotate
index 36d2529..d105cff 100644
--- a/pcsd/pcsd.logrotate
+++ b/pcsd/pcsd.logrotate
@@ -1,4 +1,4 @@
-/var/log/pcsd/pcsd.log {
+/var/log/pcsd/*.log {
rotate 5
weekly
missingok
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index f97dabc..01f2f5c 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -132,11 +132,14 @@ $thread_cfgsync = Thread.new {
begin
# do not sync if this host is not in a cluster
cluster_name = get_cluster_name()
- if cluster_name and !cluster_name.empty?()
+ cluster_nodes = get_corosync_nodes()
+ if cluster_name and !cluster_name.empty?() and cluster_nodes and !cluster_nodes.empty?
$logger.debug('Config files sync thread fetching')
fetcher = Cfgsync::ConfigFetcher.new(
- PCSAuth.getSuperuserAuth(), Cfgsync::get_cfg_classes(),
- get_corosync_nodes(), cluster_name
+ PCSAuth.getSuperuserAuth(),
+ Cfgsync::get_cfg_classes(),
+ cluster_nodes,
+ cluster_name
)
cfgs_to_save, _ = fetcher.fetch()
cfgs_to_save.each { |cfg_to_save|
@@ -211,6 +214,9 @@ helpers do
if param == "disabled"
meta_options << 'meta' << 'target-role=Stopped'
end
+ if param == "force" and val
+ param_line << "--force"
+ end
}
return param_line + meta_options
end
diff --git a/pcsd/pcsd.service b/pcsd/pcsd.service
index a28faab..2adff28 100644
--- a/pcsd/pcsd.service
+++ b/pcsd/pcsd.service
@@ -6,7 +6,7 @@ Documentation=man:pcs(8)
[Service]
EnvironmentFile=/etc/sysconfig/pcsd
Environment=GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby
-ExecStart=/usr/lib/pcsd/pcsd > /dev/null &
+ExecStart=/usr/lib/pcsd/pcsd
Type=notify
[Install]
diff --git a/pcsd/pcsd.service.debian b/pcsd/pcsd.service.debian
index d37ed5a..29f336f 100644
--- a/pcsd/pcsd.service.debian
+++ b/pcsd/pcsd.service.debian
@@ -5,7 +5,7 @@ Documentation=man:pcs(8)
[Service]
EnvironmentFile=/etc/default/pcsd
-ExecStart=/usr/bin/ruby -C/var/lib/pcsd -I/usr/share/pcsd -- /usr/share/pcsd/ssl.rb & > /dev/null &
+ExecStart=/usr/bin/ruby -C/var/lib/pcsd -I/usr/share/pcsd -- /usr/share/pcsd/ssl.rb
[Install]
WantedBy=multi-user.target
diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
index b7e9a7a..1876f82 100644
--- a/pcsd/public/js/pcsd.js
+++ b/pcsd/public/js/pcsd.js
@@ -286,9 +286,24 @@ function create_node(form) {
});
}
+function create_resource_error_processing(error_message, form, update, stonith) {
+ var message = (
+ "Unable to " + (update ? "update " : "add ") + name + "\n" + error_message
+ );
+ if (message.indexOf('--force') == -1) {
+ alert(message);
+ }
+ else {
+ message = message.replace(', use --force to override', '');
+ if (confirm(message + "\n\nDo you want to force the operation?")) {
+ create_resource(form, update, stonith, true)
+ }
+ }
+}
+
// If update is set to true we update the resource instead of create it
// if stonith is set to true we update/create a stonith agent
-function create_resource(form, update, stonith) {
+function create_resource(form, update, stonith, force) {
var data = {};
$($(form).serializeArray()).each(function(index, obj) {
data[obj.name] = obj.value;
@@ -303,6 +318,9 @@ function create_resource(form, update, stonith) {
} else {
name = "resource";
}
+ if (force) {
+ data["force"] = force;
+ }
ajax_wrapper({
type: "POST",
@@ -312,7 +330,9 @@ function create_resource(form, update, stonith) {
success: function(returnValue) {
$('input.apply_changes').show();
if (returnValue["error"] == "true") {
- alert(returnValue["stderr"]);
+ create_resource_error_processing(
+ returnValue["stderr"], form, update, stonith
+ );
} else {
Pcs.update();
if (!update) {
@@ -326,18 +346,9 @@ function create_resource(form, update, stonith) {
}
},
error: function(xhr, status, error) {
- if (update) {
- alert(
- "Unable to update " + name + " "
- + ajax_simple_error(xhr, status, error)
- );
- }
- else {
- alert(
- "Unable to add " + name + " "
- + ajax_simple_error(xhr, status, error)
- );
- }
+ create_resource_error_processing(
+ ajax_simple_error(xhr, status, error), form, update, stonith
+ );
$('input.apply_changes').show();
}
});
@@ -2350,6 +2361,10 @@ function resource_unclone(resource_id) {
}
show_loading_screen();
var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
+ if (!resource_obj) {
+ console.log("Resource '" + resource_id + "' not found.");
+ return;
+ }
if (resource_obj.get('class_type') == 'clone') {
resource_id = resource_obj.get('member').get('id');
}
@@ -2398,6 +2413,10 @@ function resource_change_group(resource_id, form) {
}
show_loading_screen();
var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
+ if (!resource_obj) {
+ console.log("Resource '" + resource_id + "' not found.");
+ return;
+ }
var data = {
resource_id: resource_id
};
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index e1e95a8..518e668 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -1494,7 +1494,7 @@ def update_resource (params, request, auth_user)
end
resource_group = params[:resource_group]
end
- if params[:resource_type] == "ocf:pacemaker:remote"
+ if params[:resource_type] == "ocf:pacemaker:remote" and not cmd.include?("--force")
# Workaround for Error: this command is not sufficient for create remote
# connection, use 'pcs cluster node add-remote', use --force to override.
# It is not possible to specify meta attributes so we don't need to take
diff --git a/pcsd/test/cib1.xml b/pcsd/test/cib1.xml
index f603f24..03749ab 100644
--- a/pcsd/test/cib1.xml
+++ b/pcsd/test/cib1.xml
@@ -28,6 +28,8 @@
<primitive class="stonith" id="node2-stonith" type="fence_xvm">
<instance_attributes id="node2-stonith-instance_attributes">
<nvpair id="node2-stonith-instance_attributes-domain" name="domain" value="node2"/>
+ <nvpair id="node2-stonith-instance_attributes-action" name="action" value="monitor"/>
+ <nvpair id="node2-stonith-instance_attributes-method" name="method" value="cycle"/>
</instance_attributes>
<operations>
<op id="node2-stonith-monitor-interval-60s" interval="60s" name="monitor"/>
diff --git a/pcsd/test/test_cluster_entity.rb b/pcsd/test/test_cluster_entity.rb
index 2b67e19..60719ef 100644
--- a/pcsd/test/test_cluster_entity.rb
+++ b/pcsd/test/test_cluster_entity.rb
@@ -719,6 +719,59 @@ class TestPrimitive < Test::Unit::TestCase
assert(obj.operations.empty?)
end
+ def test_init_stonith_with_warnings
+ obj = ClusterEntity::Primitive.new(
+ @cib.elements["//primitive[@id='node2-stonith']"]
+ )
+ assert_nil(obj.parent)
+ assert_nil(obj.get_master)
+ assert_nil(obj.get_clone)
+ assert_nil(obj.get_group)
+ assert(obj.meta_attr.empty?)
+ assert_equal('node2-stonith', obj.id)
+ assert(obj.error_list.empty?)
+ assert_equal(
+ obj.warning_list,
+ [
+ {
+ :message => (
+ 'This fence-device has the "action" option set, it is ' +
+ 'recommended to set "pcmk_off_action", "pcmk_reboot_action" instead'
+ )
+ },
+ {
+ :message => (
+ 'This fence-device has the "method" option set to "cycle" which ' +
+ 'is potentially dangerous, please consider using "onoff"'
+ )
+ }
+ ]
+ )
+ assert_equal('stonith:fence_xvm', obj.agentname)
+ assert_equal('stonith', obj._class)
+ assert_nil(obj.provider)
+ assert_equal('fence_xvm', obj.type)
+ assert(obj.stonith)
+ instance_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+ 'node2-stonith-instance_attributes-domain',
+ 'domain',
+ 'node2'
+ )
+ instance_attr << ClusterEntity::NvPair.new(
+ 'node2-stonith-instance_attributes-action',
+ 'action',
+ 'monitor'
+ )
+ instance_attr << ClusterEntity::NvPair.new(
+ 'node2-stonith-instance_attributes-method',
+ 'method',
+ 'cycle'
+ )
+ assert_equal_NvSet(instance_attr, obj.instance_attr)
+ assert(obj.crm_status.empty?)
+ assert(obj.operations.empty?)
+ end
+
def test_init_stonith_with_crm
obj = ClusterEntity::Primitive.new(
@cib.elements["//primitive[@id='node1-stonith']"],
diff --git a/pylintrc b/pylintrc
index 09df745..22dc9c6 100644
--- a/pylintrc
+++ b/pylintrc
@@ -64,7 +64,7 @@
#W0703: [broad-except] Catching too general exception %s
#W0710: [nonstandard-exception] Exception doesn't inherit from standard "Exception" class
#W1401: [anomalous-backslash-in-string] Anomalous backslash in string: \'%s\'. String constant might be missing an r prefix.
-disable=no-name-in-module, import-error, nonstandard-exception, unused-argument, redefined-outer-name, bare-except, anomalous-backslash-in-string, no-member, star-args, undefined-loop-variable, maybe-no-member, broad-except, too-few-public-methods, not-callable, protected-access, method-hidden, too-many-arguments, global-statement, unbalanced-tuple-unpacking, fixme, lost-exception, dangerous-default-value, too-many-return-statements, no-self-use, no-init, redefined-builtin, wildcard-imp [...]
+disable=no-name-in-module, import-error, nonstandard-exception, unused-argument, redefined-outer-name, bare-except, anomalous-backslash-in-string, no-member, star-args, undefined-loop-variable, maybe-no-member, broad-except, too-few-public-methods, not-callable, protected-access, method-hidden, too-many-arguments, global-statement, unbalanced-tuple-unpacking, fixme, lost-exception, dangerous-default-value, too-many-return-statements, no-self-use, no-init, redefined-builtin, wildcard-imp [...]
[DESIGN]
# Maximum number of locals for function / method body
diff --git a/setup.py b/setup.py
index 0e763f4..3f8e8e2 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ class CleanCommand(Command):
setup(
name='pcs',
- version='0.9.162',
+ version='0.9.163',
description='Pacemaker Configuration System',
author='Chris Feist',
author_email='cfeist at redhat.com',
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/pcs.git