[Debian-ha-commits] [pcs] 01/14: New upstream version 0.9.160
Valentin Vidic
vvidic-guest at moszumanska.debian.org
Sat Oct 14 11:56:42 UTC 2017
This is an automated email from the git hooks/post-receive script.
vvidic-guest pushed a commit to branch master
in repository pcs.
commit 48c68e068cde59ef7c0f25f09bc4f93c177b6d3f
Author: Valentin Vidic <Valentin.Vidic at CARNet.hr>
Date: Fri Oct 13 08:29:58 2017 +0200
New upstream version 0.9.160
---
CHANGELOG.md | 53 +-
Makefile | 25 +-
README | 49 -
README.md | 187 +-
maketarballs.py | 1 -
newversion.py | 1 -
pcs/acl.py | 1 -
pcs/alert.py | 1 -
pcs/app.py | 1 -
pcs/booth.py | 1 -
pcs/cli/booth/command.py | 1 -
pcs/cli/booth/console_report.py | 1 -
pcs/cli/booth/env.py | 1 -
pcs/cli/booth/test/test_command.py | 1 -
pcs/cli/booth/test/test_env.py | 1 -
pcs/cli/booth/test/test_reports.py | 1 -
pcs/cli/cluster/command.py | 9 +-
pcs/cli/cluster/test/test_command.py | 1 -
pcs/cli/common/completion.py | 1 -
pcs/cli/common/console_report.py | 31 +-
pcs/cli/common/env_cli.py | 4 +-
pcs/cli/common/env_file.py | 1 -
pcs/cli/common/errors.py | 1 -
pcs/cli/common/lib_wrapper.py | 41 +-
pcs/cli/common/middleware.py | 34 +-
pcs/cli/common/parse_args.py | 1 -
pcs/cli/common/reports.py | 28 +-
pcs/cli/common/test/test_completion.py | 1 -
pcs/cli/common/test/test_console_report.py | 77 +-
pcs/cli/common/test/test_env_file.py | 1 -
pcs/cli/common/test/test_lib_wrapper.py | 1 -
pcs/cli/common/test/test_middleware.py | 1 -
pcs/cli/common/test/test_parse_args.py | 1 -
pcs/cli/common/test/test_reports.py | 1 -
pcs/cli/constraint/command.py | 1 -
pcs/cli/constraint/console_report.py | 1 -
pcs/cli/constraint/parse_args.py | 1 -
pcs/cli/constraint/test/test_command.py | 1 -
pcs/cli/constraint/test/test_console_report.py | 1 -
pcs/cli/constraint/test/test_parse_args.py | 1 -
pcs/cli/constraint_all/console_report.py | 1 -
pcs/cli/constraint_all/test/test_console_report.py | 1 -
pcs/cli/constraint_colocation/command.py | 1 -
pcs/cli/constraint_colocation/console_report.py | 1 -
pcs/cli/constraint_order/command.py | 1 -
pcs/cli/constraint_order/console_report.py | 1 -
pcs/cli/constraint_ticket/command.py | 1 -
pcs/cli/constraint_ticket/console_report.py | 1 -
pcs/cli/constraint_ticket/parse_args.py | 1 -
pcs/cli/constraint_ticket/test/test_command.py | 1 -
.../constraint_ticket/test/test_console_report.py | 1 -
pcs/cli/constraint_ticket/test/test_parse_args.py | 1 -
pcs/cli/fencing_topology.py | 1 -
pcs/cli/resource/parse_args.py | 1 -
pcs/cli/resource/test/test_parse_args.py | 1 -
pcs/cluster.py | 228 ++-
pcs/common/env_file_role_codes.py | 1 -
pcs/common/fencing_topology.py | 1 -
pcs/common/node_communicator.py | 558 +++++
pcs/common/pcs_pycurl.py | 1 -
pcs/common/report_codes.py | 7 +-
pcs/common/test/test_node_communicator.py | 576 ++++++
pcs/common/test/test_tools.py | 1 -
pcs/common/tools.py | 1 -
pcs/config.py | 1 -
pcs/constraint.py | 3 +-
pcs/lib/booth/config_exchange.py | 1 -
pcs/lib/booth/config_files.py | 1 -
pcs/lib/booth/config_parser.py | 1 -
pcs/lib/booth/config_structure.py | 1 -
pcs/lib/booth/env.py | 1 -
pcs/lib/booth/reports.py | 1 -
pcs/lib/booth/resource.py | 1 -
pcs/lib/booth/status.py | 1 -
pcs/lib/booth/sync.py | 135 +-
pcs/lib/booth/test/test_config_exchange.py | 1 -
pcs/lib/booth/test/test_config_files.py | 1 -
pcs/lib/booth/test/test_config_parser.py | 1 -
pcs/lib/booth/test/test_config_structure.py | 1 -
pcs/lib/booth/test/test_env.py | 1 -
pcs/lib/booth/test/test_resource.py | 1 -
pcs/lib/booth/test/test_status.py | 1 -
pcs/lib/booth/test/test_sync.py | 422 ++--
pcs/lib/cib/acl.py | 1 -
pcs/lib/cib/alert.py | 1 -
pcs/lib/cib/constraint/colocation.py | 1 -
pcs/lib/cib/constraint/constraint.py | 1 -
pcs/lib/cib/constraint/order.py | 1 -
pcs/lib/cib/constraint/resource_set.py | 1 -
pcs/lib/cib/constraint/ticket.py | 8 +-
pcs/lib/cib/fencing_topology.py | 1 -
pcs/lib/cib/node.py | 1 -
pcs/lib/cib/nvpair.py | 14 +-
pcs/lib/cib/resource/__init__.py | 1 -
pcs/lib/cib/resource/bundle.py | 7 +-
pcs/lib/cib/resource/clone.py | 1 -
pcs/lib/cib/resource/common.py | 1 -
pcs/lib/cib/resource/group.py | 1 -
pcs/lib/cib/resource/guest_node.py | 5 +-
pcs/lib/cib/resource/operations.py | 1 -
pcs/lib/cib/resource/primitive.py | 1 -
pcs/lib/cib/resource/remote_node.py | 1 -
pcs/lib/cib/sections.py | 64 +
pcs/lib/cib/stonith.py | 1 -
pcs/lib/cib/test/test_acl.py | 1 -
pcs/lib/cib/test/test_alert.py | 1 -
pcs/lib/cib/test/test_constraint.py | 1 -
pcs/lib/cib/test/test_constraint_colocation.py | 1 -
pcs/lib/cib/test/test_constraint_order.py | 1 -
pcs/lib/cib/test/test_constraint_ticket.py | 1 -
pcs/lib/cib/test/test_fencing_topology.py | 1 -
pcs/lib/cib/test/test_node.py | 1 -
pcs/lib/cib/test/test_nvpair.py | 1 -
pcs/lib/cib/test/test_resource_bundle.py | 1 -
pcs/lib/cib/test/test_resource_clone.py | 1 -
pcs/lib/cib/test/test_resource_common.py | 1 -
pcs/lib/cib/test/test_resource_group.py | 1 -
pcs/lib/cib/test/test_resource_guest_node.py | 1 -
pcs/lib/cib/test/test_resource_operations.py | 1 -
pcs/lib/cib/test/test_resource_primitive.py | 1 -
pcs/lib/cib/test/test_resource_remote_node.py | 1 -
pcs/lib/cib/test/test_resource_set.py | 1 -
pcs/lib/cib/test/test_sections.py | 78 +
pcs/lib/cib/test/test_tools.py | 1 -
pcs/lib/cib/tools.py | 38 +-
pcs/lib/cluster_conf_facade.py | 1 -
pcs/lib/commands/acl.py | 6 +-
pcs/lib/commands/alert.py | 35 +-
pcs/lib/commands/booth.py | 42 +-
pcs/lib/commands/cib_options.py | 38 +
pcs/lib/commands/cluster.py | 541 +----
pcs/lib/commands/constraint/colocation.py | 1 -
pcs/lib/commands/constraint/common.py | 3 +-
pcs/lib/commands/constraint/order.py | 1 -
pcs/lib/commands/constraint/ticket.py | 8 +-
pcs/lib/commands/fencing_topology.py | 15 +-
pcs/lib/commands/node.py | 6 +-
pcs/lib/commands/qdevice.py | 1 -
pcs/lib/commands/quorum.py | 151 +-
pcs/lib/commands/{cluster.py => remote_node.py} | 123 +-
pcs/lib/commands/resource.py | 6 +-
pcs/lib/commands/resource_agent.py | 1 -
pcs/lib/commands/sbd.py | 299 +--
pcs/lib/commands/stonith.py | 1 -
pcs/lib/commands/stonith_agent.py | 1 -
pcs/lib/commands/test/cib_options/__init__.py | 0
.../test/cib_options/test_operations_defaults.py | 105 +
.../test/cib_options/test_resources_defaults.py | 109 +
pcs/lib/commands/test/cluster/__init__.py | 0
pcs/lib/commands/test/cluster/verify.py | 119 ++
pcs/lib/commands/test/resource/common.py | 76 -
.../commands/test/resource/test_bundle_create.py | 1487 +++++++-------
.../commands/test/resource/test_bundle_update.py | 1065 +++++-----
.../commands/test/resource/test_resource_create.py | 2148 ++++++++++++--------
.../test/resource/test_resource_enable_disable.py | 1276 ++++++------
.../test/resource/test_resource_manage_unmanage.py | 725 ++++---
pcs/lib/commands/test/sbd/__init__.py | 0
pcs/lib/commands/test/sbd/test_disable_sbd.py | 57 +
pcs/lib/commands/test/sbd/test_enable_sbd.py | 172 ++
.../test/sbd/test_get_cluster_sbd_config.py | 124 ++
.../test/sbd/test_get_cluster_sbd_status.py | 149 ++
pcs/lib/commands/test/test_acl.py | 5 +-
pcs/lib/commands/test/test_alert.py | 998 ++++-----
pcs/lib/commands/test/test_booth.py | 5 +-
pcs/lib/commands/test/test_constraint_common.py | 9 +-
pcs/lib/commands/test/test_fencing_topology.py | 7 +-
pcs/lib/commands/test/test_node.py | 3 +-
pcs/lib/commands/test/test_resource_agent.py | 1 -
pcs/lib/commands/test/test_stonith_agent.py | 38 +-
pcs/lib/commands/test/test_ticket.py | 62 +-
pcs/lib/communication/__init__.py | 0
pcs/lib/communication/booth.py | 113 +
pcs/lib/communication/corosync.py | 89 +
pcs/lib/communication/nodes.py | 261 +++
pcs/lib/communication/qdevice.py | 82 +
pcs/lib/communication/qdevice_net.py | 128 ++
pcs/lib/communication/sbd.py | 268 +++
pcs/lib/communication/test/__init__.py | 0
pcs/lib/communication/test/test_nodes.py | 125 ++
pcs/lib/communication/tools.py | 314 +++
pcs/lib/corosync/config_facade.py | 1 -
pcs/lib/corosync/config_parser.py | 1 -
pcs/lib/corosync/live.py | 14 -
pcs/lib/corosync/qdevice_client.py | 62 -
pcs/lib/corosync/qdevice_net.py | 81 -
pcs/lib/env.py | 271 ++-
pcs/lib/env_file.py | 1 -
pcs/lib/env_tools.py | 1 -
pcs/lib/errors.py | 6 +-
pcs/lib/external.py | 73 +-
pcs/lib/node.py | 1 -
pcs/lib/node_communication.py | 177 ++
pcs/lib/node_communication_format.py | 1 -
pcs/lib/nodes_task.py | 521 -----
pcs/lib/pacemaker/env.py | 1 -
pcs/lib/pacemaker/live.py | 87 +-
pcs/lib/pacemaker/state.py | 1 -
pcs/lib/pacemaker/test/test_live.py | 134 +-
pcs/lib/pacemaker/test/test_state.py | 1 -
pcs/lib/pacemaker/test/test_values.py | 1 -
pcs/lib/pacemaker/values.py | 1 -
pcs/lib/reports.py | 92 +-
pcs/lib/resource_agent.py | 52 +-
pcs/lib/sbd.py | 356 +---
pcs/lib/test/misc.py | 1 -
pcs/lib/test/test_cluster_conf_facade.py | 1 -
pcs/lib/test/test_env.py | 1207 ++++++-----
pcs/lib/test/test_env_cib.py | 448 ++++
pcs/lib/test/test_env_file.py | 1 -
pcs/lib/test/test_errors.py | 1 -
pcs/lib/test/test_node_communication.py | 539 +++++
pcs/lib/test/test_node_communication_format.py | 1 -
pcs/lib/test/test_nodes_task.py | 167 +-
pcs/lib/test/test_resource_agent.py | 62 +-
pcs/lib/test/test_validate.py | 1 -
pcs/{ => lib}/test/test_xml_tools.py | 40 +-
pcs/lib/tools.py | 16 +-
pcs/lib/validate.py | 1 -
pcs/lib/xml_tools.py | 42 +-
pcs/node.py | 1 -
pcs/pcs.8 | 18 +-
pcs/pcsd.py | 1 -
pcs/prop.py | 1 -
pcs/qdevice.py | 1 -
pcs/quorum.py | 1 -
pcs/resource.py | 27 +-
pcs/rule.py | 1 -
pcs/settings_default.py | 3 +-
pcs/status.py | 1 -
pcs/stonith.py | 1 -
pcs/test/cib_resource/common.py | 1 -
pcs/test/cib_resource/stonith_common.py | 1 -
pcs/test/cib_resource/test_bundle.py | 1 -
pcs/test/cib_resource/test_create.py | 67 +-
pcs/test/cib_resource/test_manage_unmanage.py | 1 -
pcs/test/cib_resource/test_operation_add.py | 1 -
pcs/test/cib_resource/test_stonith_create.py | 1 -
.../cib_resource/test_stonith_enable_disable.py | 1 -
pcs/test/curl_test.py | 63 +
pcs/test/resources/cib-empty.xml | 2 +-
pcs/test/resources/cib-large.xml | 2 +-
.../resource_agent_ocf_heartbeat_dummy.xml | 2 +-
...ce_agent_ocf_heartbeat_dummy_insane_action.xml} | 3 +-
pcs/test/suite.py | 1 -
pcs/test/test_acl.py | 21 +-
pcs/test/test_alert.py | 2 -
pcs/test/test_booth.py | 7 +-
pcs/test/test_cluster.py | 9 +-
pcs/test/test_cluster_pcmk_remote.py | 10 +-
pcs/test/test_common_tools.py | 1 -
pcs/test/test_constraints.py | 34 +-
pcs/test/test_lib_commands_qdevice.py | 1 -
pcs/test/test_lib_commands_quorum.py | 14 +-
pcs/test/test_lib_commands_sbd.py | 336 +--
pcs/test/test_lib_corosync_config_facade.py | 7 +-
pcs/test/test_lib_corosync_config_parser.py | 3 +-
pcs/test/test_lib_corosync_live.py | 5 +-
pcs/test/test_lib_corosync_qdevice_client.py | 1 -
pcs/test/test_lib_corosync_qdevice_net.py | 17 +-
pcs/test/test_lib_external.py | 128 +-
pcs/test/test_lib_node.py | 41 +-
pcs/test/test_lib_sbd.py | 97 +-
pcs/test/test_lib_tools.py | 1 -
pcs/test/test_node.py | 7 +-
pcs/test/test_properties.py | 7 +-
pcs/test/test_quorum.py | 1 -
pcs/test/test_resource.py | 49 +-
pcs/test/test_rule.py | 7 +-
pcs/test/test_status.py | 1 -
pcs/test/test_stonith.py | 7 +-
pcs/test/test_utils.py | 25 +-
pcs/test/tools/assertions.py | 75 +-
pcs/test/tools/check/test_misc.py | 1 -
pcs/test/tools/cib.py | 1 -
pcs/test/tools/color_text_runner/__init__.py | 1 -
pcs/test/tools/color_text_runner/format.py | 1 -
pcs/test/tools/color_text_runner/result.py | 1 -
pcs/test/tools/color_text_runner/writer.py | 1 -
pcs/test/tools/command_env/__init__.py | 83 +
pcs/test/tools/command_env/assistant.py | 185 ++
pcs/test/tools/command_env/calls.py | 240 +++
pcs/test/tools/command_env/config.py | 71 +
pcs/test/tools/command_env/config_corosync_conf.py | 53 +
pcs/test/tools/command_env/config_env.py | 84 +
pcs/test/tools/command_env/config_http.py | 126 ++
pcs/test/tools/command_env/config_runner.py | 43 +
pcs/test/tools/command_env/config_runner_cib.py | 210 ++
.../tools/command_env/config_runner_corosync.py | 40 +
pcs/test/tools/command_env/config_runner_pcmk.py | 133 ++
.../tools/command_env/config_runner_systemctl.py | 35 +
.../command_env/mock_get_local_corosync_conf.py | 23 +
.../tools/command_env/mock_node_communicator.py | 191 ++
pcs/test/tools/command_env/mock_push_cib.py | 66 +
pcs/test/tools/command_env/mock_runner.py | 123 ++
pcs/test/tools/command_env/spy.py | 101 +
pcs/test/tools/command_env/tools.py | 41 +
pcs/test/tools/custom_mock.py | 125 +-
.../test/resource => test/tools}/fixture.py | 104 +-
pcs/test/tools/fixture_cib.py | 170 ++
pcs/test/tools/integration_lib.py | 120 +-
pcs/test/tools/misc.py | 46 +-
pcs/test/tools/pcs_runner.py | 1 -
pcs/test/tools/xml.py | 1 -
pcs/usage.py | 43 +-
pcs/utils.py | 132 +-
pcsd/Gemfile.lock | 12 +-
pcsd/Makefile | 2 +-
pcsd/bootstrap.rb | 14 +-
pcsd/cfgsync.rb | 26 +-
pcsd/config.rb | 19 +-
pcsd/pcs.rb | 64 +-
pcsd/pcsd-cli.rb | 8 +-
pcsd/pcsd.8 | 5 +-
pcsd/pcsd.conf | 2 +
pcsd/pcsd.rb | 101 +-
pcsd/public/js/nodes-ember.js | 13 +-
pcsd/public/js/pcsd.js | 76 +-
pcsd/remote.rb | 30 +-
pcsd/settings.rb | 1 +
pcsd/ssl.rb | 3 +-
pcsd/test/test_cfgsync.rb | 162 +-
pcsd/test/test_config.rb | 108 +-
pcsd/test/tokens | 7 +-
pcsd/views/manage.erb | 11 +-
pcsd/views/nodes.erb | 4 +
pylintrc | 2 +-
setup.py | 2 +-
327 files changed, 14442 insertions(+), 8589 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bc7c8e5..751d1aa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,54 @@
# Change Log
+## [0.9.160] - 2017-10-09
+
+### Added
+- Configurable pcsd port ([rhbz#1415197])
+- Description of the `--force` option added to man page and help
+ ([rhbz#1491631])
+
+### Fixed
+- Fixed some crashes when pcs encounters a non-ascii character in environment
+ variables, command line arguments and so on ([rhbz#1435697])
+- Fixed detecting if systemd is in use ([ghissue#118])
+- Upgrade CIB schema version when `resource-discovery` option is used in
+ location constraints ([rhbz#1420437])
+- Fixed error messages in `pcs cluster report` ([rhbz#1388783])
+- Increase request timeout when starting a cluster with large number of nodes
+ to prevent timeouts ([rhbz#1463327])
+- Fixed "Unable to update cib" error caused by invalid resource operation IDs
+- `pcs resource op defaults` now fails on an invalid option ([rhbz#1341582])
+- Fixed behaviour of `pcs cluster verify` command when entered with the filename
+ argument ([rhbz#1213946])
+
+### Changed
+- CIB changes are now pushed to pacemaker as a diff in commands overhauled to
+ the new architecture (previously the whole CIB was pushed). This resolves
+ race conditions and ACLs related errors when pushing CIB. ([rhbz#1441673])
+- All actions / operations defined in resource agent's metadata (except
+ meta-data, status and validate-all) are now copied to the CIB when creating
+ a resource. ([rhbz#1418199], [ghissue#132])
+- Improve documentation of the `pcs stonith confirm` command ([rhbz#1489682])
+
+### Deprecated
+- This is the last version fully supporting CMAN clusters and python 2.6.
+ Support for these will be gradually dropped.
+
+[ghissue#118]: https://github.com/ClusterLabs/pcs/issues/118
+[ghissue#132]: https://github.com/ClusterLabs/pcs/issues/132
+[rhbz#1213946]: https://bugzilla.redhat.com/show_bug.cgi?id=1213946
+[rhbz#1341582]: https://bugzilla.redhat.com/show_bug.cgi?id=1341582
+[rhbz#1388783]: https://bugzilla.redhat.com/show_bug.cgi?id=1388783
+[rhbz#1415197]: https://bugzilla.redhat.com/show_bug.cgi?id=1415197
+[rhbz#1418199]: https://bugzilla.redhat.com/show_bug.cgi?id=1418199
+[rhbz#1420437]: https://bugzilla.redhat.com/show_bug.cgi?id=1420437
+[rhbz#1435697]: https://bugzilla.redhat.com/show_bug.cgi?id=1435697
+[rhbz#1441673]: https://bugzilla.redhat.com/show_bug.cgi?id=1441673
+[rhbz#1463327]: https://bugzilla.redhat.com/show_bug.cgi?id=1463327
+[rhbz#1489682]: https://bugzilla.redhat.com/show_bug.cgi?id=1489682
+[rhbz#1491631]: https://bugzilla.redhat.com/show_bug.cgi?id=1491631
+
+
## [0.9.159] - 2017-06-30
### Added
@@ -22,7 +71,7 @@
- Fixed a crash in the `pcs cluster node add-remote` command when an id
conflict occurs ([rhbz#1386114])
- Fixed creating a new cluster from the web UI ([rhbz#1284404])
-- `pcs cluster node add-guest` now works with the flag `--skipp-offline`
+- `pcs cluster node add-guest` now works with the flag `--skip-offline`
([rhbz#1176018])
- `pcs cluster node remove-guest` can be run again when the guest node was
unreachable first time ([rhbz#1176018])
@@ -34,7 +83,7 @@
`pcs resource create`, `pcs resource update` and `pcs resource op add`
commands ([rhbz#1443418])
- Flag `--force` works correctly when an operation is not successful on some
- nodes durrng `pcs cluster node add-remote` or `pcs cluster node add-guest`
+ nodes during `pcs cluster node add-remote` or `pcs cluster node add-guest`
([rhbz#1464781])
### Changed
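
The "CIB pushed as a diff" entry above ([rhbz#1441673]) is the most far-reaching change in this release. A minimal sketch of the idea, outside of pcs, assuming pacemaker's standard crm_diff and cibadmin tools (the exact flag spellings are an assumption, not taken from this commit):

```python
# Sketch only: push a CIB change as a patchset instead of replacing the whole
# CIB, so concurrent unrelated changes are not overwritten. Assumes crm_diff
# (exits non-zero when the inputs differ) and cibadmin --patch.
import subprocess
import tempfile

def push_cib_as_diff(cib_old_xml, cib_new_xml):
    with tempfile.NamedTemporaryFile("w", suffix=".xml") as old_f, \
            tempfile.NamedTemporaryFile("w", suffix=".xml") as new_f:
        old_f.write(cib_old_xml)
        new_f.write(cib_new_xml)
        old_f.flush()
        new_f.flush()
        diff = subprocess.run(
            ["crm_diff", "--original", old_f.name, "--new", new_f.name],
            stdout=subprocess.PIPE, universal_newlines=True,
        )
        if diff.returncode == 0:
            return  # CIBs are identical, nothing to push
        # apply only the patchset; pacemaker rejects it if it no longer fits
        subprocess.run(
            ["cibadmin", "--patch", "--xml-text", diff.stdout], check=True
        )
```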
diff --git a/Makefile b/Makefile
index 0e4dff4..ba23723 100644
--- a/Makefile
+++ b/Makefile
@@ -5,7 +5,7 @@ IS_DEBIAN=false
DISTRO_DEBIAN_VER_8=false
ifndef PYTHON
- PYTHON := $(shell which python)
+ PYTHON := $(shell which python3 || which python2 || which python)
endif
ifeq ($(UNAME_OS_GNU),true)
@@ -29,23 +29,14 @@ ifeq ($(PYTHON_SITELIB), /usr/lib/python2.7/dist-packages)
EXTRA_SETUP_OPTS="--install-layout=deb"
endif
-# Check for systemd presence, add compatibility with Debian based distros
-IS_SYSTEMCTL=false
-
-ifeq ($(IS_DEBIAN),true)
- IS_SYSTEMCTL = $(shell if [ -d /var/run/systemd/system ] ; then echo true ; else echo false; fi)
- ifeq ($(IS_SYSTEMCTL),false)
- ifeq ($(SYSTEMCTL_OVERRIDE),true)
- IS_SYSTEMCTL=true
- endif
- endif
+# Check for systemd presence
+ifeq ($(SYSTEMCTL_OVERRIDE),true)
+ IS_SYSTEMCTL=true
else
- ifeq ("$(wildcard /usr/bin/systemctl)","/usr/bin/systemctl")
- IS_SYSTEMCTL=true
+ ifeq ($(SYSTEMCTL_OVERRIDE),false)
+ IS_SYSTEMCTL=false
else
- ifeq ("$(wildcard /bin/systemctl)","/usr/bin/systemctl")
- IS_SYSTEMCTL=true
- endif
+ IS_SYSTEMCTL = $(shell if [ -d /run/systemd/system ] || [ -d /var/run/systemd/system ] ; then echo true ; else echo false; fi)
endif
endif
@@ -100,7 +91,7 @@ pcsd_fonts = \
install:
# make Python interpreter execution sane (via -Es flags)
- echo -e "[build]\nexecutable = $(PYTHON) -Es\n" > setup.cfg
+ printf "[build]\nexecutable = $(PYTHON) -Es\n" > setup.cfg
$(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS}
# fix excessive script interpreting "executable" quoting with old setuptools:
# https://github.com/pypa/setuptools/issues/188
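
The Makefile hunk above is the fix for [ghissue#118]: the old detection treated Debian specially and compared `$(wildcard /bin/systemctl)` against the string `/usr/bin/systemctl`, which could never match. The new logic honors SYSTEMCTL_OVERRIDE and otherwise just checks for the systemd runtime directory. The same check as a Python sketch (the function name is illustrative):

```python
# Systemd is considered present when /run/systemd/system exists (the
# directory sd_booted() checks), with /var/run/systemd/system kept as a
# legacy alias; an explicit override wins, mirroring SYSTEMCTL_OVERRIDE.
import os

def is_systemd(override=None):
    if override is not None:
        return override
    return any(
        os.path.isdir(path)
        for path in ("/run/systemd/system", "/var/run/systemd/system")
    )
```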
diff --git a/README b/README
deleted file mode 100644
index 0674d96..0000000
--- a/README
+++ /dev/null
@@ -1,49 +0,0 @@
-PCS - Pacemaker/Corosync configuration system
-
-Quick start
-
-To install pcs run the following in terminal
-
-# tar -xzvf pcs-0.9.143.tar.gz
-# cd pcs-0.9.143
-# make install
-
-This will install pcs into /usr/sbin/pcs
-
-To create a cluster run the following commands on all nodes (replacing node1,
-node2, node3 with a list of nodes in the cluster).
-# pcs cluster setup --local --name cluster_name node1 node2 node3
-
-Then run the following command on all nodes:
-# pcs cluster start
-
-After a few moments the cluster should startup and you can get the status of
-the cluster
-# pcs status
-
-After this you can add resources and stonith agents:
-# pcs resource help
-and
-# pcs stonith help
-
-You can also install pcsd which operates as a GUI and remote server for pcs.
-pcsd may also be necessary in order to follow the guides on the clusterlabs.org
-website. To install pcsd run the following commands from the root of your pcs
-directory. (You must have the ruby bundler gem installed, rubygem-bundler in
-Fedora, and development packages installed)
-
-# cd pcsd ; make get_gems ; cd ..
-# make install_pcsd
-
-If you are using GNU/Linux its now time to:
-# systemctl daemon-reload
-
-Currently this is built into Fedora (other distributions to follow). You can
-see the current Fedora .spec in the fedora package git repositories here:
-http://pkgs.fedoraproject.org/cgit/pcs.git/
-
-Current Fedora 23 .spec:
-http://pkgs.fedoraproject.org/cgit/pcs.git/tree/pcs.spec?h=f23
-
-If you have an questions or concerns please feel free to email
-cfeist at redhat.com or open a github issue on the pcs project.
diff --git a/README.md b/README.md
index b0d6cad..b45ea12 100644
--- a/README.md
+++ b/README.md
@@ -1,90 +1,159 @@
-## PCS - Pacemaker/Corosync configuration system
+## PCS - Pacemaker/Corosync Configuration System
+
+Pcs is a Corosync and Pacemaker configuration tool. It permits users to
+easily view, modify and create Pacemaker-based clusters. Pcs contains pcsd, a
+pcs daemon, which operates as a remote server for pcs and provides a web UI.
+
+---
+
+### Installation from Source
+
+These are the runtime dependencies of pcs and pcsd:
+* python 2.6+
+* python-lxml / python3-lxml
+* python-pycurl / python3-pycurl
+* python-setuptools / python3-setuptools
+* ruby 2.0.0+
+* killall (package psmisc)
+* openssl
+* corosync
+* pacemaker
+
+It is also recommended to have these:
+* python-clufter / python3-clufter
+* liberation fonts (package liberation-sans-fonts or fonts-liberation or
+ fonts-liberation2)
+* overpass fonts (package overpass-fonts)
+
+If you plan to manage Corosync 1.x based clusters, you will also need:
+* cman
+* ccs
+
+It is, however, highly recommended to use Corosync 2.x for new clusters.
+
+Apart from the dependencies listed above, these are also required for
+installation:
+
+* python development files (package python-devel / python3-devel)
+* ruby development files (package ruby-devel)
+* rubygems
+* rubygem bundler (package rubygem-bundler or ruby-bundler or bundler)
+* gcc
+* gcc-c++
+* PAM development files (package pam-devel or libpam0g-dev)
+* FFI development files (package libffi-devel or libffi-dev)
+* fontconfig
+* printf (package coreutils)
+* redhat-rpm-config if you are using Fedora
+
+During the installation, all required rubygems are automatically downloaded and
+compiled.
+
+To install pcs and pcsd run the following in terminal:
+```shell
+# tar -xzvf pcs-0.9.159.tar.gz
+# cd pcs-0.9.159
+# make install
+# make install_pcsd
+```
+
+If you are using GNU/Linux with systemd, it is now time to:
+```shell
+# systemctl daemon-reload
+```
+
+Start pcsd and make it start on boot:
+```shell
+# systemctl start pcsd
+# systemctl enable pcsd
+```
+
+---
+### Packages
-### Quick Start
-***
-
+Currently pcs is packaged in Fedora, RHEL and its clones, and Debian and its
+derivatives.
+* [Fedora package git repositories](http://pkgs.fedoraproject.org/cgit/rpms/pcs.git/)
+* [Current Fedora .spec](http://pkgs.fedoraproject.org/cgit/rpms/pcs.git/tree/pcs.spec)
+* [Debian-HA project home page](http://debian-ha.alioth.debian.org/)
-- **PCS Installation from Source**
+---
- Run the following in terminal:
+### Quick Start
- ```shell
- # tar -xzvf pcs-0.9.143.tar.gz
- # cd pcs-0.9.143
- # make install
- ```
+* **Authenticate cluster nodes**
- This will install pcs into `/usr/sbin/pcs`.
+ Set the same password for the `hacluster` user on all nodes.
+ ```shell
+ # passwd hacluster
+ ```
-<br />
-- **Create and Start a Basic Cluster**
+ To authenticate the nodes, run the following command on one of the nodes
+ (replacing node1, node2, node3 with a list of nodes in your future cluster).
+ Specify all your cluster nodes in the command. Make sure pcsd is running on
+ all nodes.
+ ```shell
+ # pcs cluster auth node1 node2 node3 -u hacluster
+ ```
- To create a cluster run the following commands on all nodes (replacing node1, node2, node3 with a list of nodes in the cluster).
+* **Create a cluster**
- ```shell
- # pcs cluster setup --local --name cluster_name node1 node2 node3
- ```
+ To create a cluster run the following command on one node (replacing
+ cluster\_name with the name of your cluster and node1, node2, node3 with a
+ list of nodes in the cluster). `--start` and `--enable` will start your
+ cluster and configure the nodes to start the cluster on boot, respectively.
+ ```shell
+ # pcs cluster setup --name cluster_name node1 node2 node3 --start --enable
+ ```
- Then run the following command on all nodes:
-
- ```shell
- # pcs cluster start
- ```
-
-<br />
-- **Check the Cluster Status**
-
- After a few moments the cluster should startup and you can get the status of the cluster
+* **Check the cluster status**
+ After a few moments the cluster should start up and you can get the status
+ of the cluster.
```shell
# pcs status
```
-<br />
-- **Add Cluster Resources**
-
- After this you can add resources and stonith agents:
+* **Add cluster resources**
+ After this you can add stonith agents and resources:
```shell
- # pcs resource help
+ # pcs -h stonith create
```
-
and
-
```shell
- # pcs stonith help
+ # pcs -h resource create
```
-<br />
-- **PCSD Installation from Source**
+---
- You can also install pcsd which operates as a GUI and remote server for pcs. pcsd may also be necessary in order to follow the guides on the clusterlabs.org website.
+### Accessing the Web UI
- To install pcsd run the following commands from the root of your pcs directory. (You must have the ruby bundler gem installed, rubygem-bundler in Fedora, and development packages installed)
+Apart from the command line interface you can use the web user interface to
+view and configure your cluster. To access the web UI, open a browser to the
+following URL (replace nodename with the address of your node):
+```
+https://nodename:2224
+```
+Login as the `hacluster` user.
- ```shell
- # cd pcsd ; make get_gems ; cd ..
- # make install_pcsd
- ```
+---
- If you are using GNU/Linux its now time to:
-
- ```shell
- # systemctl daemon-reload
- ```
-
-<br />
-### Packages
-***
+### Further Documentation
- Currently this is built into Fedora (other distributions to follow). You can see the current Fedora .spec in the fedora package git repositories here: http://pkgs.fedoraproject.org/cgit/pcs.git/
+[ClusterLabs website](http://clusterlabs.org) is an excellent place to learn
+more about Pacemaker clusters.
+* [ClusterLabs quick start](http://clusterlabs.org/quickstart.html)
+* [Clusters from Scratch](http://clusterlabs.org/doc/en-US/Pacemaker/1.1-pcs/html/Clusters_from_Scratch/index.html)
+* [ClusterLabs documentation page](http://clusterlabs.org/doc/)
- Current Fedora 23 .spec:
- http://pkgs.fedoraproject.org/cgit/pcs.git/tree/pcs.spec?h=f23
+---
-<br />
### Inquiries
-***
+If you have any bug reports or feature requests, please feel free to open a
+github issue on the pcs project.
-If you have any questions or concerns please feel free to email cfeist at redhat.com or open a github issue on the pcs project.
+Alternatively you can use the ClusterLabs
+[users mailing list](http://oss.clusterlabs.org/mailman/listinfo/users),
+which is also a great place to ask questions related to Pacemaker clusters.
diff --git a/maketarballs.py b/maketarballs.py
index d3048fe..45c9528 100644
--- a/maketarballs.py
+++ b/maketarballs.py
@@ -3,7 +3,6 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from __future__ import unicode_literals
import sys
import os
diff --git a/newversion.py b/newversion.py
index 1dba780..1d453f7 100644
--- a/newversion.py
+++ b/newversion.py
@@ -3,7 +3,6 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from __future__ import unicode_literals
import sys
import os
diff --git a/pcs/acl.py b/pcs/acl.py
index ffa53f6..1ff3194 100644
--- a/pcs/acl.py
+++ b/pcs/acl.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
diff --git a/pcs/alert.py b/pcs/alert.py
index e8b7146..78c87e9 100644
--- a/pcs/alert.py
+++ b/pcs/alert.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
diff --git a/pcs/app.py b/pcs/app.py
index a53f7eb..f5cd2a8 100644
--- a/pcs/app.py
+++ b/pcs/app.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import getopt
diff --git a/pcs/booth.py b/pcs/booth.py
index 5f41115..62465da 100644
--- a/pcs/booth.py
+++ b/pcs/booth.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
diff --git a/pcs/cli/booth/command.py b/pcs/cli/booth/command.py
index b56266f..b5665dd 100644
--- a/pcs/cli/booth/command.py
+++ b/pcs/cli/booth/command.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.common.errors import CmdLineInputError
diff --git a/pcs/cli/booth/console_report.py b/pcs/cli/booth/console_report.py
index 9acd76f..32bc666 100644
--- a/pcs/cli/booth/console_report.py
+++ b/pcs/cli/booth/console_report.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.common import report_codes as codes
diff --git a/pcs/cli/booth/env.py b/pcs/cli/booth/env.py
index 908b9dc..a8901b0 100644
--- a/pcs/cli/booth/env.py
+++ b/pcs/cli/booth/env.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.common import console_report
diff --git a/pcs/cli/booth/test/test_command.py b/pcs/cli/booth/test/test_command.py
index 8ba2c0e..768e0b8 100644
--- a/pcs/cli/booth/test/test_command.py
+++ b/pcs/cli/booth/test/test_command.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/booth/test/test_env.py b/pcs/cli/booth/test/test_env.py
index 6c2cfb4..179e30e 100644
--- a/pcs/cli/booth/test/test_env.py
+++ b/pcs/cli/booth/test/test_env.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/booth/test/test_reports.py b/pcs/cli/booth/test/test_reports.py
index 9e90ffd..e164f22 100644
--- a/pcs/cli/booth/test/test_reports.py
+++ b/pcs/cli/booth/test/test_reports.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/cluster/command.py b/pcs/cli/cluster/command.py
index d3c83cd..37d6359 100644
--- a/pcs/cli/cluster/command.py
+++ b/pcs/cli/cluster/command.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.resource.parse_args import(
@@ -36,7 +35,7 @@ def node_add_remote(lib, arg_list, modifiers):
parts = parse_resource_create_args(rest_args)
force = modifiers["force"]
- lib.cluster.node_add_remote(
+ lib.remote_node.node_add_remote(
node_host,
node_name,
parts["op"],
@@ -55,7 +54,7 @@ def create_node_remove_remote(remove_resource):
def node_remove_remote(lib, arg_list, modifiers):
if not arg_list:
raise CmdLineInputError()
- lib.cluster.node_remove_remote(
+ lib.remote_node.node_remove_remote(
arg_list[0],
remove_resource,
skip_offline_nodes=modifiers["skip_offline_nodes"],
@@ -73,7 +72,7 @@ def node_add_guest(lib, arg_list, modifiers):
resource_id = arg_list[1]
meta_options = prepare_options(arg_list[2:])
- lib.cluster.node_add_guest(
+ lib.remote_node.node_add_guest(
node_name,
resource_id,
meta_options,
@@ -87,7 +86,7 @@ def node_remove_guest(lib, arg_list, modifiers):
if not arg_list:
raise CmdLineInputError()
- lib.cluster.node_remove_guest(
+ lib.remote_node.node_remove_guest(
arg_list[0],
skip_offline_nodes=modifiers["skip_offline_nodes"],
allow_remove_multiple_nodes=modifiers["force"],
diff --git a/pcs/cli/cluster/test/test_command.py b/pcs/cli/cluster/test/test_command.py
index 9a8e76b..749b425 100644
--- a/pcs/cli/cluster/test/test_command.py
+++ b/pcs/cli/cluster/test/test_command.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/common/completion.py b/pcs/cli/common/completion.py
index 0b4cee8..294f028 100644
--- a/pcs/cli/common/completion.py
+++ b/pcs/cli/common/completion.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
def has_applicable_environment(environment):
diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
index c1f03e8..b3a4a3d 100644
--- a/pcs/cli/common/console_report.py
+++ b/pcs/cli/common/console_report.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from collections import Iterable
@@ -285,7 +284,12 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
codes.EMPTY_ID: lambda info:
"{id_description} cannot be empty"
- .format(**info)
+ .format(**info)
+ ,
+
+ codes.INVALID_CIB_CONTENT: lambda info:
+ "invalid cib: \n{0}"
+ .format(info["report"])
,
codes.INVALID_ID: lambda info:
@@ -415,8 +419,10 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
,
codes.NODE_COMMUNICATION_ERROR_TIMED_OUT: lambda info:
- "{node}: Connection timeout ({reason})"
- .format(**info)
+ (
+ "{node}: Connection timeout, try setting higher timeout in "
+ "--request-timeout option ({reason})"
+ ).format(**info)
,
codes.NODE_COMMUNICATION_PROXY_IS_SET:
@@ -441,6 +447,11 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
)
,
+ codes.DEFAULTS_CAN_BE_OVERRIDEN:
+ "Defaults do not apply to resources which override them with their "
+ "own defined values"
+ ,
+
codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED:
"Sending updated corosync.conf to nodes..."
,
@@ -691,6 +702,11 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
.format(**info)
,
+ codes.CIB_DIFF_ERROR: lambda info:
+ "Unable to diff CIB: {reason}\n{cib_new}"
+ .format(**info)
+ ,
+
codes.CIB_SAVE_TMP_ERROR: lambda info:
"Unable to save CIB to a temporary file: {reason}"
.format(**info)
@@ -1293,4 +1309,11 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
" 'pcs cluster node remove-guest'"
)
,
+
+ codes.TMP_FILE_WRITE: lambda info:
+ (
+ "Writing to a temporary file {file_path}:\n"
+ "--Debug Content Start--\n{content}\n--Debug Content End--\n"
+ ).format(**info)
+ ,
}
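
The hunks above extend CODE_TO_MESSAGE_BUILDER_MAP, whose values are either a plain message string (for codes with no variable parts, such as DEFAULTS_CAN_BE_OVERRIDEN) or a callable that formats the report's info dict. A minimal sketch of how such a map is consumed, matching the test helper added later in this commit; the names here are illustrative:

```python
# Values are either a ready string or a builder taking the info dict.
CODE_TO_MESSAGE = {
    "EMPTY_ID": lambda info:
        "{id_description} cannot be empty".format(**info),
    "DEFAULTS_CAN_BE_OVERRIDEN":
        "Defaults do not apply to resources which override them with their "
        "own defined values",
}

def build_message(code, info=None):
    build = CODE_TO_MESSAGE[code]
    return build(info or {}) if callable(build) else build

# build_message("EMPTY_ID", {"id_description": "group name"})
#   -> "group name cannot be empty"
```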
diff --git a/pcs/cli/common/env_cli.py b/pcs/cli/common/env_cli.py
index cfe08fc..86d41c7 100644
--- a/pcs/cli/common/env_cli.py
+++ b/pcs/cli/common/env_cli.py
@@ -2,20 +2,18 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
class Env(object):
#pylint: disable=too-many-instance-attributes
def __init__(self):
self.cib_data = None
- self.cib_upgraded = False
self.user = None
self.groups = None
self.corosync_conf_data = None
self.booth = None
self.pacemaker = None
- self.auth_tokens_getter = None
+ self.token_file_data_getter = None
self.debug = False
self.cluster_conf_data = None
self.request_timeout = None
diff --git a/pcs/cli/common/env_file.py b/pcs/cli/common/env_file.py
index 56e6065..8956660 100644
--- a/pcs/cli/common/env_file.py
+++ b/pcs/cli/common/env_file.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os.path
diff --git a/pcs/cli/common/errors.py b/pcs/cli/common/errors.py
index 47eca00..097620c 100644
--- a/pcs/cli/common/errors.py
+++ b/pcs/cli/common/errors.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
index 4d6ed9a..85a7c0c 100644
--- a/pcs/cli/common/lib_wrapper.py
+++ b/pcs/cli/common/lib_wrapper.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import logging
@@ -23,8 +22,10 @@ from pcs.lib.commands import (
node,
qdevice,
quorum,
+ remote_node,
resource_agent,
resource,
+ cib_options,
stonith,
sbd,
stonith_agent,
@@ -52,15 +53,14 @@ def cli_env_to_lib_env(cli_env):
cli_env.cib_data,
cli_env.corosync_conf_data,
booth=cli_env.booth,
- auth_tokens_getter=cli_env.auth_tokens_getter,
+ token_file_data_getter=cli_env.token_file_data_getter,
cluster_conf_data=cli_env.cluster_conf_data,
request_timeout=cli_env.request_timeout,
)
def lib_env_to_cli_env(lib_env, cli_env):
if not lib_env.is_cib_live:
- cli_env.cib_data = lib_env._get_cib_xml()
- cli_env.cib_upgraded = lib_env.cib_upgraded
+ cli_env.cib_data = lib_env.final_mocked_cib_content
if not lib_env.is_corosync_conf_live:
cli_env.corosync_conf_data = lib_env.get_corosync_conf_data()
if not lib_env.is_cluster_conf_live:
@@ -190,11 +190,23 @@ def load_module(env, middleware_factory, name):
middleware_factory.corosync_conf_existing,
),
{
- "node_add_remote": cluster.node_add_remote,
- "node_add_guest": cluster.node_add_guest,
- "node_remove_remote": cluster.node_remove_remote,
- "node_remove_guest": cluster.node_remove_guest,
"node_clear": cluster.node_clear,
+ "verify": cluster.verify,
+ }
+ )
+
+ if name == "remote_node":
+ return bind_all(
+ env,
+ middleware.build(
+ middleware_factory.cib,
+ middleware_factory.corosync_conf_existing,
+ ),
+ {
+ "node_add_remote": remote_node.node_add_remote,
+ "node_add_guest": remote_node.node_add_guest,
+ "node_remove_remote": remote_node.node_remove_remote,
+ "node_remove_guest": remote_node.node_remove_guest,
}
)
@@ -335,6 +347,19 @@ def load_module(env, middleware_factory, name):
"unmanage": resource.unmanage,
}
)
+
+ if name == "cib_options":
+ return bind_all(
+ env,
+ middleware.build(
+ middleware_factory.cib,
+ ),
+ {
+ "set_operations_defaults": cib_options.set_operations_defaults,
+ "set_resources_defaults": cib_options.set_resources_defaults,
+ }
+ )
+
if name == "stonith":
return bind_all(
env,
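
load_module above now dispatches the remote-node and cib-options commands to their new library modules. The body of bind_all is not part of this hunk; a hedged sketch of its likely shape, inferred only from how load_module calls it, binding every command to the environment and a middleware chain:

```python
# Assumption about bind_all's shape; run_with_middleware is the callable
# returned by middleware.build() below.
from functools import partial

def bind_all(env, run_with_middleware, command_map):
    module = type("Module", (object,), {})()
    for name, command in command_map.items():
        setattr(module, name, partial(run_with_middleware, command, env))
    return module
```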
diff --git a/pcs/cli/common/middleware.py b/pcs/cli/common/middleware.py
index 29a9247..a489430 100644
--- a/pcs/cli/common/middleware.py
+++ b/pcs/cli/common/middleware.py
@@ -2,13 +2,12 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from collections import namedtuple
from functools import partial
-from pcs.cli.common import console_report
+from pcs.cli.common.console_report import error
def build(*middleware_list):
@@ -20,7 +19,7 @@ def build(*middleware_list):
return next_in_line(env, *args, **kwargs)
return run
-def cib(use_local_cib, load_cib_content, write_cib):
+def cib(filename, touch_cib_file):
"""
return configured middleware that cares about local cib
bool use_local_cib is flag if local cib was required
@@ -28,14 +27,29 @@ def cib(use_local_cib, load_cib_content, write_cib):
callable write_cib put content of cib to required place
"""
def apply(next_in_line, env, *args, **kwargs):
- if use_local_cib:
- original_content = load_cib_content()
+ if filename:
+ touch_cib_file(filename)
+ try:
+ with open(filename, mode="r") as cib_file:
+ original_content = cib_file.read()
+ except EnvironmentError as e:
+ raise error(
+ "Cannot read cib file '{0}': '{1}'"
+ .format(filename, str(e))
+ )
env.cib_data = original_content
result_of_next = next_in_line(env, *args, **kwargs)
- if use_local_cib and env.cib_data != original_content:
- write_cib(env.cib_data, env.cib_upgraded)
+ if filename and env.cib_data != original_content:
+ try:
+ with open(filename, mode="w") as cib_file:
+ cib_file.write(env.cib_data)
+ except EnvironmentError as e:
+ raise error(
+ "Cannot write cib file '{0}': '{1}'"
+ .format(filename, str(e))
+ )
return result_of_next
return apply
@@ -46,7 +60,7 @@ def corosync_conf_existing(local_file_path):
try:
env.corosync_conf_data = open(local_file_path).read()
except EnvironmentError as e:
- raise console_report.error("Unable to read {0}: {1}".format(
+ raise error("Unable to read {0}: {1}".format(
local_file_path,
e.strerror
))
@@ -59,7 +73,7 @@ def corosync_conf_existing(local_file_path):
f.write(env.corosync_conf_data)
f.close()
except EnvironmentError as e:
- raise console_report.error("Unable to write {0}: {1}".format(
+ raise error("Unable to write {0}: {1}".format(
local_file_path,
e.strerror
))
@@ -73,7 +87,7 @@ def cluster_conf_read_only(local_file_path):
try:
env.cluster_conf_data = open(local_file_path).read()
except EnvironmentError as e:
- raise console_report.error("Unable to read {0}: {1}".format(
+ raise error("Unable to read {0}: {1}".format(
local_file_path,
e.strerror
))
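
Two things changed above: the cib middleware now works directly with a filename, reading the file before the command runs and writing it back only when the content changed, and the error/warn helpers are imported directly. (Note the docstring still describes the old use_local_cib/load_cib_content/write_cib parameters.) For orientation, a condensed sketch of how build() chains such middlewares around a library command, inferred from the fragment shown in the first hunk:

```python
# Each middleware has the signature apply(next_in_line, env, *args, **kwargs);
# build() folds them so the first listed middleware runs outermost.
from functools import partial

def build(*middleware_list):
    def run(command, env, *args, **kwargs):
        next_in_line = command
        for mw in reversed(middleware_list):
            next_in_line = partial(mw, next_in_line)
        return next_in_line(env, *args, **kwargs)
    return run
```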
diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py
index d72a6d4..70b926c 100644
--- a/pcs/cli/common/parse_args.py
+++ b/pcs/cli/common/parse_args.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.common.errors import CmdLineInputError
diff --git a/pcs/cli/common/reports.py b/pcs/cli/common/reports.py
index 064be11..5fd39cb 100644
--- a/pcs/cli/common/reports.py
+++ b/pcs/cli/common/reports.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
@@ -12,7 +11,11 @@ from functools import partial
from pcs.cli.booth.console_report import (
CODE_TO_MESSAGE_BUILDER_MAP as BOOTH_CODE_TO_MESSAGE_BUILDER_MAP
)
-from pcs.cli.common.console_report import CODE_TO_MESSAGE_BUILDER_MAP
+from pcs.cli.common.console_report import (
+ CODE_TO_MESSAGE_BUILDER_MAP,
+ error,
+ warn,
+)
from pcs.cli.constraint_all.console_report import (
CODE_TO_MESSAGE_BUILDER_MAP as CONSTRAINT_CODE_TO_MESSAGE_BUILDER_MAP
)
@@ -83,6 +86,12 @@ class LibraryReportProcessorToConsole(object):
if item.severity == ReportItemSeverity.ERROR
])
+ def report(self, report_item):
+ return self.report_list([report_item])
+
+ def report_list(self, report_item_list):
+ return self._send(report_item_list)
+
def process(self, report_item):
self.append(report_item)
self.send()
@@ -91,15 +100,21 @@ class LibraryReportProcessorToConsole(object):
self.extend(report_item_list)
self.send()
- def send(self):
+ def _send(self, report_item_list, print_errors=True):
errors = []
- for report_item in self.items:
+ for report_item in report_item_list:
if report_item.severity == ReportItemSeverity.ERROR:
+ if print_errors:
+ error(build_report_message(report_item))
errors.append(report_item)
elif report_item.severity == ReportItemSeverity.WARNING:
- print("Warning: " + build_report_message(report_item))
+ warn(build_report_message(report_item))
elif self.debug or report_item.severity != ReportItemSeverity.DEBUG:
print(build_report_message(report_item))
+ return errors
+
+ def send(self):
+ errors = self._send(self.items, print_errors=False)
self.items = []
if errors:
raise LibraryError(*errors)
@@ -114,6 +129,9 @@ def process_library_reports(report_item_list):
"""
report_item_list list of ReportItem
"""
+ if not report_item_list:
+ error("Errors have occurred, therefore pcs is unable to continue")
+
critical_error = False
for report_item in report_item_list:
if report_item.severity == ReportItemSeverity.WARNING:
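
The refactored processor above routes report items by severity: _send() prints warnings and info messages, optionally prints errors, and returns the error items; send() then raises LibraryError when any errors were collected. In sketch form (string constants stand in for ReportItemSeverity):

```python
# Illustrative routing only; the real class prints via error()/warn() helpers.
def route_reports(report_item_list, debug=False, print_errors=True):
    errors = []
    for item in report_item_list:
        if item.severity == "ERROR":
            if print_errors:
                print("Error: " + item.message)
            errors.append(item)
        elif item.severity == "WARNING":
            print("Warning: " + item.message)
        elif debug or item.severity != "DEBUG":
            print(item.message)
    return errors  # caller raises LibraryError(*errors) when non-empty
```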
diff --git a/pcs/cli/common/test/test_completion.py b/pcs/cli/common/test/test_completion.py
index daec1bc..b6f53f0 100644
--- a/pcs/cli/common/test/test_completion.py
+++ b/pcs/cli/common/test/test_completion.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/common/test/test_console_report.py b/pcs/cli/common/test/test_console_report.py
index 6d9c280..ea862f8 100644
--- a/pcs/cli/common/test/test_console_report.py
+++ b/pcs/cli/common/test/test_console_report.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
@@ -37,9 +36,13 @@ class NameBuildTest(TestCase):
"""
code = None
- def assert_message_from_info(self, message, info):
+ def assert_message_from_info(self, message, info=None):
+ info = info if info else {}
build = CODE_TO_MESSAGE_BUILDER_MAP[self.code]
- self.assertEqual(message, build(info))
+ self.assertEqual(
+ message,
+ build(info) if callable(build) else build
+ )
class BuildInvalidOptionMessageTest(NameBuildTest):
@@ -181,6 +184,17 @@ class BuildServiceStartErrorTest(NameBuildTest):
}
)
+class InvalidCibContent(NameBuildTest):
+ code = codes.INVALID_CIB_CONTENT
+ def test_build_message(self):
+ report = "report\nlines"
+ self.assert_message_from_info(
+ "invalid cib: \n{0}".format(report),
+ {
+ "report": report,
+ }
+ )
+
class BuildInvalidIdTest(NameBuildTest):
code = codes.INVALID_ID
def test_build_message_with_first_char_invalid(self):
@@ -312,6 +326,24 @@ class BuildNodeCommunicationStartedTest(NameBuildTest):
}
)
+
+class NodeCommunicationErrorTimedOut(NameBuildTest):
+ code = codes.NODE_COMMUNICATION_ERROR_TIMED_OUT
+ def test_success(self):
+ self.assert_message_from_info(
+ (
+ "node-1: Connection timeout, try setting higher timeout in "
+ "--request-timeout option (Connection timed out after 60049 "
+ "milliseconds)"
+ ),
+ {
+ "node": "node-1",
+ "command": "/remote/command",
+ "reason": "Connection timed out after 60049 milliseconds",
+ }
+ )
+
+
class FormatOptionalTest(TestCase):
def test_info_key_is_falsy(self):
self.assertEqual("", format_optional("", "{0}: "))
@@ -1737,3 +1769,42 @@ class ServiceDisableSuccess(NameBuildTest):
"instance": "an_instance",
}
)
+
+
+class CibDiffError(NameBuildTest):
+ code = codes.CIB_DIFF_ERROR
+ def test_success(self):
+ self.assert_message_from_info(
+ "Unable to diff CIB: error message\n<cib-new />",
+ {
+ "reason": "error message",
+ "cib_old": "<cib-old />",
+ "cib_new": "<cib-new />",
+ }
+ )
+
+
+class TmpFileWrite(NameBuildTest):
+ code = codes.TMP_FILE_WRITE
+ def test_success(self):
+ self.assert_message_from_info(
+ (
+ "Writing to a temporary file /tmp/pcs/test.tmp:\n"
+ "--Debug Content Start--\n"
+ "test file\ncontent\n\n"
+ "--Debug Content End--\n"
+ ),
+ {
+ "file_path": "/tmp/pcs/test.tmp",
+ "content": "test file\ncontent\n",
+ }
+ )
+
+
+class DefaultsCanBeOverriden(NameBuildTest):
+ code = codes.DEFAULTS_CAN_BE_OVERRIDEN
+ def test_message(self):
+ self.assert_message_from_info(
+ "Defaults do not apply to resources which override them with their "
+ "own defined values"
+ )
diff --git a/pcs/cli/common/test/test_env_file.py b/pcs/cli/common/test/test_env_file.py
index e0104bd..0d9f16c 100644
--- a/pcs/cli/common/test/test_env_file.py
+++ b/pcs/cli/common/test/test_env_file.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/common/test/test_lib_wrapper.py b/pcs/cli/common/test/test_lib_wrapper.py
index f510353..67f20d6 100644
--- a/pcs/cli/common/test/test_lib_wrapper.py
+++ b/pcs/cli/common/test/test_lib_wrapper.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/common/test/test_middleware.py b/pcs/cli/common/test/test_middleware.py
index 7eefbca..37b855f 100644
--- a/pcs/cli/common/test/test_middleware.py
+++ b/pcs/cli/common/test/test_middleware.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/common/test/test_parse_args.py b/pcs/cli/common/test/test_parse_args.py
index 5b79b85..efe38d0 100644
--- a/pcs/cli/common/test/test_parse_args.py
+++ b/pcs/cli/common/test/test_parse_args.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/common/test/test_reports.py b/pcs/cli/common/test/test_reports.py
index 04bc6a0..4bbc6cd 100644
--- a/pcs/cli/common/test/test_reports.py
+++ b/pcs/cli/common/test/test_reports.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/constraint/command.py b/pcs/cli/constraint/command.py
index 851b13b..7d9f8cf 100644
--- a/pcs/cli/constraint/command.py
+++ b/pcs/cli/constraint/command.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.constraint import parse_args, console_report
diff --git a/pcs/cli/constraint/console_report.py b/pcs/cli/constraint/console_report.py
index c9c84ef..6acbaab 100644
--- a/pcs/cli/constraint/console_report.py
+++ b/pcs/cli/constraint/console_report.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
diff --git a/pcs/cli/constraint/parse_args.py b/pcs/cli/constraint/parse_args.py
index 41ec182..0c79c6d 100644
--- a/pcs/cli/constraint/parse_args.py
+++ b/pcs/cli/constraint/parse_args.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.common import parse_args
diff --git a/pcs/cli/constraint/test/test_command.py b/pcs/cli/constraint/test/test_command.py
index 6a79e00..0d79b10 100644
--- a/pcs/cli/constraint/test/test_command.py
+++ b/pcs/cli/constraint/test/test_command.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/constraint/test/test_console_report.py b/pcs/cli/constraint/test/test_console_report.py
index 084124c..2b6d6cf 100644
--- a/pcs/cli/constraint/test/test_console_report.py
+++ b/pcs/cli/constraint/test/test_console_report.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/constraint/test/test_parse_args.py b/pcs/cli/constraint/test/test_parse_args.py
index 484cb8d..80ec2b7 100644
--- a/pcs/cli/constraint/test/test_parse_args.py
+++ b/pcs/cli/constraint/test/test_parse_args.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/constraint_all/console_report.py b/pcs/cli/constraint_all/console_report.py
index dac0554..fa89cf6 100644
--- a/pcs/cli/constraint_all/console_report.py
+++ b/pcs/cli/constraint_all/console_report.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.constraint.console_report import (
diff --git a/pcs/cli/constraint_all/test/test_console_report.py b/pcs/cli/constraint_all/test/test_console_report.py
index ece5876..c8b1aee 100644
--- a/pcs/cli/constraint_all/test/test_console_report.py
+++ b/pcs/cli/constraint_all/test/test_console_report.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/constraint_colocation/command.py b/pcs/cli/constraint_colocation/command.py
index b62fc7a..6182506 100644
--- a/pcs/cli/constraint_colocation/command.py
+++ b/pcs/cli/constraint_colocation/command.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.constraint import command
diff --git a/pcs/cli/constraint_colocation/console_report.py b/pcs/cli/constraint_colocation/console_report.py
index 0ede276..35638f5 100644
--- a/pcs/cli/constraint_colocation/console_report.py
+++ b/pcs/cli/constraint_colocation/console_report.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
def constraint_plain(constraint_info, with_id=False):
diff --git a/pcs/cli/constraint_order/command.py b/pcs/cli/constraint_order/command.py
index 8b77dbd..b0d4fc9 100644
--- a/pcs/cli/constraint_order/command.py
+++ b/pcs/cli/constraint_order/command.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.constraint import command
diff --git a/pcs/cli/constraint_order/console_report.py b/pcs/cli/constraint_order/console_report.py
index 42aa81a..1b8998d 100644
--- a/pcs/cli/constraint_order/console_report.py
+++ b/pcs/cli/constraint_order/console_report.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.lib.pacemaker.values import is_true
diff --git a/pcs/cli/constraint_ticket/command.py b/pcs/cli/constraint_ticket/command.py
index 583ba9e..df761ae 100644
--- a/pcs/cli/constraint_ticket/command.py
+++ b/pcs/cli/constraint_ticket/command.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.common.errors import CmdLineInputError
diff --git a/pcs/cli/constraint_ticket/console_report.py b/pcs/cli/constraint_ticket/console_report.py
index 54343d7..bffcc4b 100644
--- a/pcs/cli/constraint_ticket/console_report.py
+++ b/pcs/cli/constraint_ticket/console_report.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.constraint.console_report import prepare_options
diff --git a/pcs/cli/constraint_ticket/parse_args.py b/pcs/cli/constraint_ticket/parse_args.py
index dfd2c1a..97de1d5 100644
--- a/pcs/cli/constraint_ticket/parse_args.py
+++ b/pcs/cli/constraint_ticket/parse_args.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.common import parse_args
diff --git a/pcs/cli/constraint_ticket/test/test_command.py b/pcs/cli/constraint_ticket/test/test_command.py
index 9ca7817..eab9810 100644
--- a/pcs/cli/constraint_ticket/test/test_command.py
+++ b/pcs/cli/constraint_ticket/test/test_command.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/constraint_ticket/test/test_console_report.py b/pcs/cli/constraint_ticket/test/test_console_report.py
index 11af2e2..51fc2b7 100644
--- a/pcs/cli/constraint_ticket/test/test_console_report.py
+++ b/pcs/cli/constraint_ticket/test/test_console_report.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/constraint_ticket/test/test_parse_args.py b/pcs/cli/constraint_ticket/test/test_parse_args.py
index 4a592c2..7edb654 100644
--- a/pcs/cli/constraint_ticket/test/test_parse_args.py
+++ b/pcs/cli/constraint_ticket/test/test_parse_args.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cli/fencing_topology.py b/pcs/cli/fencing_topology.py
index 52367f2..ec3945e 100644
--- a/pcs/cli/fencing_topology.py
+++ b/pcs/cli/fencing_topology.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.common.fencing_topology import (
diff --git a/pcs/cli/resource/parse_args.py b/pcs/cli/resource/parse_args.py
index 1bdcd5b..122a8f4 100644
--- a/pcs/cli/resource/parse_args.py
+++ b/pcs/cli/resource/parse_args.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.cli.common.parse_args import group_by_keywords, prepare_options
from pcs.cli.common.errors import CmdLineInputError
diff --git a/pcs/cli/resource/test/test_parse_args.py b/pcs/cli/resource/test/test_parse_args.py
index 791b60d..aed4a54 100644
--- a/pcs/cli/resource/test/test_parse_args.py
+++ b/pcs/cli/resource/test/test_parse_args.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/cluster.py b/pcs/cluster.py
index cbf5726..a2971e6 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -2,9 +2,9 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
+import math
import os
import subprocess
import re
@@ -22,6 +22,13 @@ except ImportError:
# python3
from subprocess import getstatusoutput
+try:
+ # python2
+ from urlparse import urlparse
+except ImportError:
+ # python3
+ from urllib.parse import urlparse
+
from pcs import (
constraint,
node,
@@ -46,8 +53,17 @@ from pcs.lib import (
reports as lib_reports,
)
from pcs.lib.booth import sync as booth_sync
-from pcs.lib.commands.cluster import _share_authkey, _destroy_pcmk_remote_env
+from pcs.lib.commands.remote_node import _share_authkey, _destroy_pcmk_remote_env
from pcs.lib.commands.quorum import _add_device_model_net
+from pcs.lib.communication.corosync import CheckCorosyncOffline
+from pcs.lib.communication.nodes import DistributeFiles
+from pcs.lib.communication.sbd import (
+ CheckSbd,
+ SetSbdConfig,
+ EnableSbdService,
+ DisableSbdService,
+)
+from pcs.lib.communication.tools import run_and_raise
from pcs.lib.corosync import (
config_parser as corosync_conf_utils,
qdevice_net,
@@ -66,8 +82,7 @@ from pcs.lib.external import (
node_communicator_exception_to_report_item,
)
from pcs.lib.env_tools import get_nodes
-from pcs.lib.node import NodeAddresses, NodeAddressesList
-from pcs.lib.nodes_task import check_corosync_offline_on_nodes, distribute_files
+from pcs.lib.node import NodeAddresses
from pcs.lib import node_communication_format
import pcs.lib.pacemaker.live as lib_pacemaker
from pcs.lib.tools import (
@@ -262,10 +277,10 @@ def auth_nodes(nodes):
else:
password = None
- set_nodes = set(nodes)
+ nodes_dict = parse_nodes_with_ports(nodes)
need_auth = "--force" in utils.pcs_options or (username or password)
if not need_auth:
- for node in set_nodes:
+ for node in nodes_dict.keys():
status = utils.checkAuthorization(node)
if status[0] == 3:
need_auth = True
@@ -275,7 +290,9 @@ def auth_nodes(nodes):
try:
auth_status = json.loads(status[1])
if auth_status["success"]:
- if set_nodes.issubset(set(auth_status["node_list"])):
+ if set(nodes_dict.keys()).issubset(
+ set(auth_status["node_list"])
+ ):
mutually_authorized = True
except (ValueError, KeyError):
pass
@@ -290,13 +307,32 @@ def auth_nodes(nodes):
password = utils.get_terminal_password()
utils.auth_nodes_do(
- set_nodes, username, password, '--force' in utils.pcs_options,
+ nodes_dict, username, password, '--force' in utils.pcs_options,
'--local' in utils.pcs_options
)
else:
- for node in set_nodes:
+ for node in nodes_dict.keys():
print(node + ": Already authorized")
+
+def parse_nodes_with_ports(node_list):
+ result = {}
+ for node in node_list:
+ if node.count(":") > 1 and not node.startswith("["):
+ # if IPv6 without a port, put it in brackets
+ node = "[{0}]".format(node)
+ # add a protocol so urlparse parses the hostname/IP and port correctly
+ url = urlparse("http://{0}".format(node))
+ if url.hostname in result and result[url.hostname] != url.port:
+ raise CmdLineInputError(
+ "Node '{0}' defined twice with different ports".format(
+ url.hostname
+ )
+ )
+ result[url.hostname] = url.port
+ return result
+
+
def cluster_certkey(argv):
return pcsd.pcsd_certkey(argv)
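For illustration, the new parse_nodes_with_ports helper splits an optional port off each node name by borrowing the standard library's URL parser; a minimal sketch of its behaviour (the node names below are invented examples):

    from pcs.cluster import parse_nodes_with_ports

    # a scheme is prepended so urlparse can split host and port; a bare
    # IPv6 address is wrapped in brackets first
    parse_nodes_with_ports(["node1", "node2:2224"])
    # -> {"node1": None, "node2": 2224}
    parse_nodes_with_ports(["fe80::1", "[fe80::2]:2224"])
    # -> {"fe80::1": None, "fe80::2": 2224}
    # the same hostname given twice with different ports raises
    # CmdLineInputError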
@@ -380,7 +416,8 @@ def cluster_setup(argv):
)
if udpu_rrp and "rrp_mode" not in options["transport_options"]:
options["transport_options"]["rrp_mode"] = "passive"
- process_library_reports(messages)
+ if messages:
+ process_library_reports(messages)
# prepare config file
if is_rhel6:
@@ -399,7 +436,8 @@ def cluster_setup(argv):
options["quorum_options"],
modifiers["encryption"] == "1"
)
- process_library_reports(messages)
+ if messages:
+ process_library_reports(messages)
# setup on the local node
if "--local" in utils.pcs_options:
@@ -447,8 +485,10 @@ def cluster_setup(argv):
all_nodes_available = True
for node in primary_addr_list:
available, message = utils.canAddNodeToCluster(
- lib_env.node_communicator(),
- NodeAddresses(node)
+ lib_env.get_node_communicator(),
+ lib_env.get_node_target_factory().get_target(
+ NodeAddresses(node)
+ )
)
if not available:
all_nodes_available = False
@@ -475,17 +515,18 @@ def cluster_setup(argv):
generate_binary_key(random_bytes_count=128)
)
)
-
- distribute_files(
- lib_env.node_communicator(),
+ com_cmd = DistributeFiles(
lib_env.report_processor,
file_definitions,
- NodeAddressesList(
+ skip_offline_targets=modifiers["skip_offline_nodes"],
+ allow_fails=modifiers["force"],
+ )
+ com_cmd.set_targets(
+ lib_env.get_node_target_factory().get_target_list(
[NodeAddresses(node) for node in primary_addr_list]
- ),
- skip_offline_nodes=modifiers["skip_offline_nodes"],
- allow_incomplete_distribution="--force" in utils.pcs_options
+ )
)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
except LibraryError as e: #Theoretically, this should not happen
utils.process_library_reports(e.args)
@@ -991,9 +1032,10 @@ def start_cluster(argv):
wait = True
if len(argv) > 0:
- start_cluster_nodes(argv)
+ nodes = set(argv) # unique
+ start_cluster_nodes(nodes)
if wait:
- wait_for_nodes_started(argv, wait_timeout)
+ wait_for_nodes_started(nodes, wait_timeout)
return
print("Starting Cluster...")
@@ -1036,7 +1078,20 @@ def start_cluster_all():
wait_for_nodes_started(all_nodes, wait_timeout)
def start_cluster_nodes(nodes):
- node_errors = parallel_for_nodes(utils.startCluster, nodes, quiet=True)
+ # Large clusters take longer to start up, so we extend the timeout
+ # for every group of 8 nodes:
+ # 1 - 8 nodes: 1 * timeout
+ # 9 - 16 nodes: 2 * timeout
+ # 17 - 24 nodes: 3 * timeout
+ # and so on
+ # Users can override this and set their own timeout by specifying
+ # the --request-timeout option (see utils.sendHTTPRequest).
+ timeout = int(
+ settings.default_request_timeout * math.ceil(len(nodes) / 8.0)
+ )
+ node_errors = parallel_for_nodes(
+ utils.startCluster, nodes, quiet=True, timeout=timeout
+ )
if node_errors:
utils.err(
"unable to start all nodes\n" + "\n".join(node_errors.values())
@@ -1404,7 +1459,7 @@ def cluster_push(argv):
"--no-version"
]
patch, error, dummy_retval = runner.run(command)
- # dummy_retval == -1 means one of two things:
+ # dummy_retval == 1 means one of two things:
# a) an error has occured
# b) --original and --new differ
# therefore it's of no use to see if an error occurred
@@ -1552,12 +1607,15 @@ def _ensure_cluster_is_offline_if_atb_should_be_enabled(
"make SBD fencing effecive after this change. Cluster has to "
"be offline to be able to make this change."
)
- check_corosync_offline_on_nodes(
- lib_env.node_communicator(),
- lib_env.report_processor,
- corosync_conf.get_nodes(),
- skip_offline_nodes
+ com_cmd = CheckCorosyncOffline(
+ lib_env.report_processor, skip_offline_nodes
+ )
+ com_cmd.set_targets(
+ lib_env.get_node_target_factory().get_target_list(
+ corosync_conf.get_nodes()
+ )
)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
def cluster_node(argv):
@@ -1672,13 +1730,16 @@ def node_add(lib_env, node0, node1, modifiers):
"you must not specify ring 1 address for the node"
)
node_addr = NodeAddresses(node0, node1)
- node_communicator = lib_env.node_communicator()
- (canAdd, error) = utils.canAddNodeToCluster(node_communicator, node_addr)
+ (canAdd, error) = utils.canAddNodeToCluster(
+ lib_env.get_node_communicator(),
+ lib_env.get_node_target_factory().get_target(node_addr)
+ )
if not canAdd:
utils.err("Unable to add '%s' to cluster: %s" % (node0, error))
report_processor = lib_env.report_processor
+ com_factory = lib_env.communicator_factory
# First set up everything else than corosync. Once the new node is
# present in corosync.conf / cluster.conf, it's considered part of a
@@ -1702,6 +1763,9 @@ def node_add(lib_env, node0, node1, modifiers):
)
# sbd setup
+ new_node_target = lib_env.get_node_target_factory().get_target(
+ node_addr
+ )
if lib_sbd.is_sbd_enabled(utils.cmd_runner()):
if "--watchdog" not in utils.pcs_options:
watchdog = settings.sbd_watchdog_default
@@ -1736,50 +1800,51 @@ def node_add(lib_env, node0, node1, modifiers):
"therefore --device should not be specified"
)
- lib_sbd.check_sbd_on_node(
- report_processor, node_communicator, node_addr, watchdog,
- device_list
- )
+ com_cmd = CheckSbd(lib_env.report_processor)
+ com_cmd.add_request(new_node_target, watchdog, device_list)
+ run_and_raise(com_factory.get_communicator(), com_cmd)
- report_processor.process(
- lib_reports.sbd_config_distribution_started()
- )
- lib_sbd.set_sbd_config_on_node(
- report_processor,
- node_communicator,
- node_addr,
- sbd_cfg,
- watchdog,
- device_list,
- )
- report_processor.process(lib_reports.sbd_enabling_started())
- lib_sbd.enable_sbd_service_on_node(
- report_processor, node_communicator, node_addr
+ com_cmd = SetSbdConfig(lib_env.report_processor)
+ com_cmd.add_request(
+ new_node_target,
+ lib_sbd.create_sbd_config(
+ sbd_cfg, new_node_target.label, watchdog, device_list
+ )
)
+ run_and_raise(com_factory.get_communicator(), com_cmd)
+
+ com_cmd = EnableSbdService(lib_env.report_processor)
+ com_cmd.add_request(new_node_target)
+ run_and_raise(com_factory.get_communicator(), com_cmd)
else:
- report_processor.process(lib_reports.sbd_disabling_started())
- lib_sbd.disable_sbd_service_on_node(
- report_processor, node_communicator, node_addr
- )
+ com_cmd = DisableSbdService(lib_env.report_processor)
+ com_cmd.add_request(new_node_target)
+ run_and_raise(com_factory.get_communicator(), com_cmd)
# booth setup
booth_sync.send_all_config_to_node(
- node_communicator,
+ com_factory.get_communicator(),
report_processor,
- node_addr,
+ new_node_target,
rewrite_existing=modifiers["force"],
skip_wrong_config=modifiers["force"]
)
if os.path.isfile(settings.corosync_authkey_file):
- distribute_files(
- lib_env.node_communicator(),
+ com_cmd = DistributeFiles(
lib_env.report_processor,
node_communication_format.corosync_authkey_file(
open(settings.corosync_authkey_file).read()
),
- NodeAddressesList([node_addr]),
+ # added force; it was missing before,
+ # but it doesn't make sense here
+ skip_offline_targets=modifiers["skip_offline_nodes"],
+ allow_fails=modifiers["force"],
)
+ com_cmd.set_targets(
+ lib_env.get_node_target_factory().get_target_list([node_addr])
+ )
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
# do not send pcmk authkey to guest and remote nodes, they either have
# it or are not working anyway
@@ -1816,14 +1881,14 @@ def node_add(lib_env, node0, node1, modifiers):
# 1. add the new node to corosync.conf
# 2. reload corosync.conf before the new node is started
# 3. start the new node
- # If done otherwise, membership gets broken a qdevice hangs. Cluster
+ # If done otherwise, membership gets broken and qdevice hangs. Cluster
# will recover after a minute or so but still it's a wrong way.
# When corosync 1 is in use, the procedure for adding a node is:
# 1. add the new node to cluster.conf
# 2. start the new node
# Starting the node will automatically reload cluster.conf on all
# nodes. If the config is reloaded before the new node is started,
- # the new node gets fenced by the cluster,
+ # the new node gets fenced by the cluster.
output, retval = utils.reloadCorosync()
if corosync_conf != None:
# send local cluster pcsd configs to the new node
@@ -2204,33 +2269,30 @@ def cluster_destroy(argv):
pass
def cluster_verify(argv):
- nofilename = True
- if len(argv) == 1:
- filename = argv.pop(0)
- nofilename = False
- elif len(argv) > 1:
+ if len(argv) > 1:
usage.cluster("verify")
+ raise SystemExit(1)
- options = []
- if "-V" in utils.pcs_options:
- options.append("-V")
- if nofilename:
- options.append("--live-check")
- else:
- options.append("--xml-file")
- options.append(filename)
- output, retval = utils.run([settings.crm_verify] + options)
- if output != "":
- print(output)
+ if argv:
+ filename = argv[0]
+ if not utils.usefile:
+ # We must operate on the given CIB everywhere.
+ utils.usefile = True
+ utils.filename = filename
+ elif os.path.abspath(filename) == os.path.abspath(utils.filename):
+ warn("File '{0}' specified twice".format(os.path.abspath(filename)))
+ else:
+ raise error(
+ "Ambiguous cib filename specification: '{0}' vs -f '{1}'"
+ .format(filename, utils.filename)
+ )
lib = utils.get_library_wrapper()
try:
- lib.fencing_topology.verify()
+ lib.cluster.verify(verbose="-V" in utils.pcs_options)
except LibraryError as e:
utils.process_library_reports(e.args)
- return retval
-
def cluster_report(argv):
if len(argv) != 1:
usage.cluster(["report"])
@@ -2260,6 +2322,12 @@ def cluster_report(argv):
crm_report_opts.append(outfile)
output, retval = utils.run([settings.crm_report] + crm_report_opts)
+ if (
+ retval != 0
+ and
+ "ERROR: Cannot determine nodes; specify --nodes or --single-node" in output
+ ):
+ utils.err("cluster is not configured on this node")
newoutput = ""
for line in output.split("\n"):
if line.startswith("cat:") or line.startswith("grep") or line.startswith("grep") or line.startswith("tail"):
@@ -2272,6 +2340,8 @@ def cluster_report(argv):
continue
if "to diagnose" in line:
continue
+ if "--dest" in line:
+ line = line.replace("--dest", "<dest>")
newoutput = newoutput + line + "\n"
if retval != 0:
utils.err(newoutput)
diff --git a/pcs/common/env_file_role_codes.py b/pcs/common/env_file_role_codes.py
index ff777ae..de91649 100644
--- a/pcs/common/env_file_role_codes.py
+++ b/pcs/common/env_file_role_codes.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
BOOTH_CONFIG = "BOOTH_CONFIG"
diff --git a/pcs/common/fencing_topology.py b/pcs/common/fencing_topology.py
index 24fd15b..6f574d1 100644
--- a/pcs/common/fencing_topology.py
+++ b/pcs/common/fencing_topology.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
TARGET_TYPE_NODE = "node"
diff --git a/pcs/common/node_communicator.py b/pcs/common/node_communicator.py
new file mode 100644
index 0000000..f7fe241
--- /dev/null
+++ b/pcs/common/node_communicator.py
@@ -0,0 +1,558 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import base64
+import io
+import re
+from collections import namedtuple
+
+try:
+ # python2
+ from urllib import urlencode as urllib_urlencode
+except ImportError:
+ # python3
+ from urllib.parse import urlencode as urllib_urlencode
+
+# We should ignore SIGPIPE when using pycurl.NOSIGNAL - see the libcurl tutorial
+# for more info.
+try:
+ import signal
+ signal.signal(signal.SIGPIPE, signal.SIG_IGN)
+except ImportError:
+ pass
+
+from pcs import settings
+from pcs.common import pcs_pycurl as pycurl
+
+
+def _find_value_for_possible_keys(value_dict, possible_key_list):
+ for key in possible_key_list:
+ if key in value_dict:
+ return value_dict[key]
+ return None
+
+
+class NodeTargetFactory(object):
+ def __init__(self, auth_tokens, ports):
+ self._auth_tokens = auth_tokens
+ self._ports = ports
+
+ def _get_token(self, possible_names):
+ return _find_value_for_possible_keys(self._auth_tokens, possible_names)
+
+ def _get_port(self, possible_names):
+ return _find_value_for_possible_keys(self._ports, possible_names)
+
+ def get_target(self, node_addresses):
+ possible_names = [node_addresses.label, node_addresses.ring0]
+ if node_addresses.ring1:
+ possible_names.append(node_addresses.ring1)
+ return RequestTarget.from_node_addresses(
+ node_addresses,
+ token=self._get_token(possible_names),
+ port=self._get_port(possible_names),
+ )
+
+ def get_target_list(self, node_addresses_list):
+ return [self.get_target(node) for node in node_addresses_list]
+
+ def get_target_from_hostname(self, hostname):
+ return RequestTarget(
+ hostname,
+ token=self._get_token([hostname]),
+ port=self._get_port([hostname]),
+ )
+
+
+class RequestData(
+ namedtuple("RequestData", ["action", "structured_data", "data"])
+):
+ """
+ This class represents an action and the data associated with it, which
+ will be sent in a request
+ """
+
+ def __new__(cls, action, structured_data=()):
+ """
+ string action -- action to perform
+ list structured_data -- list of tuples, data to send with specified
+ action
+ """
+ return super(RequestData, cls).__new__(
+ cls, action, structured_data, urllib_urlencode(structured_data)
+ )
+
+
+class RequestTarget(namedtuple(
+ "RequestTarget", ["label", "address_list", "port", "token"]
+)):
+ """
+ This class represents a target (host) for a request to be performed on
+ """
+
+ def __new__(cls, label, address_list=None, port=None, token=None):
+ """
+ string label -- label for the host; it is used as the only hostname
+ if address_list is not defined
+ list address_list -- list of all possible hostnames on which the host is
+ reachable
+ int port -- target communication port
+ string token -- authentication token
+ """
+ if not address_list:
+ address_list = [label]
+ return super(RequestTarget, cls).__new__(
+ cls, label, list(address_list), port, token
+ )
+
+ @classmethod
+ def from_node_addresses(cls, node_addresses, port=None, token=None):
+ """
+ Create object RequestTarget from NodeAddresses instance. Returns new
+ RequestTarget instance.
+
+ NodeAddresses node_addresses -- node which defines target
+ string port -- target communication port
+ string token -- authentication token
+ """
+ address_list = [node_addresses.ring0]
+ if node_addresses.ring1:
+ address_list.append(node_addresses.ring1)
+ return cls(
+ node_addresses.label,
+ address_list=address_list, port=port, token=token
+ )
+
+
+class Request(object):
+ """
+ This class represents a request. Using its RequestTarget, it provides an
+ interface for getting the next available host to make the request on.
+ """
+
+ def __init__(self, request_target, request_data):
+ """
+ RequestTarget request_target
+ RequestData request_data
+ """
+ self._target = request_target
+ self._data = request_data
+ self._current_host_iterator = iter(request_target.address_list)
+ self._current_host = None
+ self.next_host()
+
+ def next_host(self):
+ """
+ Move to the next available host. Raises StopIteration when there is no
+ host to use.
+ """
+ self._current_host = next(self._current_host_iterator)
+
+ @property
+ def url(self):
+ """
+ URL representing request using current host.
+ """
+ return "https://{host}:{port}/{request}".format(
+ host="[{0}]".format(self.host) if ":" in self.host else self.host,
+ port=(
+ self._target.port
+ if self._target.port
+ else settings.pcsd_default_port
+ ),
+ request=self._data.action
+ )
+
+ @property
+ def host(self):
+ return self._current_host
+
+ @property
+ def host_label(self):
+ return self._target.label
+
+ @property
+ def target(self):
+ return self._target
+
+ @property
+ def data(self):
+ return self._data.data
+
+ @property
+ def action(self):
+ return self._data.action
+
+ @property
+ def cookies(self):
+ cookies = {}
+ if self._target.token:
+ cookies["token"] = self._target.token
+ return cookies
+
+ def __repr__(self):
+ return str("Request({0}, {1})").format(self._target, self._data)
+
+
+class Response(object):
+ """
+ This class represents a response to a request; the request is available
+ as an instance property.
+ """
+
+ def __init__(self, handle, was_connected, errno=None, error_msg=None):
+ self._handle = handle
+ self._was_connected = was_connected
+ self._errno = errno
+ self._error_msg = error_msg
+ self._data = None
+ self._debug = None
+
+ @classmethod
+ def connection_successful(cls, handle):
+ """
+ Returns Response instance that is marked as successfully connected.
+
+ pycurl.Curl handle -- curl easy handle whose connection was successful
+ """
+ return cls(handle, True)
+
+ @classmethod
+ def connection_failure(cls, handle, errno, error_msg):
+ """
+ Returns Response instance that is marked as not successfully connected.
+
+ pycurl.Curl handle -- curl easy handle that failed to connect
+ int errno -- error number
+ string error_msg -- text description of error
+ """
+ return cls(handle, False, errno, error_msg)
+
+ @property
+ def request(self):
+ return self._handle.request_obj
+
+ @property
+ def handle(self):
+ return self._handle
+
+ @property
+ def was_connected(self):
+ return self._was_connected
+
+ @property
+ def errno(self):
+ return self._errno
+
+ @property
+ def error_msg(self):
+ return self._error_msg
+
+ @property
+ def data(self):
+ if self._data is None:
+ self._data = self._handle.output_buffer.getvalue().decode("utf-8")
+ return self._data
+
+ @property
+ def debug(self):
+ if self._debug is None:
+ self._debug = self._handle.debug_buffer.getvalue().decode("utf-8")
+ return self._debug
+
+ @property
+ def response_code(self):
+ if not self.was_connected:
+ return None
+ return self._handle.getinfo(pycurl.RESPONSE_CODE)
+
+ def __repr__(self):
+ return str(
+ "Response({0} data='{1}' was_connected={2}) errno='{3}'"
+ " error_msg='{4}' response_code='{5}')"
+ ).format(
+ self.request,
+ self.data,
+ self.was_connected,
+ self.errno,
+ self.error_msg,
+ self.response_code,
+ )
+
+class NodeCommunicatorFactory(object):
+ def __init__(self, communicator_logger, user, groups, request_timeout):
+ self._logger = communicator_logger
+ self._user = user
+ self._groups = groups
+ self._request_timeout = request_timeout
+
+ def get_communicator(self):
+ return self.get_simple_communicator()
+
+ def get_simple_communicator(self):
+ return Communicator(
+ self._logger, self._user, self._groups, self._request_timeout
+ )
+
+ def get_multiaddress_communicator(self):
+ return MultiaddressCommunicator(
+ self._logger, self._user, self._groups, self._request_timeout
+ )
+
+
+class Communicator(object):
+ """
+ This class provides a simple interface for making parallel requests.
+ The instances of this class are not thread-safe! They are intended to be
+ used only in a single thread. Use a unique instance for each thread.
+ """
+ curl_multi_select_timeout_default = 0.8 # in seconds
+
+ def __init__(self, communicator_logger, user, groups, request_timeout=None):
+ self._logger = communicator_logger
+ self._auth_cookies = _get_auth_cookies(user, groups)
+ self._request_timeout = (
+ request_timeout
+ if request_timeout is not None
+ else settings.default_request_timeout
+ )
+ self._multi_handle = pycurl.CurlMulti()
+ self._is_running = False
+ # This is used just for storing references of curl easy handles.
+ # We need to keep references to all the handles, so they don't get
+ # cleaned up by the garbage collector.
+ self._easy_handle_list = []
+
+ def add_requests(self, request_list):
+ """
+ Add requests to the queue to be processed. It is possible to call this
+ method before obtaining the generator from the start_loop method and
+ also while getting responses from the generator. Requests are not
+ performed right after calling this method, but only while the generator
+ returned by start_loop is in progress (it has returned at least one
+ response and has not raised StopIteration).
+
+ list request_list -- Request objects to add to the queue
+ """
+ for request in request_list:
+ handle = _create_request_handle(
+ request, self._auth_cookies, self._request_timeout,
+ )
+ self._easy_handle_list.append(handle)
+ self._multi_handle.add_handle(handle)
+ if self._is_running:
+ self._logger.log_request_start(request)
+
+ def start_loop(self):
+ """
+ Returns a generator. When the generator is invoked, all requests in the
+ queue (added by the add_requests method) are performed in parallel, and
+ the generator then returns responses for these requests. It is possible
+ to add new requests to the queue while the generator is in progress.
+ The generator stops (raises StopIteration) after all requests (including
+ those added after its creation) are processed.
+
+ WARNING: do not use multiple generators of one Communicator instance
+ at once; starting a new one before the previous one has finished
+ (raised StopIteration) will cause an AssertionError.
+
+ USAGE:
+ com = Communicator(...)
+ com.add_requests([
+ Request(...), ...
+ ])
+ for response in com.start_loop():
+ # do something with response
+ # if needed, add some new requests to the queue
+ com.add_requests([Request(...)])
+ """
+ if self._is_running:
+ raise AssertionError("Method start_loop already running")
+ self._is_running = True
+ for handle in self._easy_handle_list:
+ self._logger.log_request_start(handle.request_obj)
+
+ finished_count = 0
+ while finished_count < len(self._easy_handle_list):
+ self.__multi_perform()
+ self.__wait_for_multi_handle()
+ response_list = self.__get_all_ready_responses()
+ for response in response_list:
+ # free up memory for next usage of this Communicator instance
+ self._multi_handle.remove_handle(response.handle)
+ self._logger.log_response(response)
+ yield response
+ # if something was added to the queue in the meantime, run it
+ # immediately, so we don't need to wait until all responses have
+ # been processed
+ self.__multi_perform()
+ finished_count += len(response_list)
+ self._easy_handle_list = []
+ self._is_running = False
+
+ def __get_all_ready_responses(self):
+ response_list = []
+ repeat = True
+ while repeat:
+ num_queued, ok_list, err_list = self._multi_handle.info_read()
+ response_list.extend(
+ [Response.connection_successful(handle) for handle in ok_list] +
+ [
+ Response.connection_failure(handle, errno, error_msg)
+ for handle, errno, error_msg in err_list
+ ]
+ )
+ repeat = num_queued > 0
+ return response_list
+
+ def __multi_perform(self):
+ # run all internal operations required by libcurl
+ status, num_to_process = self._multi_handle.perform()
+ # if perform returns E_CALL_MULTI_PERFORM, perform has to be called
+ # once again right away
+ while status == pycurl.E_CALL_MULTI_PERFORM:
+ status, num_to_process = self._multi_handle.perform()
+ return num_to_process
+
+ def __wait_for_multi_handle(self):
+ # try to wait until there is something to do for us
+ need_to_wait = True
+ while need_to_wait:
+ timeout = self._multi_handle.timeout()
+ if timeout == 0:
+ # if timeout == 0 then there is something to process already
+ return
+ timeout = (
+ timeout / 1000.0
+ if timeout > 0
+ # curl doesn't have a timeout set, so we can use our default
+ else self.curl_multi_select_timeout_default
+ )
+ # when value returned from select is -1, it timed out, so we can
+ # wait
+ need_to_wait = (self._multi_handle.select(timeout) == -1)
+
+
+class MultiaddressCommunicator(Communicator):
+ """
+ Class with the same interface as Communicator. Unlike Communicator, it
+ takes advantage of multiple hosts in a RequestTarget: if it is not
+ possible to connect to the target using the first hostname, it tries the
+ next one until a connection succeeds or there are no hosts left.
+ """
+ def start_loop(self):
+ for response in super(MultiaddressCommunicator, self).start_loop():
+ if response.was_connected:
+ yield response
+ continue
+ try:
+ previous_host = response.request.host
+ response.request.next_host()
+ self._logger.log_retry(response, previous_host)
+ self.add_requests([response.request])
+ except StopIteration:
+ self._logger.log_no_more_addresses(response)
+ yield response
+
+
+class CommunicatorLoggerInterface(object):
+ def log_request_start(self, request):
+ raise NotImplementedError()
+
+ def log_response(self, response):
+ raise NotImplementedError()
+
+ def log_retry(self, response, previous_host):
+ raise NotImplementedError()
+
+ def log_no_more_addresses(self, response):
+ raise NotImplementedError()
+
+
+def _get_auth_cookies(user, group_list):
+ """
+ Returns the input parameters in a dictionary which is prepared to be
+ converted to a cookie string.
+
+ string user -- CIB user
+ string group_list -- CIB user groups
+ """
+ # Let's be safe about characters in variables (they can come from env)
+ # and do base64. However, to stay backward compatible, we cannot do that
+ # for CIB_user, so we at least remove disallowed characters.
+ cookies = {}
+ if user:
+ cookies["CIB_user"] = re.sub(r"[^!-~]", "", user).replace(";", "")
+ if group_list:
+ # python3 requires the value to be bytes not str
+ cookies["CIB_user_groups"] = base64.b64encode(
+ " ".join(group_list).encode("utf-8")
+ )
+ return cookies
+
+
+def _create_request_handle(request, cookies, timeout):
+ """
+ Returns a Curl object (easy handle) set up with the specified parameters.
+
+ Request request -- request specification
+ dict cookies -- cookies to add to request
+ int timeout -- request timeout
+ """
+ # it is not possible to take this callback out of this function, because of
+ # curl API
+ def __debug_callback(data_type, debug_data):
+ prefixes = {
+ pycurl.DEBUG_TEXT: b"* ",
+ pycurl.DEBUG_HEADER_IN: b"< ",
+ pycurl.DEBUG_HEADER_OUT: b"> ",
+ pycurl.DEBUG_DATA_IN: b"<< ",
+ pycurl.DEBUG_DATA_OUT: b">> ",
+ }
+ if data_type in prefixes:
+ debug_output.write(prefixes[data_type])
+ debug_output.write(debug_data)
+ if not debug_data.endswith(b"\n"):
+ debug_output.write(b"\n")
+
+ output = io.BytesIO()
+ debug_output = io.BytesIO()
+ cookies.update(request.cookies)
+ handle = pycurl.Curl()
+ handle.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTPS)
+ handle.setopt(pycurl.TIMEOUT, timeout)
+ handle.setopt(pycurl.URL, request.url.encode("utf-8"))
+ handle.setopt(pycurl.WRITEFUNCTION, output.write)
+ handle.setopt(pycurl.VERBOSE, 1)
+ handle.setopt(pycurl.DEBUGFUNCTION, __debug_callback)
+ handle.setopt(pycurl.SSL_VERIFYHOST, 0)
+ handle.setopt(pycurl.SSL_VERIFYPEER, 0)
+ handle.setopt(pycurl.NOSIGNAL, 1) # required for multi-threading
+ if cookies:
+ handle.setopt(
+ pycurl.COOKIE, _dict_to_cookies(cookies).encode("utf-8")
+ )
+ if request.data:
+ handle.setopt(
+ pycurl.COPYPOSTFIELDS, request.data.encode("utf-8")
+ )
+ # add references for the request object and output buffers to the handle,
+ # so later we don't need to match these objects when they are returned
+ # from pycurl after they've been processed
+ # similar usage is in pycurl example:
+ # https://github.com/pycurl/pycurl/blob/REL_7_19_0_3/examples/retriever-multi.py
+ handle.request_obj = request
+ handle.output_buffer = output
+ handle.debug_buffer = debug_output
+ return handle
+
+
+def _dict_to_cookies(cookies_dict):
+ return ";".join([
+ "{0}={1}".format(key, value)
+ for key, value in sorted(cookies_dict.items())
+ ])
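Putting the new module together, a hedged sketch of how a caller can drive it (PrintLogger is a toy stand-in for CommunicatorLoggerInterface, and the hostnames and addresses are invented; real callers go through pcs.lib.communication command objects instead):

    from pcs.common.node_communicator import (
        MultiaddressCommunicator, Request, RequestData, RequestTarget,
    )

    class PrintLogger(object):
        # minimal CommunicatorLoggerInterface implementation for the sketch
        def log_request_start(self, request):
            print("start: {0}".format(request))
        def log_response(self, response):
            print("done: {0}".format(response))
        def log_retry(self, response, previous_host):
            print("retry after {0} failed".format(previous_host))
        def log_no_more_addresses(self, response):
            print("gave up on {0}".format(response.request.host_label))

    # IPv6 hosts are bracketed when the URL is built:
    req = Request(RequestTarget("::1", port=2224), RequestData("action"))
    # req.url == "https://[::1]:2224/action"

    # MultiaddressCommunicator retries over address_list on connect failure
    com = MultiaddressCommunicator(PrintLogger(), user=None, groups=None)
    com.add_requests([Request(
        RequestTarget("node1", address_list=["10.0.0.1", "10.0.1.1"]),
        RequestData("remote/status"),
    )])
    for response in com.start_loop():
        if response.was_connected:
            print(response.response_code, response.data)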
diff --git a/pcs/common/pcs_pycurl.py b/pcs/common/pcs_pycurl.py
index 4e94eeb..0e3fddb 100644
--- a/pcs/common/pcs_pycurl.py
+++ b/pcs/common/pcs_pycurl.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
index ebc3cc7..6077ce1 100644
--- a/pcs/common/report_codes.py
+++ b/pcs/common/report_codes.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
# force categories
@@ -66,6 +65,7 @@ CIB_ACL_TARGET_ALREADY_EXISTS = "CIB_ACL_TARGET_ALREADY_EXISTS"
CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS"
CIB_ALERT_RECIPIENT_VALUE_INVALID = "CIB_ALERT_RECIPIENT_VALUE_INVALID"
CIB_CANNOT_FIND_MANDATORY_SECTION = "CIB_CANNOT_FIND_MANDATORY_SECTION"
+CIB_DIFF_ERROR = "CIB_DIFF_ERROR"
CIB_FENCING_LEVEL_ALREADY_EXISTS = "CIB_FENCING_LEVEL_ALREADY_EXISTS"
CIB_FENCING_LEVEL_DOES_NOT_EXIST = "CIB_FENCING_LEVEL_DOES_NOT_EXIST"
CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT"
@@ -100,6 +100,7 @@ COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR"
COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR = "COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR"
COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE"
CRM_MON_ERROR = "CRM_MON_ERROR"
+DEFAULTS_CAN_BE_OVERRIDEN = "DEFAULTS_CAN_BE_OVERRIDEN"
DEPRECATED_OPTION = "DEPRECATED_OPTION"
DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST"
EMPTY_RESOURCE_SET_LIST = "EMPTY_RESOURCE_SET_LIST"
@@ -117,6 +118,7 @@ ID_ALREADY_EXISTS = 'ID_ALREADY_EXISTS'
ID_BELONGS_TO_UNEXPECTED_TYPE = "ID_BELONGS_TO_UNEXPECTED_TYPE"
ID_NOT_FOUND = 'ID_NOT_FOUND'
IGNORED_CMAN_UNSUPPORTED_OPTION = 'IGNORED_CMAN_UNSUPPORTED_OPTION'
+INVALID_CIB_CONTENT = "INVALID_CIB_CONTENT"
INVALID_ID = "INVALID_ID"
INVALID_OPTION = "INVALID_OPTION"
INVALID_OPTION_TYPE = "INVALID_OPTION_TYPE"
@@ -142,7 +144,9 @@ NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPO
NODE_COMMUNICATION_ERROR_TIMED_OUT = "NODE_COMMUNICATION_ERROR_TIMED_OUT"
NODE_COMMUNICATION_FINISHED = "NODE_COMMUNICATION_FINISHED"
NODE_COMMUNICATION_NOT_CONNECTED = "NODE_COMMUNICATION_NOT_CONNECTED"
+NODE_COMMUNICATION_NO_MORE_ADDRESSES = "NODE_COMMUNICATION_NO_MORE_ADDRESSES"
NODE_COMMUNICATION_PROXY_IS_SET = "NODE_COMMUNICATION_PROXY_IS_SET"
+NODE_COMMUNICATION_RETRYING = "NODE_COMMUNICATION_RETRYING"
NODE_COMMUNICATION_STARTED = "NODE_COMMUNICATION_STARTED"
NODE_NOT_FOUND = "NODE_NOT_FOUND"
NODE_REMOVE_IN_PACEMAKER_FAILED = "NODE_REMOVE_IN_PACEMAKER_FAILED"
@@ -235,6 +239,7 @@ STONITH_RESOURCES_DO_NOT_EXIST = "STONITH_RESOURCES_DO_NOT_EXIST"
SERVICE_COMMANDS_ON_NODES_STARTED = "SERVICE_COMMANDS_ON_NODES_STARTED"
SERVICE_COMMAND_ON_NODE_ERROR = "SERVICE_COMMAND_ON_NODE_ERROR"
SERVICE_COMMAND_ON_NODE_SUCCESS = "SERVICE_COMMAND_ON_NODE_SUCCESS"
+TMP_FILE_WRITE = "TMP_FILE_WRITE"
UNABLE_TO_DETERMINE_USER_UID = "UNABLE_TO_DETERMINE_USER_UID"
UNABLE_TO_DETERMINE_GROUP_GID = "UNABLE_TO_DETERMINE_GROUP_GID"
UNABLE_TO_GET_AGENT_METADATA = 'UNABLE_TO_GET_AGENT_METADATA'
diff --git a/pcs/common/test/test_node_communicator.py b/pcs/common/test/test_node_communicator.py
new file mode 100644
index 0000000..a5d3fa6
--- /dev/null
+++ b/pcs/common/test/test_node_communicator.py
@@ -0,0 +1,576 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import io
+
+from pcs.test.tools.pcs_unittest import mock, TestCase
+from pcs.test.tools.custom_mock import (
+ MockCurl,
+ MockCurlMulti,
+)
+
+from pcs import settings
+from pcs.common import pcs_pycurl as pycurl
+from pcs.lib.node import NodeAddresses
+import pcs.common.node_communicator as lib
+
+
+class RequestDataUrlEncodeTest(TestCase):
+ def test_no_data(self):
+ action = "action"
+ data = lib.RequestData(action)
+ self.assertEqual(action, data.action)
+ self.assertEqual(0, len(data.structured_data))
+ self.assertEqual("", data.data)
+
+ def test_with_data(self):
+ action = "action"
+ orig_data = [
+ ("key1", "value1"),
+ ("spacial characters", "+-+/%&?'\";[]()*^$#@!~`{:}<>")
+ ]
+ data = lib.RequestData(action, orig_data)
+ self.assertEqual(action, data.action)
+ self.assertEqual(orig_data, data.structured_data)
+ expected_raw_data = (
+ "key1=value1&spacial+characters=%2B-%2B%2F%25%26%3F%27%22%3B%5B" +
+ "%5D%28%29%2A%5E%24%23%40%21%7E%60%7B%3A%7D%3C%3E"
+ )
+ self.assertEqual(expected_raw_data, data.data)
+
+
+class RequestTargetConstructorTest(TestCase):
+ def test_no_addresses(self):
+ label = "label"
+ target = lib.RequestTarget(label)
+ self.assertEqual(label, target.label)
+ self.assertEqual([label], target.address_list)
+
+ def test_with_addresses(self):
+ label = "label"
+ address_list = ["a1", "a2"]
+ original_list = list(address_list)
+ target = lib.RequestTarget(label, address_list=address_list)
+ address_list.append("a3")
+ self.assertEqual(label, target.label)
+ self.assertIsNot(address_list, target.address_list)
+ self.assertEqual(original_list, target.address_list)
+
+
+class RequestTargetFromNodeAddressesTest(TestCase):
+ def test_ring0(self):
+ ring0 = "ring0"
+ target = lib.RequestTarget.from_node_addresses(NodeAddresses(ring0))
+ self.assertEqual(ring0, target.label)
+ self.assertEqual([ring0], target.address_list)
+
+ def test_ring1(self):
+ ring0 = "ring0"
+ ring1 = "ring1"
+ target = lib.RequestTarget.from_node_addresses(
+ NodeAddresses(ring0, ring1)
+ )
+ self.assertEqual(ring0, target.label)
+ self.assertEqual([ring0, ring1], target.address_list)
+
+ def test_ring0_with_label(self):
+ ring0 = "ring0"
+ label = "label"
+ target = lib.RequestTarget.from_node_addresses(
+ NodeAddresses(ring0, name=label)
+ )
+ self.assertEqual(label, target.label)
+ self.assertEqual([ring0], target.address_list)
+
+ def test_ring1_with_label(self):
+ ring0 = "ring0"
+ ring1 = "ring1"
+ label = "label"
+ target = lib.RequestTarget.from_node_addresses(
+ NodeAddresses(ring0, ring1, name=label)
+ )
+ self.assertEqual(label, target.label)
+ self.assertEqual([ring0, ring1], target.address_list)
+
+
+class RequestUrlTest(TestCase):
+ action = "action"
+
+ def _get_request(self, target):
+ return lib.Request(target, lib.RequestData(self.action))
+
+ def assert_url(self, actual_url, host, action, port=None):
+ if port is None:
+ port = settings.pcsd_default_port
+ self.assertEqual(
+ "https://{host}:{port}/{action}".format(
+ host=host, action=action, port=port
+ ),
+ actual_url
+ )
+
+ def test_url_basic(self):
+ host = "host"
+ self.assert_url(
+ self._get_request(lib.RequestTarget(host)).url, host, self.action,
+ )
+
+ def test_url_with_port(self):
+ host = "host"
+ port = 1234
+ self.assert_url(
+ self._get_request(lib.RequestTarget(host, port=port)).url,
+ host, self.action, port=port,
+ )
+
+ def test_url_ipv6(self):
+ host = "::1"
+ self.assert_url(
+ self._get_request(lib.RequestTarget(host)).url,
+ "[{0}]".format(host), self.action,
+ )
+
+ def test_url_multiaddr(self):
+ hosts = ["ring0", "ring1"]
+ action = "action"
+ request = self._get_request(
+ lib.RequestTarget.from_node_addresses(NodeAddresses(*hosts))
+ )
+ self.assert_url(request.url, hosts[0], action)
+ request.next_host()
+ self.assert_url(request.url, hosts[1], action)
+
+
+class RequestHostTest(TestCase):
+ action = "action"
+
+ def _get_request(self, target):
+ return lib.Request(target, lib.RequestData(self.action))
+
+ def test_one_host(self):
+ host = "host"
+ request = self._get_request(lib.RequestTarget(host))
+ self.assertEqual(host, request.host)
+ self.assertRaises(StopIteration, request.next_host)
+
+ def test_multiple_hosts(self):
+ hosts = ["host1", "host2", "host3"]
+ request = self._get_request(lib.RequestTarget("label", hosts))
+ for host in hosts:
+ self.assertEqual(host, request.host)
+ if host == hosts[-1]:
+ self.assertRaises(StopIteration, request.next_host)
+ else:
+ request.next_host()
+
+
+class RequestCookiesTest(TestCase):
+ def _get_request(self, token=None):
+ return lib.Request(
+ lib.RequestTarget("host", token=token), lib.RequestData("action")
+ )
+
+ def test_with_token(self):
+ token = "token1"
+ self.assertEqual({"token": token}, self._get_request(token).cookies)
+
+ def test_without_token(self):
+ self.assertEqual({}, self._get_request().cookies)
+
+
+class ResponseTest(TestCase):
+ def fixture_handle(self, info, request, data, debug):
+ handle = MockCurl(info)
+ handle.request_obj = request
+ handle.output_buffer = io.BytesIO()
+ handle.output_buffer.write(data.encode("utf-8"))
+ handle.debug_buffer = io.BytesIO()
+ handle.debug_buffer.write(debug.encode("utf-8"))
+ return handle
+
+ def test_connection_successful(self):
+ request = lib.Request(
+ lib.RequestTarget("host"), lib.RequestData("request")
+ )
+ output = "output"
+ debug = "debug"
+ response_code = 200
+ handle = self.fixture_handle(
+ {pycurl.RESPONSE_CODE: 200}, request, output, debug
+ )
+ response = lib.Response.connection_successful(handle)
+ self.assertEqual(request, response.request)
+ self.assertTrue(response.was_connected)
+ self.assertIsNone(response.errno)
+ self.assertIsNone(response.error_msg)
+ self.assertEqual(output, response.data)
+ self.assertEqual(debug, response.debug)
+ self.assertEqual(response_code, response.response_code)
+
+ def test_connection_failure(self):
+ request = lib.Request(
+ lib.RequestTarget("host"), lib.RequestData("request")
+ )
+ output = "output"
+ debug = "debug"
+ errno = 1
+ error_msg = "error"
+ handle = self.fixture_handle({}, request, output, debug)
+ response = lib.Response.connection_failure(handle, errno, error_msg)
+ self.assertEqual(request, response.request)
+ self.assertFalse(response.was_connected)
+ self.assertEqual(errno, response.errno)
+ self.assertEqual(error_msg, response.error_msg)
+ self.assertEqual(output, response.data)
+ self.assertEqual(debug, response.debug)
+ self.assertIsNone(response.response_code)
+
+
+ at mock.patch("pcs.common.node_communicator.pycurl.Curl")
+class CreateRequestHandleTest(TestCase):
+ _common_opts = {
+ pycurl.PROTOCOLS: pycurl.PROTO_HTTPS,
+ pycurl.VERBOSE: 1,
+ pycurl.SSL_VERIFYHOST: 0,
+ pycurl.SSL_VERIFYPEER: 0,
+ pycurl.NOSIGNAL: 1,
+ }
+
+ def test_all_info(self, mock_curl):
+ mock_curl.return_value = MockCurl(
+ None, b"output", [
+ (pycurl.DEBUG_TEXT, b"debug"),
+ (pycurl.DEBUG_DATA_OUT, b"info\n"),
+ ]
+ )
+ request = lib.Request(
+ lib.RequestTarget(
+ "label", ["host1", "host2"], port=123, token="token_val",
+ ),
+ lib.RequestData("action", [("data", "value")])
+ )
+ cookies = {
+ "name1": "val1",
+ "name2": "val2",
+ }
+ handle = lib._create_request_handle(request, cookies, 1)
+ expected_opts = {
+ pycurl.TIMEOUT: 1,
+ pycurl.URL: request.url.encode("utf-8"),
+ pycurl.COOKIE: "name1=val1;name2=val2;token=token_val".encode(
+ "utf-8"
+ ),
+ pycurl.COPYPOSTFIELDS: "data=value".encode("utf-8"),
+ }
+ expected_opts.update(self._common_opts)
+ self.assertLessEqual(
+ set(expected_opts.items()), set(handle.opts.items())
+ )
+ self.assertIs(request, handle.request_obj)
+ self.assertEqual("", handle.output_buffer.getvalue().decode("utf-8"))
+ self.assertEqual("", handle.debug_buffer.getvalue().decode("utf-8"))
+ handle.perform()
+ self.assertEqual(
+ "output", handle.output_buffer.getvalue().decode("utf-8")
+ )
+ self.assertEqual(
+ "* debug\n>> info\n", handle.debug_buffer.getvalue().decode("utf-8")
+ )
+
+ def test_basic(self, mock_curl):
+ mock_curl.return_value = MockCurl(None)
+ request = lib.Request(
+ lib.RequestTarget("label"), lib.RequestData("action")
+ )
+ handle = lib._create_request_handle(request, {}, 10)
+ expected_opts = {
+ pycurl.TIMEOUT: 10,
+ pycurl.URL: request.url.encode("utf-8"),
+ }
+ expected_opts.update(self._common_opts)
+ self.assertLessEqual(
+ set(expected_opts.items()), set(handle.opts.items())
+ )
+ self.assertFalse(pycurl.COOKIE in handle.opts)
+ self.assertFalse(pycurl.COPYPOSTFIELDS in handle.opts)
+ self.assertIs(request, handle.request_obj)
+ self.assertEqual("", handle.output_buffer.getvalue().decode("utf-8"))
+ self.assertEqual("", handle.debug_buffer.getvalue().decode("utf-8"))
+ handle.perform()
+ self.assertEqual("", handle.output_buffer.getvalue().decode("utf-8"))
+ self.assertEqual("", handle.debug_buffer.getvalue().decode("utf-8"))
+
+
+def fixture_request(host_id=1, action="action"):
+ return lib.Request(
+ lib.RequestTarget("host{0}".format(host_id)), lib.RequestData(action),
+ )
+
+
+class CommunicatorBaseTest(TestCase):
+ def setUp(self):
+ self.mock_com_log = mock.MagicMock(
+ spec_set=lib.CommunicatorLoggerInterface
+ )
+
+ def get_communicator(self):
+ return lib.Communicator(self.mock_com_log, None, None)
+
+ def get_multiaddress_communicator(self):
+ return lib.MultiaddressCommunicator(self.mock_com_log, None, None)
+
+
+ at mock.patch(
+ "pcs.common.node_communicator.pycurl.CurlMulti",
+ side_effect=lambda: MockCurlMulti([1])
+)
+ at mock.patch("pcs.common.node_communicator._create_request_handle")
+class CommunicatorSimpleTest(CommunicatorBaseTest):
+ def get_response(self, com, mock_create_handle, handle):
+ request = fixture_request(0, "action")
+ handle.request_obj = request
+ mock_create_handle.return_value = handle
+ com.add_requests([request])
+ self.assertEqual(0, self.mock_com_log.log_request_start.call_count)
+ response_list = list(com.start_loop())
+ self.assertEqual(1, len(response_list))
+ response = response_list[0]
+ self.assertIs(handle, response.handle)
+ self.assertIs(request, response.request)
+ mock_create_handle.assert_called_once_with(
+ request, {}, settings.default_request_timeout
+ )
+ return response
+
+ def assert_common_checks(self, com, response):
+ self.assertEqual(response.handle.error is None, response.was_connected)
+ self.mock_com_log.log_request_start.assert_called_once_with(response.request)
+ self.mock_com_log.log_response.assert_called_once_with(response)
+ self.assertEqual(0, self.mock_com_log.log_retry.call_count)
+ self.assertEqual(0, self.mock_com_log.log_no_more_addresses.call_count)
+ com._multi_handle.assert_no_handle_left()
+
+ def test_simple(self, mock_create_handle, _):
+ com = self.get_communicator()
+ response = self.get_response(com, mock_create_handle, MockCurl())
+ self.assert_common_checks(com, response)
+
+ def test_failure(self, mock_create_handle, _):
+ com = self.get_communicator()
+ expected_reason = "expected reason"
+ errno = pycurl.E_SEND_ERROR
+ response = self.get_response(
+ com, mock_create_handle, MockCurl(error=(errno, expected_reason))
+ )
+ self.assert_common_checks(com, response)
+ self.assertEqual(errno, response.errno)
+ self.assertEqual(expected_reason, response.error_msg)
+
+
+class CommunicatorMultiTest(CommunicatorBaseTest):
+ @mock.patch("pcs.common.node_communicator._create_request_handle")
+ @mock.patch(
+ "pcs.common.node_communicator.pycurl.CurlMulti",
+ side_effect=lambda: MockCurlMulti([1, 1])
+ )
+ def test_call_start_loop_multiple_times(self, _, mock_create_handle):
+ com = self.get_communicator()
+ mock_create_handle.side_effect = lambda request, _, __: MockCurl(
+ request=request
+ )
+ com.add_requests([fixture_request(i) for i in range(2)])
+ next(com.start_loop())
+ with self.assertRaises(AssertionError):
+ next(com.start_loop())
+
+ @mock.patch("pcs.common.node_communicator.pycurl.Curl")
+ @mock.patch(
+ "pcs.common.node_communicator.pycurl.CurlMulti",
+ side_effect=lambda: MockCurlMulti([2, 0, 0, 1, 0, 1, 1])
+ )
+ def test_multiple(self, _, mock_curl):
+ com = self.get_communicator()
+ action = "action"
+ counter = {"counter": 0}
+ def _create_mock_curl():
+ counter["counter"] += 1
+ return (
+ MockCurl()
+ if counter["counter"] != 2
+ else MockCurl(error=(pycurl.E_SEND_ERROR, "reason"))
+ )
+ mock_curl.side_effect = _create_mock_curl
+ request_list = [fixture_request(i, action) for i in range(3)]
+ com.add_requests(request_list)
+ self.assertEqual(0, self.mock_com_log.log_request_start.call_count)
+ response_list = []
+ for response in com.start_loop():
+ if len(response_list) == 0:
+ request = fixture_request(3, action)
+ request_list.append(request)
+ com.add_requests([request])
+ elif len(response_list) == 3:
+ request = fixture_request(4, action)
+ request_list.append(request)
+ com.add_requests([request])
+ response_list.append(response)
+ self.assertEqual(len(request_list), len(response_list))
+ self.assertEqual(request_list, [r.request for r in response_list])
+ for i in range(len(request_list)):
+ self.assertEqual(i != 1, response_list[i].was_connected)
+ logger_calls = (
+ [mock.call.log_request_start(request_list[i]) for i in range(3)]
+ +
+ [
+ mock.call.log_response(response_list[0]),
+ mock.call.log_request_start(request_list[3]),
+ ]
+ +
+ [mock.call.log_response(response_list[i]) for i in range(1, 4)]
+ +
+ [
+ mock.call.log_request_start(request_list[4]),
+ mock.call.log_response(response_list[4]),
+ ]
+ )
+ self.assertEqual(logger_calls, self.mock_com_log.mock_calls)
+ com._multi_handle.assert_no_handle_left()
+
+
+def fixture_logger_request_retry_calls(response, host):
+ return [
+ mock.call.log_request_start(response.request),
+ mock.call.log_response(response),
+ mock.call.log_retry(response, host),
+ ]
+
+
+ at mock.patch.object(lib.Response, "connection_failure")
+ at mock.patch.object(lib.Response, "connection_successful")
+ at mock.patch(
+ "pcs.common.node_communicator.pycurl.CurlMulti",
+ side_effect=lambda: MockCurlMulti([1, 0, 1, 1, 1])
+)
+ at mock.patch("pcs.common.node_communicator._create_request_handle")
+class MultiaddressCommunicatorTest(CommunicatorBaseTest):
+ def test_success(
+ self, mock_create_handle, _, mock_con_successful, mock_con_failure
+ ):
+ com = self.get_multiaddress_communicator()
+ counter = {"counter": 0}
+ expected_response_list = []
+ def _con_successful(handle):
+ response = lib.Response(handle, True)
+ expected_response_list.append(response)
+ return response
+
+ def _con_failure(handle, errno, err_msg):
+ response = lib.Response(handle, False, errno, err_msg)
+ expected_response_list.append(response)
+ return response
+
+ def _mock_create_request_handle(request, _, __):
+ counter["counter"] += 1
+ return (
+ MockCurl(request=request)
+ if counter["counter"] > 2
+ else MockCurl(
+ error=(pycurl.E_SEND_ERROR, "reason"),
+ request=request,
+ )
+ )
+ mock_con_successful.side_effect = _con_successful
+ mock_con_failure.side_effect = _con_failure
+ mock_create_handle.side_effect = _mock_create_request_handle
+ request = lib.Request(
+ lib.RequestTarget("label", ["host{0}".format(i) for i in range(4)]),
+ lib.RequestData("action")
+ )
+ com.add_requests([request])
+ response_list = list(com.start_loop())
+ self.assertEqual(1, len(response_list))
+ response = response_list[0]
+ self.assertIs(response, expected_response_list[-1])
+ self.assertTrue(response.was_connected)
+ self.assertIs(request, response.request)
+ self.assertEqual("host2", request.host)
+ self.assertEqual(3, mock_create_handle.call_count)
+ self.assertEqual(3, len(expected_response_list))
+ mock_create_handle.assert_has_calls([
+ mock.call(request, {}, settings.default_request_timeout)
+ for _ in range(3)
+ ])
+ logger_calls = (
+ fixture_logger_request_retry_calls(
+ expected_response_list[0], "host0"
+ )
+ +
+ fixture_logger_request_retry_calls(
+ expected_response_list[1], "host1"
+ )
+ +
+ [
+ mock.call.log_request_start(request),
+ mock.call.log_response(response),
+ ]
+ )
+ self.assertEqual(logger_calls, self.mock_com_log.mock_calls)
+ com._multi_handle.assert_no_handle_left()
+
+ def test_failure(
+ self, mock_create_handle, _, mock_con_successful, mock_con_failure
+ ):
+ expected_response_list = []
+ def _con_failure(handle, errno, err_msg):
+ response = lib.Response(handle, False, errno, err_msg)
+ expected_response_list.append(response)
+ return response
+
+ mock_con_failure.side_effect = _con_failure
+ com = self.get_multiaddress_communicator()
+ mock_create_handle.side_effect = lambda request, _, __: MockCurl(
+ error=(pycurl.E_SEND_ERROR, "reason"), request=request,
+ )
+ request = lib.Request(
+ lib.RequestTarget("label", ["host{0}".format(i) for i in range(4)]),
+ lib.RequestData("action")
+ )
+ com.add_requests([request])
+ response_list = list(com.start_loop())
+ self.assertEqual(1, len(response_list))
+ response = response_list[0]
+ self.assertFalse(response.was_connected)
+ self.assertIs(request, response.request)
+ self.assertEqual("host3", request.host)
+ self.assertEqual(4, mock_create_handle.call_count)
+ mock_con_successful.assert_not_called()
+ self.assertEqual(4, len(expected_response_list))
+ mock_create_handle.assert_has_calls([
+ mock.call(request, {}, settings.default_request_timeout)
+ for _ in range(3)
+ ])
+ logger_calls = (
+ fixture_logger_request_retry_calls(
+ expected_response_list[0], "host0"
+ )
+ +
+ fixture_logger_request_retry_calls(
+ expected_response_list[1], "host1"
+ )
+ +
+ fixture_logger_request_retry_calls(
+ expected_response_list[2], "host2"
+ )
+ +
+ [
+ mock.call.log_request_start(request),
+ mock.call.log_response(response),
+ mock.call.log_no_more_addresses(response)
+ ]
+ )
+ self.assertEqual(logger_calls, self.mock_com_log.mock_calls)
+ com._multi_handle.assert_no_handle_left()
+
diff --git a/pcs/common/test/test_tools.py b/pcs/common/test/test_tools.py
index 8a042d0..0b28f84 100644
--- a/pcs/common/test/test_tools.py
+++ b/pcs/common/test/test_tools.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/common/tools.py b/pcs/common/tools.py
index bd4ce21..8b46208 100644
--- a/pcs/common/tools.py
+++ b/pcs/common/tools.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/config.py b/pcs/config.py
index 5526eb5..ac32b66 100644
--- a/pcs/config.py
+++ b/pcs/config.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
diff --git a/pcs/constraint.py b/pcs/constraint.py
index 6e1a16f..3705c07 100644
--- a/pcs/constraint.py
+++ b/pcs/constraint.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
@@ -818,6 +817,8 @@ def location_add(argv,rm=False):
utils.err("invalid score '%s', use integer or INFINITY or -INFINITY" % score)
required_version = None
+ if [x for x in options if x[0] == "resource-discovery"]:
+ required_version = 2, 2, 0
if rsc_type == RESOURCE_TYPE_REGEXP:
required_version = 2, 6, 0
diff --git a/pcs/lib/booth/config_exchange.py b/pcs/lib/booth/config_exchange.py
index 377af1d..efdea42 100644
--- a/pcs/lib/booth/config_exchange.py
+++ b/pcs/lib/booth/config_exchange.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.lib.booth.config_structure import ConfigItem
diff --git a/pcs/lib/booth/config_files.py b/pcs/lib/booth/config_files.py
index 762cc96..1a17f20 100644
--- a/pcs/lib/booth/config_files.py
+++ b/pcs/lib/booth/config_files.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os
diff --git a/pcs/lib/booth/config_parser.py b/pcs/lib/booth/config_parser.py
index bdc79fd..7dd819d 100644
--- a/pcs/lib/booth/config_parser.py
+++ b/pcs/lib/booth/config_parser.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import re
diff --git a/pcs/lib/booth/config_structure.py b/pcs/lib/booth/config_structure.py
index e30fae3..f418e48 100644
--- a/pcs/lib/booth/config_structure.py
+++ b/pcs/lib/booth/config_structure.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import re
diff --git a/pcs/lib/booth/env.py b/pcs/lib/booth/env.py
index 97e215b..a91404a 100644
--- a/pcs/lib/booth/env.py
+++ b/pcs/lib/booth/env.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os
diff --git a/pcs/lib/booth/reports.py b/pcs/lib/booth/reports.py
index 9bcb7ca..024211d 100644
--- a/pcs/lib/booth/reports.py
+++ b/pcs/lib/booth/reports.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.common import report_codes
diff --git a/pcs/lib/booth/resource.py b/pcs/lib/booth/resource.py
index cf00d8f..6c0220c 100644
--- a/pcs/lib/booth/resource.py
+++ b/pcs/lib/booth/resource.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.lib.cib.tools import find_unique_id
diff --git a/pcs/lib/booth/status.py b/pcs/lib/booth/status.py
index 87cdc05..42eac6f 100644
--- a/pcs/lib/booth/status.py
+++ b/pcs/lib/booth/status.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs import settings
diff --git a/pcs/lib/booth/sync.py b/pcs/lib/booth/sync.py
index 374b96d..381201f 100644
--- a/pcs/lib/booth/sync.py
+++ b/pcs/lib/booth/sync.py
@@ -2,22 +2,16 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os
-import json
import base64
from pcs.common import report_codes
from pcs.lib import reports as lib_reports
+from pcs.lib.communication.booth import BoothSaveFiles
+from pcs.lib.communication.tools import run_and_raise
from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
-from pcs.lib.external import (
- NodeCommunicator,
- NodeCommunicationException,
- node_communicator_exception_to_report_item,
- parallel_nodes_communication_helper,
-)
from pcs.lib.booth import (
config_files as booth_conf,
config_structure,
@@ -26,79 +20,10 @@ from pcs.lib.booth import (
)
-def _set_config_on_node(
- communicator, reporter, node, name, config_data, authfile=None,
- authfile_data=None
-):
- """
- Set booth config for instance 'name' on specified node.
-
- communicator -- NodeCommunicator
- reporter -- report processor
- node -- NodeAddresses
- name -- name of booth instance
- config_data -- booth config as string
- authfile -- path to authfile
- authfile_data -- authfile content as bytes
- """
- data = {
- "config": {
- "name": "{0}.conf".format(name),
- "data": config_data
- }
- }
- if authfile is not None and authfile_data is not None:
- data["authfile"] = {
- "name": os.path.basename(authfile),
- "data": base64.b64encode(authfile_data).decode("utf-8")
- }
- communicator.call_node(
- node,
- "remote/booth_set_config",
- NodeCommunicator.format_data_dict([("data_json", json.dumps(data))])
- )
- reporter.process(reports.booth_config_accepted_by_node(node.label, [name]))
-
-
-def send_config_to_all_nodes(
- communicator, reporter, node_list, name, config_data, authfile=None,
- authfile_data=None, skip_offline=False
-):
- """
- Send config_data of specified booth instance from local node to all nodes in
- node_list.
-
- communicator -- NodeCommunicator
- reporter -- report processor
- node_list -- NodeAddressesList
- name -- name of booth instance
- config_data -- config_data content as string
- authfile -- path to authfile
- authfile_data -- content of authfile as bytes
- skip_offline -- if True offline nodes will be skipped
- """
- reporter.process(reports.booth_config_distribution_started())
- parallel_nodes_communication_helper(
- _set_config_on_node,
- [
- (
- [
- communicator, reporter, node, name, config_data,
- authfile, authfile_data
- ],
- {}
- )
- for node in node_list
- ],
- reporter,
- skip_offline
- )
-
-
def send_all_config_to_node(
communicator,
reporter,
- node,
+ target,
rewrite_existing=False,
skip_wrong_config=False
):
@@ -143,17 +68,12 @@ def send_all_config_to_node(
config, "unable to parse config"
))
- data = [("data_json", json.dumps(file_list))]
-
- if rewrite_existing:
- data.append(("rewrite_existing", "1"))
-
+ com_cmd = BoothSaveFiles(
+ reporter, file_list, rewrite_existing=rewrite_existing
+ )
+ com_cmd.set_targets([target])
+ response = run_and_raise(communicator, com_cmd)[0][1]
try:
- response = json.loads(communicator.call_node(
- node,
- "remote/booth_save_files",
- NodeCommunicator.format_data_dict(data)
- ))
report_list = []
for file in response["existing"]:
report_list.append(lib_reports.file_already_exists(
@@ -164,47 +84,16 @@ def send_all_config_to_node(
None if rewrite_existing
else report_codes.FORCE_FILE_OVERWRITE
),
- node.label
+ target.label
))
for file, reason in response["failed"].items():
report_list.append(reports.booth_config_distribution_node_error(
- node.label, reason, file
+ target.label, reason, file
))
reporter.process_list(report_list)
reporter.process(
- reports.booth_config_accepted_by_node(node.label, response["saved"])
+ reports.booth_config_accepted_by_node(target.label, response["saved"])
)
- except NodeCommunicationException as e:
- raise LibraryError(node_communicator_exception_to_report_item(e))
except (KeyError, ValueError):
- raise LibraryError(lib_reports.invalid_response_format(node.label))
-
-
-def pull_config_from_node(communicator, node, name):
- """
- Get config of specified booth instance and its authfile if there is one
- from 'node'. It returns dictionary with format:
- {
- "config": {
- "name": <file name of config>,
- "data": <content of file>
- },
- "authfile": {
- "name": <file name of authfile, None if it doesn't exist>,
- "data": <base64 coded content of authfile>
- }
+ raise LibraryError(lib_reports.invalid_response_format(target.label))
- communicator -- NodeCommunicator
- node -- NodeAddresses
- name -- name of booth instance
- """
- try:
- return json.loads(communicator.call_node(
- node,
- "remote/booth_get_config",
- NodeCommunicator.format_data_dict([("name", name)])
- ))
- except NodeCommunicationException as e:
- raise LibraryError(node_communicator_exception_to_report_item(e))
- except ValueError:
- raise LibraryError(lib_reports.invalid_response_format(node.label))
diff --git a/pcs/lib/booth/test/test_config_exchange.py b/pcs/lib/booth/test/test_config_exchange.py
index 9717a96..d7d3f73 100644
--- a/pcs/lib/booth/test/test_config_exchange.py
+++ b/pcs/lib/booth/test/test_config_exchange.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
from pcs.lib.booth import config_structure, config_exchange
diff --git a/pcs/lib/booth/test/test_config_files.py b/pcs/lib/booth/test/test_config_files.py
index d0df256..344884c 100644
--- a/pcs/lib/booth/test/test_config_files.py
+++ b/pcs/lib/booth/test/test_config_files.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os.path
diff --git a/pcs/lib/booth/test/test_config_parser.py b/pcs/lib/booth/test/test_config_parser.py
index c04f451..4dc0ee5 100644
--- a/pcs/lib/booth/test/test_config_parser.py
+++ b/pcs/lib/booth/test/test_config_parser.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.common import report_codes
diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py
index a611f10..3d89853 100644
--- a/pcs/lib/booth/test/test_config_structure.py
+++ b/pcs/lib/booth/test/test_config_structure.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/lib/booth/test/test_env.py b/pcs/lib/booth/test/test_env.py
index c14afbf..85fc1ee 100644
--- a/pcs/lib/booth/test/test_env.py
+++ b/pcs/lib/booth/test/test_env.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/lib/booth/test/test_resource.py b/pcs/lib/booth/test/test_resource.py
index 929c33b..7e306b5 100644
--- a/pcs/lib/booth/test/test_resource.py
+++ b/pcs/lib/booth/test/test_resource.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/lib/booth/test/test_status.py b/pcs/lib/booth/test/test_status.py
index dfb7354..c08cd18 100644
--- a/pcs/lib/booth/test/test_status.py
+++ b/pcs/lib/booth/test/test_status.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/lib/booth/test/test_sync.py b/pcs/lib/booth/test/test_sync.py
index 447f894..8a8d97a 100644
--- a/pcs/lib/booth/test/test_sync.py
+++ b/pcs/lib/booth/test/test_sync.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
@@ -16,7 +15,7 @@ except ImportError:
# python 3
from urllib.parse import parse_qs as url_decode
-from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.pcs_unittest import mock, skip
from pcs.test.tools.assertions import (
assert_report_item_list_equal,
assert_raise_library_error,
@@ -24,6 +23,7 @@ from pcs.test.tools.assertions import (
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.common import report_codes
+from pcs.common.node_communicator import RequestTarget
from pcs.lib.node import NodeAddresses, NodeAddressesList
from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
from pcs.lib.external import NodeCommunicator, NodeConnectionException
@@ -33,7 +33,7 @@ import pcs.lib.booth.sync as lib
def to_b64(string):
return base64.b64encode(string.encode("utf-8")).decode("utf-8")
-
+@skip("TODO: rewrite for pcs.lib.communication.booth.BoothSendConfig")
class SetConfigOnNodeTest(TestCase):
def setUp(self):
self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
@@ -131,7 +131,7 @@ class SetConfigOnNodeTest(TestCase):
)
self._assert()
-
+@skip("TODO: rewrite for pcs.lib.communication.booth.BoothSaveFiles")
@mock.patch("pcs.lib.booth.sync.parallel_nodes_communication_helper")
class SyncConfigInCluster(TestCase):
def setUp(self):
@@ -255,16 +255,38 @@ class SyncConfigInCluster(TestCase):
)]
)
-
+@mock.patch("pcs.lib.booth.sync.run_and_raise")
@mock.patch("pcs.lib.booth.config_structure.get_authfile")
@mock.patch("pcs.lib.booth.config_parser.parse")
@mock.patch("pcs.lib.booth.config_files.read_configs")
@mock.patch("pcs.lib.booth.config_files.read_authfile")
class SendAllConfigToNodeTest(TestCase):
def setUp(self):
- self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+ self.mock_com = "communicator"
self.mock_reporter = MockLibraryReportProcessor()
- self.node = NodeAddresses("node")
+ self.node = RequestTarget("node")
+ self.file_list = [
+ {
+ "name": "name1.conf",
+ "data": "config1",
+ "is_authfile": False
+ },
+ {
+ "name": "file1.key",
+ "data": to_b64("some key"),
+ "is_authfile": True
+ },
+ {
+ "name": "name2.conf",
+ "data": "config2",
+ "is_authfile": False
+ },
+ {
+ "name": "file2.key",
+ "data": to_b64("another key"),
+ "is_authfile": True
+ }
+ ]
@staticmethod
def mock_parse_fn(config_content):
@@ -299,7 +321,8 @@ class SendAllConfigToNodeTest(TestCase):
return _data[authfile_path]
def test_success(
- self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile,
+ mock_run_com,
):
mock_parse.side_effect = self.mock_parse_fn
mock_authfile.side_effect = self.mock_authfile_fn
@@ -308,15 +331,16 @@ class SendAllConfigToNodeTest(TestCase):
"name1.conf": "config1",
"name2.conf": "config2"
}
- self.mock_communicator.call_node.return_value = """
- {
- "existing": [],
- "failed": {},
- "saved": ["name1.conf", "file1.key", "name2.conf", "file2.key"]
- }
- """
+ mock_run_com.return_value = [(
+ self.node,
+ {
+ "existing": [],
+ "failed": {},
+ "saved": ["name1.conf", "file1.key", "name2.conf", "file2.key"]
+ }
+ )]
lib.send_all_config_to_node(
- self.mock_communicator, self.mock_reporter, self.node
+ self.mock_com, self.mock_reporter, self.node
)
self.assertEqual(2, mock_parse.call_count)
mock_parse.assert_has_calls([
@@ -332,42 +356,10 @@ class SendAllConfigToNodeTest(TestCase):
mock.call(self.mock_reporter, "/path/to/file2.key")
])
mock_read_configs.assert_called_once_with(self.mock_reporter, False)
- self.assertEqual(1, self.mock_communicator.call_node.call_count)
- self.assertEqual(
- self.node, self.mock_communicator.call_node.call_args[0][0]
- )
- self.assertEqual(
- "remote/booth_save_files",
- self.mock_communicator.call_node.call_args[0][1]
- )
- data = url_decode(self.mock_communicator.call_node.call_args[0][2])
- self.assertFalse("rewrite_existing" in data)
- self.assertTrue("data_json" in data)
- self.assertEqual(
- [
- {
- "name": "name1.conf",
- "data": "config1",
- "is_authfile": False
- },
- {
- "name": "file1.key",
- "data": to_b64("some key"),
- "is_authfile": True
- },
- {
- "name": "name2.conf",
- "data": "config2",
- "is_authfile": False
- },
- {
- "name": "file2.key",
- "data": to_b64("another key"),
- "is_authfile": True
- }
- ],
- json.loads(data["data_json"][0])
- )
+ communicator, com_cmd = mock_run_com.call_args[0]
+ self.assertEqual(self.mock_com, communicator)
+ self.assertEqual(self.file_list, com_cmd._file_list)
+ self.assertFalse(com_cmd._rewrite_existing)
assert_report_item_list_equal(
self.mock_reporter.report_item_list,
[
@@ -390,7 +382,8 @@ class SendAllConfigToNodeTest(TestCase):
)
def test_do_not_rewrite_existing(
- self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile,
+ mock_run_com,
):
mock_parse.side_effect = self.mock_parse_fn
mock_authfile.side_effect = self.mock_authfile_fn
@@ -399,16 +392,17 @@ class SendAllConfigToNodeTest(TestCase):
"name1.conf": "config1",
"name2.conf": "config2"
}
- self.mock_communicator.call_node.return_value = """
- {
- "existing": ["name1.conf", "file1.key"],
- "failed": {},
- "saved": ["name2.conf", "file2.key"]
- }
- """
+ mock_run_com.return_value = [(
+ self.node,
+ {
+ "existing": ["name1.conf", "file1.key"],
+ "failed": {},
+ "saved": ["name2.conf", "file2.key"]
+ }
+ )]
assert_raise_library_error(
lambda: lib.send_all_config_to_node(
- self.mock_communicator, self.mock_reporter, self.node
+ self.mock_com, self.mock_reporter, self.node
),
(
Severities.ERROR,
@@ -445,42 +439,10 @@ class SendAllConfigToNodeTest(TestCase):
mock.call(self.mock_reporter, "/path/to/file2.key")
])
mock_read_configs.assert_called_once_with(self.mock_reporter, False)
- self.assertEqual(1, self.mock_communicator.call_node.call_count)
- self.assertEqual(
- self.node, self.mock_communicator.call_node.call_args[0][0]
- )
- self.assertEqual(
- "remote/booth_save_files",
- self.mock_communicator.call_node.call_args[0][1]
- )
- data = url_decode(self.mock_communicator.call_node.call_args[0][2])
- self.assertFalse("rewrite_existing" in data)
- self.assertTrue("data_json" in data)
- self.assertEqual(
- [
- {
- "name": "name1.conf",
- "data": "config1",
- "is_authfile": False
- },
- {
- "name": "file1.key",
- "data": to_b64("some key"),
- "is_authfile": True
- },
- {
- "name": "name2.conf",
- "data": "config2",
- "is_authfile": False
- },
- {
- "name": "file2.key",
- "data": to_b64("another key"),
- "is_authfile": True
- }
- ],
- json.loads(data["data_json"][0])
- )
+ communicator, com_cmd = mock_run_com.call_args[0]
+ self.assertEqual(self.mock_com, communicator)
+ self.assertEqual(self.file_list, com_cmd._file_list)
+ self.assertFalse(com_cmd._rewrite_existing)
assert_report_item_list_equal(
self.mock_reporter.report_item_list,
[
@@ -513,7 +475,8 @@ class SendAllConfigToNodeTest(TestCase):
)
def test_rewrite_existing(
- self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile,
+ mock_run_com,
):
mock_parse.side_effect = self.mock_parse_fn
mock_authfile.side_effect = self.mock_authfile_fn
@@ -522,15 +485,16 @@ class SendAllConfigToNodeTest(TestCase):
"name1.conf": "config1",
"name2.conf": "config2"
}
- self.mock_communicator.call_node.return_value = """
- {
- "existing": ["name1.conf", "file1.key"],
- "failed": {},
- "saved": ["name2.conf", "file2.key"]
- }
- """
+ mock_run_com.return_value = [(
+ self.node,
+ {
+ "existing": ["name1.conf", "file1.key"],
+ "failed": {},
+ "saved": ["name2.conf", "file2.key"]
+ }
+ )]
lib.send_all_config_to_node(
- self.mock_communicator,
+ self.mock_com,
self.mock_reporter,
self.node,
rewrite_existing=True
@@ -549,42 +513,10 @@ class SendAllConfigToNodeTest(TestCase):
mock.call(self.mock_reporter, "/path/to/file1.key"),
mock.call(self.mock_reporter, "/path/to/file2.key")
])
- self.assertEqual(1, self.mock_communicator.call_node.call_count)
- self.assertEqual(
- self.node, self.mock_communicator.call_node.call_args[0][0]
- )
- self.assertEqual(
- "remote/booth_save_files",
- self.mock_communicator.call_node.call_args[0][1]
- )
- data = url_decode(self.mock_communicator.call_node.call_args[0][2])
- self.assertTrue("rewrite_existing" in data)
- self.assertTrue("data_json" in data)
- self.assertEqual(
- [
- {
- "name": "name1.conf",
- "data": "config1",
- "is_authfile": False
- },
- {
- "name": "file1.key",
- "data": to_b64("some key"),
- "is_authfile": True
- },
- {
- "name": "name2.conf",
- "data": "config2",
- "is_authfile": False
- },
- {
- "name": "file2.key",
- "data": to_b64("another key"),
- "is_authfile": True
- }
- ],
- json.loads(data["data_json"][0])
- )
+ communicator, com_cmd = mock_run_com.call_args[0]
+ self.assertEqual(self.mock_com, communicator)
+ self.assertEqual(self.file_list, com_cmd._file_list)
+ self.assertTrue(com_cmd._rewrite_existing)
assert_report_item_list_equal(
self.mock_reporter.report_item_list,
[
@@ -623,7 +555,8 @@ class SendAllConfigToNodeTest(TestCase):
)
def test_write_failure(
- self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile,
+ mock_run_com,
):
mock_parse.side_effect = self.mock_parse_fn
mock_authfile.side_effect = self.mock_authfile_fn
@@ -632,19 +565,20 @@ class SendAllConfigToNodeTest(TestCase):
"name1.conf": "config1",
"name2.conf": "config2"
}
- self.mock_communicator.call_node.return_value = """
- {
- "existing": [],
- "failed": {
- "name1.conf": "Error message",
- "file1.key": "Another error message"
- },
- "saved": ["name2.conf", "file2.key"]
- }
- """
+ mock_run_com.return_value = [(
+ self.node,
+ {
+ "existing": [],
+ "failed": {
+ "name1.conf": "Error message",
+ "file1.key": "Another error message"
+ },
+ "saved": ["name2.conf", "file2.key"]
+ }
+ )]
assert_raise_library_error(
lambda: lib.send_all_config_to_node(
- self.mock_communicator, self.mock_reporter, self.node
+ self.mock_com, self.mock_reporter, self.node
),
(
Severities.ERROR,
@@ -679,42 +613,10 @@ class SendAllConfigToNodeTest(TestCase):
mock.call(self.mock_reporter, "/path/to/file2.key")
])
mock_read_configs.assert_called_once_with(self.mock_reporter, False)
- self.assertEqual(1, self.mock_communicator.call_node.call_count)
- self.assertEqual(
- self.node, self.mock_communicator.call_node.call_args[0][0]
- )
- self.assertEqual(
- "remote/booth_save_files",
- self.mock_communicator.call_node.call_args[0][1]
- )
- data = url_decode(self.mock_communicator.call_node.call_args[0][2])
- self.assertFalse("rewrite_existing" in data)
- self.assertTrue("data_json" in data)
- self.assertEqual(
- [
- {
- "name": "name1.conf",
- "data": "config1",
- "is_authfile": False
- },
- {
- "name": "file1.key",
- "data": to_b64("some key"),
- "is_authfile": True
- },
- {
- "name": "name2.conf",
- "data": "config2",
- "is_authfile": False
- },
- {
- "name": "file2.key",
- "data": to_b64("another key"),
- "is_authfile": True
- }
- ],
- json.loads(data["data_json"][0])
- )
+ communicator, com_cmd = mock_run_com.call_args[0]
+ self.assertEqual(self.mock_com, communicator)
+ self.assertEqual(self.file_list, com_cmd._file_list)
+ self.assertFalse(com_cmd._rewrite_existing)
assert_report_item_list_equal(
self.mock_reporter.report_item_list,
[
@@ -744,6 +646,7 @@ class SendAllConfigToNodeTest(TestCase):
]
)
+ @skip("TODO: rewrite for pcs.lib.communication.booth.BoothSaveFiles")
def test_communication_failure(
self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
):
@@ -822,6 +725,7 @@ class SendAllConfigToNodeTest(TestCase):
json.loads(data["data_json"][0])
)
+ @skip("TODO: rewrite for pcs.lib.communication.booth.BoothSaveFiles")
def test_wrong_response_format(
self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
):
@@ -903,6 +807,7 @@ class SendAllConfigToNodeTest(TestCase):
json.loads(data["data_json"][0])
)
+ @skip("TODO: rewrite for pcs.lib.communication.booth.BoothSaveFiles")
def test_response_not_json(
self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
):
@@ -975,9 +880,9 @@ class SendAllConfigToNodeTest(TestCase):
json.loads(data["data_json"][0])
)
-
def test_configs_without_authfiles(
- self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile,
+ mock_run_com
):
def mock_authfile_fn(parsed_config):
if parsed_config == "config1":
@@ -996,15 +901,16 @@ class SendAllConfigToNodeTest(TestCase):
"name1.conf": "config1",
"name2.conf": "config2"
}
- self.mock_communicator.call_node.return_value = """
- {
- "existing": [],
- "failed": {},
- "saved": ["name1.conf", "name2.conf", "file2.key"]
- }
- """
+ mock_run_com.return_value = [(
+ self.node,
+ {
+ "existing": [],
+ "failed": {},
+ "saved": ["name1.conf", "name2.conf", "file2.key"]
+ }
+ )]
lib.send_all_config_to_node(
- self.mock_communicator, self.mock_reporter, self.node
+ self.mock_com, self.mock_reporter, self.node
)
self.assertEqual(2, mock_parse.call_count)
mock_parse.assert_has_calls([
@@ -1018,37 +924,27 @@ class SendAllConfigToNodeTest(TestCase):
self.mock_reporter, "/path/to/file2.key"
)
mock_read_configs.assert_called_once_with(self.mock_reporter, False)
- self.assertEqual(1, self.mock_communicator.call_node.call_count)
- self.assertEqual(
- self.node, self.mock_communicator.call_node.call_args[0][0]
- )
- self.assertEqual(
- "remote/booth_save_files",
- self.mock_communicator.call_node.call_args[0][1]
- )
- data = url_decode(self.mock_communicator.call_node.call_args[0][2])
- self.assertFalse("rewrite_existing" in data)
- self.assertTrue("data_json" in data)
- self.assertEqual(
- [
- {
- "name": "name1.conf",
- "data": "config1",
- "is_authfile": False
- },
- {
- "name": "name2.conf",
- "data": "config2",
- "is_authfile": False
- },
- {
- "name": "file2.key",
- "data": to_b64("another key"),
- "is_authfile": True
- }
- ],
- json.loads(data["data_json"][0])
- )
+ expected_file_list = [
+ {
+ "name": "name1.conf",
+ "data": "config1",
+ "is_authfile": False
+ },
+ {
+ "name": "name2.conf",
+ "data": "config2",
+ "is_authfile": False
+ },
+ {
+ "name": "file2.key",
+ "data": to_b64("another key"),
+ "is_authfile": True
+ }
+ ]
+ communicator, com_cmd = mock_run_com.call_args[0]
+ self.assertEqual(self.mock_com, communicator)
+ self.assertEqual(expected_file_list, com_cmd._file_list)
+ self.assertFalse(com_cmd._rewrite_existing)
assert_report_item_list_equal(
self.mock_reporter.report_item_list,
[
@@ -1069,7 +965,8 @@ class SendAllConfigToNodeTest(TestCase):
)
def test_unable_to_parse_config(
- self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile,
+ mock_run_com,
):
def mock_parse_fn(config_data):
if config_data == "config1":
@@ -1088,15 +985,16 @@ class SendAllConfigToNodeTest(TestCase):
"name1.conf": "config1",
"name2.conf": "config2"
}
- self.mock_communicator.call_node.return_value = """
- {
- "existing": [],
- "failed": {},
- "saved": ["name2.conf", "file2.key"]
- }
- """
+ mock_run_com.return_value = [(
+ self.node,
+ {
+ "existing": [],
+ "failed": {},
+ "saved": ["name2.conf", "file2.key"]
+ }
+ )]
lib.send_all_config_to_node(
- self.mock_communicator, self.mock_reporter, self.node
+ self.mock_com, self.mock_reporter, self.node
)
self.assertEqual(2, mock_parse.call_count)
mock_parse.assert_has_calls([
@@ -1107,32 +1005,22 @@ class SendAllConfigToNodeTest(TestCase):
self.mock_reporter, "/path/to/file2.key"
)
mock_read_configs.assert_called_once_with(self.mock_reporter, False)
- self.assertEqual(1, self.mock_communicator.call_node.call_count)
- self.assertEqual(
- self.node, self.mock_communicator.call_node.call_args[0][0]
- )
- self.assertEqual(
- "remote/booth_save_files",
- self.mock_communicator.call_node.call_args[0][1]
- )
- data = url_decode(self.mock_communicator.call_node.call_args[0][2])
- self.assertFalse("rewrite_existing" in data)
- self.assertTrue("data_json" in data)
- self.assertEqual(
- [
- {
- "name": "name2.conf",
- "data": "config2",
- "is_authfile": False
- },
- {
- "name": "file2.key",
- "data": to_b64("another key"),
- "is_authfile": True
- }
- ],
- json.loads(data["data_json"][0])
- )
+ expected_file_list = [
+ {
+ "name": "name2.conf",
+ "data": "config2",
+ "is_authfile": False
+ },
+ {
+ "name": "file2.key",
+ "data": to_b64("another key"),
+ "is_authfile": True
+ }
+ ]
+ communicator, com_cmd = mock_run_com.call_args[0]
+ self.assertEqual(self.mock_com, communicator)
+ self.assertEqual(expected_file_list, com_cmd._file_list)
+ self.assertFalse(com_cmd._rewrite_existing)
assert_report_item_list_equal(
self.mock_reporter.report_item_list,
[
@@ -1159,7 +1047,7 @@ class SendAllConfigToNodeTest(TestCase):
]
)
-
+@skip("TODO: rewrite for pcs.lib.communication.booth.BoothGetConfig")
class PullConfigFromNodeTest(TestCase):
def setUp(self):
self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
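The rewritten tests no longer decode an HTTP payload with url_decode; they patch run_and_raise and assert on the command object it receives. Condensed to its core, with the names used in the tests above:

    mock_run_com.return_value = [(self.node, {
        "existing": [], "failed": {}, "saved": ["name1.conf"]
    })]
    lib.send_all_config_to_node(self.mock_com, self.mock_reporter, self.node)
    # inspect the positional arguments run_and_raise was called with
    communicator, com_cmd = mock_run_com.call_args[0]
    self.assertEqual(self.mock_com, communicator)
    self.assertEqual(self.file_list, com_cmd._file_list)
    self.assertFalse(com_cmd._rewrite_existing)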
diff --git a/pcs/lib/cib/acl.py b/pcs/lib/cib/acl.py
index 073763b..59fdf13 100644
--- a/pcs/lib/cib/acl.py
+++ b/pcs/lib/cib/acl.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
diff --git a/pcs/lib/cib/alert.py b/pcs/lib/cib/alert.py
index 3ed7186..625ac51 100644
--- a/pcs/lib/cib/alert.py
+++ b/pcs/lib/cib/alert.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/constraint/colocation.py b/pcs/lib/cib/constraint/colocation.py
index 3f121c2..8006a36 100644
--- a/pcs/lib/cib/constraint/colocation.py
+++ b/pcs/lib/cib/constraint/colocation.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
diff --git a/pcs/lib/cib/constraint/constraint.py b/pcs/lib/cib/constraint/constraint.py
index d7c16bc..f5075f8 100644
--- a/pcs/lib/cib/constraint/constraint.py
+++ b/pcs/lib/cib/constraint/constraint.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
diff --git a/pcs/lib/cib/constraint/order.py b/pcs/lib/cib/constraint/order.py
index 7823bcd..4c520ae 100644
--- a/pcs/lib/cib/constraint/order.py
+++ b/pcs/lib/cib/constraint/order.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
diff --git a/pcs/lib/cib/constraint/resource_set.py b/pcs/lib/cib/constraint/resource_set.py
index 1ef1c6c..8eba876 100644
--- a/pcs/lib/cib/constraint/resource_set.py
+++ b/pcs/lib/cib/constraint/resource_set.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/constraint/ticket.py b/pcs/lib/cib/constraint/ticket.py
index ad5a64f..a2dbd5f 100644
--- a/pcs/lib/cib/constraint/ticket.py
+++ b/pcs/lib/cib/constraint/ticket.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
@@ -13,6 +12,7 @@ from pcs.lib import reports
from pcs.lib.cib.constraint import constraint
from pcs.lib.cib import tools
from pcs.lib.errors import LibraryError
+from pcs.lib.xml_tools import remove_when_pointless
TAG_NAME = 'rsc_ticket'
DESCRIPTION = "constraint id"
@@ -127,8 +127,10 @@ def remove_with_resource_set(constraint_section, ticket_key, resource_id):
if not len(set_element):
ticket_element = set_element.getparent()
ticket_element.remove(set_element)
- if not len(ticket_element):
- ticket_element.getparent().remove(ticket_element)
+ #We do not care about attributes since without an attribute "rsc"
+ #they are pointless. Attribute "rsc" is mutually exclusive with
+ #resource_set (see rng) so it cannot be in this ticket_element.
+ remove_when_pointless(ticket_element, attribs_important=False)
return len(ref_element_list) > 0
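remove_when_pointless itself is not part of this diff; judging from its call sites here and in nvpair.py and bundle.py below, it detaches an element that has no children and, unless attribs_important is False, no meaningful attributes. A rough sketch of that assumed behaviour:

    def remove_when_pointless(element, attribs_important=True):
        # assumed semantics, inferred from the call sites in this diff;
        # the real helper lives in pcs.lib.xml_tools
        if len(element):
            return  # still has children, keep it
        if attribs_important and element.attrib:
            return  # attributes may carry meaning, keep it
        element.getparent().remove(element)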
diff --git a/pcs/lib/cib/fencing_topology.py b/pcs/lib/cib/fencing_topology.py
index 7e2feb6..003178b 100644
--- a/pcs/lib/cib/fencing_topology.py
+++ b/pcs/lib/cib/fencing_topology.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/node.py b/pcs/lib/cib/node.py
index c5b059f..627c014 100644
--- a/pcs/lib/cib/node.py
+++ b/pcs/lib/cib/node.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/nvpair.py b/pcs/lib/cib/nvpair.py
index d3f5a5c..c88a96b 100644
--- a/pcs/lib/cib/nvpair.py
+++ b/pcs/lib/cib/nvpair.py
@@ -2,14 +2,16 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
from functools import partial
from pcs.lib.cib.tools import create_subelement_id
-from pcs.lib.xml_tools import get_sub_element
+from pcs.lib.xml_tools import (
+ get_sub_element,
+ remove_when_pointless,
+)
def _append_new_nvpair(nvset_element, name, value, id_provider=None):
"""
@@ -47,7 +49,7 @@ def set_nvpair_in_nvset(nvset_element, name, value):
else:
nvset_element.remove(nvpair)
-def arrange_first_nvset(tag_name, context_element, nvpair_dict):
+def arrange_first_nvset(tag_name, context_element, nvpair_dict, new_id=None):
"""
Put nvpairs to the first tag_name nvset in the context_element.
@@ -68,7 +70,7 @@ def arrange_first_nvset(tag_name, context_element, nvpair_dict):
nvset_element = get_sub_element(
context_element,
tag_name,
- create_subelement_id(context_element, tag_name),
+ new_id if new_id else create_subelement_id(context_element, tag_name),
new_index=0
)
@@ -111,9 +113,7 @@ def update_nvset(nvset_element, nvpair_dict):
"""
for name, value in sorted(nvpair_dict.items()):
set_nvpair_in_nvset(nvset_element, name, value)
- # remove an empty nvset
- if not list(nvset_element):
- nvset_element.getparent().remove(nvset_element)
+ remove_when_pointless(nvset_element)
def get_nvset(nvset):
"""
diff --git a/pcs/lib/cib/resource/__init__.py b/pcs/lib/cib/resource/__init__.py
index 803bb8b..620af42 100644
--- a/pcs/lib/cib/resource/__init__.py
+++ b/pcs/lib/cib/resource/__init__.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.lib.cib.resource import (
diff --git a/pcs/lib/cib/resource/bundle.py b/pcs/lib/cib/resource/bundle.py
index 8a49c28..a159c1d 100644
--- a/pcs/lib/cib/resource/bundle.py
+++ b/pcs/lib/cib/resource/bundle.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
@@ -23,6 +22,7 @@ from pcs.lib.pacemaker.values import sanitize_id
from pcs.lib.xml_tools import (
get_sub_element,
update_attributes_remove_empty,
+ remove_when_pointless,
)
TAG = "bundle"
@@ -267,9 +267,8 @@ def update(
# remove empty elements with no attributes
# meta attributes are handled in their own function
- for element in (network_element, storage_element):
- if len(element) < 1 and not element.attrib:
- element.getparent().remove(element)
+ remove_when_pointless(network_element)
+ remove_when_pointless(storage_element)
def add_resource(bundle_element, primitive_element):
"""
diff --git a/pcs/lib/cib/resource/clone.py b/pcs/lib/cib/resource/clone.py
index cf0bca4..7f105f8 100644
--- a/pcs/lib/cib/resource/clone.py
+++ b/pcs/lib/cib/resource/clone.py
@@ -7,7 +7,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/resource/common.py b/pcs/lib/cib/resource/common.py
index 0e52b4c..cc4bb1d 100644
--- a/pcs/lib/cib/resource/common.py
+++ b/pcs/lib/cib/resource/common.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
diff --git a/pcs/lib/cib/resource/group.py b/pcs/lib/cib/resource/group.py
index 75a34ee..37c9df0 100644
--- a/pcs/lib/cib/resource/group.py
+++ b/pcs/lib/cib/resource/group.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/resource/guest_node.py b/pcs/lib/cib/resource/guest_node.py
index cead431..3a3473f 100644
--- a/pcs/lib/cib/resource/guest_node.py
+++ b/pcs/lib/cib/resource/guest_node.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.lib import reports, validate
@@ -17,6 +16,7 @@ from pcs.lib.node import (
node_addresses_contain_host,
node_addresses_contain_name,
)
+from pcs.lib.xml_tools import remove_when_pointless
#TODO pcs currently does not care about multiple meta_attributes and here
@@ -144,8 +144,7 @@ def unset_guest(resource_element):
for nvpair in guest_nvpair_list:
meta_attributes = nvpair.getparent()
meta_attributes.remove(nvpair)
- if not len(meta_attributes):
- meta_attributes.getparent().remove(meta_attributes)
+ remove_when_pointless(meta_attributes)
def get_node(meta_attributes):
"""
diff --git a/pcs/lib/cib/resource/operations.py b/pcs/lib/cib/resource/operations.py
index a34db45..75329cf 100644
--- a/pcs/lib/cib/resource/operations.py
+++ b/pcs/lib/cib/resource/operations.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from collections import defaultdict
diff --git a/pcs/lib/cib/resource/primitive.py b/pcs/lib/cib/resource/primitive.py
index 0560182..b386850 100644
--- a/pcs/lib/cib/resource/primitive.py
+++ b/pcs/lib/cib/resource/primitive.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/resource/remote_node.py b/pcs/lib/cib/resource/remote_node.py
index 36db850..423ce83 100644
--- a/pcs/lib/cib/resource/remote_node.py
+++ b/pcs/lib/cib/resource/remote_node.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.common import report_codes
diff --git a/pcs/lib/cib/sections.py b/pcs/lib/cib/sections.py
new file mode 100644
index 0000000..f48d644
--- /dev/null
+++ b/pcs/lib/cib/sections.py
@@ -0,0 +1,64 @@
+"""
+This module defines mandatory and optional cib sections. It provides a
+function for getting an existing section from the cib (lxml) tree.
+"""
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+from pcs.lib.xml_tools import get_sub_element
+
+
+CONFIGURATION = "configuration"
+CONSTRAINTS = "configuration/constraints"
+NODES = "configuration/nodes"
+RESOURCES = "configuration/resources"
+
+ACLS = "acls"
+ALERTS = "alerts"
+FENCING_TOPOLOGY = "fencing-topology"
+OP_DEFAULTS = "op_defaults"
+RSC_DEFAULTS = "rsc_defaults"
+
+__MANDATORY_SECTIONS = [
+ CONFIGURATION,
+ CONSTRAINTS,
+ NODES,
+ RESOURCES,
+]
+
+__OPTIONAL_SECTIONS = [
+ ACLS,
+ ALERTS,
+ FENCING_TOPOLOGY,
+ OP_DEFAULTS,
+ RSC_DEFAULTS,
+]
+
+def get(tree, section_name):
+ """
+ Return the element which represents section 'section_name' in the tree.
+
+ If the section is mandatory and is not found in the tree, this function
+ raises a LibraryError.
+ If the section is optional and is not found in the tree, this function
+ creates a new section.
+
+ lxml.etree.Element tree -- the tree in which the section is looked up
+ string section_name -- name of the desired section; it is strongly
+ recommended to use the constants defined in this module
+ """
+ if section_name in __MANDATORY_SECTIONS:
+ section = tree.find(".//{0}".format(section_name))
+ if section is not None:
+ return section
+ raise LibraryError(reports.cib_missing_mandatory_section(section_name))
+
+ if section_name in __OPTIONAL_SECTIONS:
+ return get_sub_element(get(tree, CONFIGURATION), section_name)
+
+ raise AssertionError("Unknown cib section '{0}'".format(section_name))
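The contract is easiest to see in use: a mandatory section must already be present, while an optional one is created under configuration on demand. A short sketch mirroring the tests added below:

    from lxml import etree
    from pcs.lib.cib import sections

    cib = etree.fromstring("<cib><configuration/></cib>")
    alerts = sections.get(cib, sections.ALERTS)  # creates configuration/alerts
    sections.get(cib, sections.NODES)  # mandatory and missing: LibraryError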
diff --git a/pcs/lib/cib/stonith.py b/pcs/lib/cib/stonith.py
index d588c4d..47f7555 100644
--- a/pcs/lib/cib/stonith.py
+++ b/pcs/lib/cib/stonith.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
# TODO replace by the new finding function
diff --git a/pcs/lib/cib/test/test_acl.py b/pcs/lib/cib/test/test_acl.py
index 7171a32..cda52bb 100644
--- a/pcs/lib/cib/test/test_acl.py
+++ b/pcs/lib/cib/test/test_acl.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/test/test_alert.py b/pcs/lib/cib/test/test_alert.py
index ca0bd2b..37eea67 100644
--- a/pcs/lib/cib/test/test_alert.py
+++ b/pcs/lib/cib/test/test_alert.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/lib/cib/test/test_constraint.py b/pcs/lib/cib/test/test_constraint.py
index fe9cc42..00c8ea8 100644
--- a/pcs/lib/cib/test/test_constraint.py
+++ b/pcs/lib/cib/test/test_constraint.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
diff --git a/pcs/lib/cib/test/test_constraint_colocation.py b/pcs/lib/cib/test/test_constraint_colocation.py
index 1b38253..56d74df 100644
--- a/pcs/lib/cib/test/test_constraint_colocation.py
+++ b/pcs/lib/cib/test/test_constraint_colocation.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/lib/cib/test/test_constraint_order.py b/pcs/lib/cib/test/test_constraint_order.py
index 90373b2..e273016 100644
--- a/pcs/lib/cib/test/test_constraint_order.py
+++ b/pcs/lib/cib/test/test_constraint_order.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py
index 5a39388..6026e3e 100644
--- a/pcs/lib/cib/test/test_constraint_ticket.py
+++ b/pcs/lib/cib/test/test_constraint_ticket.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
diff --git a/pcs/lib/cib/test/test_fencing_topology.py b/pcs/lib/cib/test/test_fencing_topology.py
index 52cf724..b95266d 100644
--- a/pcs/lib/cib/test/test_fencing_topology.py
+++ b/pcs/lib/cib/test/test_fencing_topology.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/test/test_node.py b/pcs/lib/cib/test/test_node.py
index bd5a309..693976b 100644
--- a/pcs/lib/cib/test/test_node.py
+++ b/pcs/lib/cib/test/test_node.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/test/test_nvpair.py b/pcs/lib/cib/test/test_nvpair.py
index 0f6d8f8..89f37c3 100644
--- a/pcs/lib/cib/test/test_nvpair.py
+++ b/pcs/lib/cib/test/test_nvpair.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/test/test_resource_bundle.py b/pcs/lib/cib/test/test_resource_bundle.py
index 37b1d8e..c54f89e 100644
--- a/pcs/lib/cib/test/test_resource_bundle.py
+++ b/pcs/lib/cib/test/test_resource_bundle.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/test/test_resource_clone.py b/pcs/lib/cib/test/test_resource_clone.py
index dcdbb9a..801a4ac 100644
--- a/pcs/lib/cib/test/test_resource_clone.py
+++ b/pcs/lib/cib/test/test_resource_clone.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/test/test_resource_common.py b/pcs/lib/cib/test/test_resource_common.py
index 6b485f7..545818e 100644
--- a/pcs/lib/cib/test/test_resource_common.py
+++ b/pcs/lib/cib/test/test_resource_common.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/test/test_resource_group.py b/pcs/lib/cib/test/test_resource_group.py
index e128295..ffba240 100644
--- a/pcs/lib/cib/test/test_resource_group.py
+++ b/pcs/lib/cib/test/test_resource_group.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/test/test_resource_guest_node.py b/pcs/lib/cib/test/test_resource_guest_node.py
index d9f0b72..64932c3 100644
--- a/pcs/lib/cib/test/test_resource_guest_node.py
+++ b/pcs/lib/cib/test/test_resource_guest_node.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/test/test_resource_operations.py b/pcs/lib/cib/test/test_resource_operations.py
index 42fa49d..72f9e40 100644
--- a/pcs/lib/cib/test/test_resource_operations.py
+++ b/pcs/lib/cib/test/test_resource_operations.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
diff --git a/pcs/lib/cib/test/test_resource_primitive.py b/pcs/lib/cib/test/test_resource_primitive.py
index ed6ee63..26be8df 100644
--- a/pcs/lib/cib/test/test_resource_primitive.py
+++ b/pcs/lib/cib/test/test_resource_primitive.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
diff --git a/pcs/lib/cib/test/test_resource_remote_node.py b/pcs/lib/cib/test/test_resource_remote_node.py
index dd3569b..4d0942d 100644
--- a/pcs/lib/cib/test/test_resource_remote_node.py
+++ b/pcs/lib/cib/test/test_resource_remote_node.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/cib/test/test_resource_set.py b/pcs/lib/cib/test/test_resource_set.py
index 05f5831..9282b19 100644
--- a/pcs/lib/cib/test/test_resource_set.py
+++ b/pcs/lib/cib/test/test_resource_set.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/lib/cib/test/test_sections.py b/pcs/lib/cib/test/test_sections.py
new file mode 100644
index 0000000..24661fa
--- /dev/null
+++ b/pcs/lib/cib/test/test_sections.py
@@ -0,0 +1,78 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.cib import sections
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import (
+ assert_xml_equal,
+ assert_raise_library_error
+)
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.xml import etree_to_str
+
+
+class Get(TestCase):
+ def setUp(self):
+ self.tree = etree.fromstring(
+ """
+ <cib>
+ <configuration>
+ <acls/>
+ </configuration>
+ </cib>
+ """
+ )
+
+ def assert_element_content(self, section_element, expected_xml):
+ assert_xml_equal(etree_to_str(section_element), expected_xml)
+
+ def test_get_existing_mandatory(self):
+ self.assert_element_content(
+ sections.get(self.tree, sections.CONFIGURATION),
+ """
+ <configuration>
+ <acls/>
+ </configuration>
+ """
+ )
+
+ def test_get_existing_optional(self):
+ self.assert_element_content(
+ sections.get(self.tree, sections.ACLS),
+ "<acls/>"
+ )
+
+ def test_get_no_existing_optional(self):
+ self.assert_element_content(
+ sections.get(self.tree, sections.ALERTS),
+ "<alerts/>"
+ )
+ self.assert_element_content(
+ self.tree,
+ """
+ <cib>
+ <configuration>
+ <acls/>
+ <alerts/>
+ </configuration>
+ </cib>
+ """
+ )
+
+ def test_raises_on_no_existing_mandatory_section(self):
+ assert_raise_library_error(
+ lambda: sections.get(self.tree, sections.NODES),
+ (
+ severities.ERROR,
+ report_codes.CIB_CANNOT_FIND_MANDATORY_SECTION,
+ {
+ "section": "configuration/nodes",
+ }
+ ),
+ )
diff --git a/pcs/lib/cib/test/test_tools.py b/pcs/lib/cib/test/test_tools.py
index 9db8dae..e495cff 100644
--- a/pcs/lib/cib/test/test_tools.py
+++ b/pcs/lib/cib/test/test_tools.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
index cf91125..3af23f6 100644
--- a/pcs/lib/cib/tools.py
+++ b/pcs/lib/cib/tools.py
@@ -2,19 +2,19 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import re
from pcs.common.tools import is_string
from pcs.lib import reports
+from pcs.lib.cib import sections
from pcs.lib.errors import LibraryError
-from pcs.lib.pacemaker.values import validate_id
-from pcs.lib.xml_tools import (
- get_root,
- get_sub_element,
+from pcs.lib.pacemaker.values import (
+ sanitize_id,
+ validate_id,
)
+from pcs.lib.xml_tools import get_root
class IdProvider(object):
"""
@@ -178,7 +178,9 @@ def find_element_by_tag_and_id(
)
def create_subelement_id(context_element, suffix, id_provider=None):
- proposed_id = "{0}-{1}".format(context_element.get("id"), suffix)
+ proposed_id = sanitize_id(
+ "{0}-{1}".format(context_element.get("id"), suffix)
+ )
if id_provider:
return id_provider.allocate_id(proposed_id)
return find_unique_id(context_element, proposed_id)
@@ -187,64 +189,54 @@ def check_new_id_applicable(tree, description, id):
validate_id(id, description)
validate_id_does_not_exist(tree, id)
-def _get_mandatory_section(tree, section_name):
- """
- Return required element from tree, raise LibraryError if missing
- tree cib etree node
- """
- section = tree.find(".//{0}".format(section_name))
- if section is not None:
- return section
- raise LibraryError(reports.cib_missing_mandatory_section(section_name))
-
def get_configuration(tree):
"""
Return 'configuration' element from tree, raise LibraryError if missing
tree cib etree node
"""
- return _get_mandatory_section(tree, "configuration")
+ return sections.get(tree, sections.CONFIGURATION)
def get_acls(tree):
"""
Return 'acls' element from tree, create a new one if missing
tree cib etree node
"""
- return get_sub_element(get_configuration(tree), "acls")
+ return sections.get(tree, sections.ACLS)
def get_alerts(tree):
"""
Return 'alerts' element from tree, create a new one if missing
tree -- cib etree node
"""
- return get_sub_element(get_configuration(tree), "alerts")
+ return sections.get(tree, sections.ALERTS)
def get_constraints(tree):
"""
Return 'constraint' element from tree
tree cib etree node
"""
- return _get_mandatory_section(tree, "configuration/constraints")
+ return sections.get(tree, sections.CONSTRAINTS)
def get_fencing_topology(tree):
"""
Return the 'fencing-topology' element from the tree
tree -- cib etree node
"""
- return get_sub_element(get_configuration(tree), "fencing-topology")
+ return sections.get(tree, sections.FENCING_TOPOLOGY)
def get_nodes(tree):
"""
Return 'nodes' element from the tree
tree cib etree node
"""
- return _get_mandatory_section(tree, "configuration/nodes")
+ return sections.get(tree, sections.NODES)
def get_resources(tree):
"""
Return the 'resources' element from the tree
tree -- cib etree node
"""
- return _get_mandatory_section(tree, "configuration/resources")
+ return sections.get(tree, sections.RESOURCES)
def get_pacemaker_version_by_which_cib_was_validated(cib):
"""
diff --git a/pcs/lib/cluster_conf_facade.py b/pcs/lib/cluster_conf_facade.py
index 5f49c0f..e7f9488 100644
--- a/pcs/lib/cluster_conf_facade.py
+++ b/pcs/lib/cluster_conf_facade.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/lib/commands/acl.py b/pcs/lib/commands/acl.py
index 3c368de..37e3f14 100644
--- a/pcs/lib/commands/acl.py
+++ b/pcs/lib/commands/acl.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from contextlib import contextmanager
@@ -15,9 +14,8 @@ REQUIRED_CIB_VERSION = (2, 0, 0)
@contextmanager
def cib_acl_section(env):
- cib = env.get_cib(REQUIRED_CIB_VERSION)
- yield get_acls(cib)
- env.push_cib(cib)
+ yield get_acls(env.get_cib(REQUIRED_CIB_VERSION))
+ env.push_cib()
def create_role(lib_env, role_id, permission_info_list, description):
"""
diff --git a/pcs/lib/commands/alert.py b/pcs/lib/commands/alert.py
index a11d0a7..73f6f1e 100644
--- a/pcs/lib/commands/alert.py
+++ b/pcs/lib/commands/alert.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.lib import reports
@@ -35,13 +34,17 @@ def create_alert(
if not path:
raise LibraryError(reports.required_option_is_missing(["path"]))
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- alert_el = alert.create_alert(cib, alert_id, path, description)
+ alert_el = alert.create_alert(
+ lib_env.get_cib(REQUIRED_CIB_VERSION),
+ alert_id,
+ path,
+ description
+ )
alert.update_instance_attributes(alert_el, instance_attribute_dict)
alert.update_meta_attributes(alert_el, meta_attribute_dict)
- lib_env.push_cib(cib)
+ lib_env.push_cib()
def update_alert(
@@ -63,13 +66,17 @@ def update_alert(
description -- new description, if empty string, old description will be
deleted, if None old value will stay unchanged
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- alert_el = alert.update_alert(cib, alert_id, path, description)
+ alert_el = alert.update_alert(
+ lib_env.get_cib(REQUIRED_CIB_VERSION),
+ alert_id,
+ path,
+ description
+ )
alert.update_instance_attributes(alert_el, instance_attribute_dict)
alert.update_meta_attributes(alert_el, meta_attribute_dict)
- lib_env.push_cib(cib)
+ lib_env.push_cib()
def remove_alert(lib_env, alert_id_list):
@@ -88,7 +95,7 @@ def remove_alert(lib_env, alert_id_list):
report_list += e.args
lib_env.report_processor.process_list(report_list)
- lib_env.push_cib(cib)
+ lib_env.push_cib()
def add_recipient(
@@ -118,10 +125,9 @@ def add_recipient(
reports.required_option_is_missing(["value"])
)
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
recipient = alert.add_recipient(
lib_env.report_processor,
- cib,
+ lib_env.get_cib(REQUIRED_CIB_VERSION),
alert_id,
recipient_value,
recipient_id=recipient_id,
@@ -131,7 +137,7 @@ def add_recipient(
alert.update_instance_attributes(recipient, instance_attribute_dict)
alert.update_meta_attributes(recipient, meta_attribute_dict)
- lib_env.push_cib(cib)
+ lib_env.push_cib()
def update_recipient(
@@ -160,10 +166,9 @@ def update_recipient(
raise LibraryError(
reports.cib_alert_recipient_invalid_value(recipient_value)
)
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
recipient = alert.update_recipient(
lib_env.report_processor,
- cib,
+ lib_env.get_cib(REQUIRED_CIB_VERSION),
recipient_id,
recipient_value=recipient_value,
description=description,
@@ -172,7 +177,7 @@ def update_recipient(
alert.update_instance_attributes(recipient, instance_attribute_dict)
alert.update_meta_attributes(recipient, meta_attribute_dict)
- lib_env.push_cib(cib)
+ lib_env.push_cib()
def remove_recipient(lib_env, recipient_id_list):
@@ -190,7 +195,7 @@ def remove_recipient(lib_env, recipient_id_list):
except LibraryError as e:
report_list += e.args
lib_env.report_processor.process_list(report_list)
- lib_env.push_cib(cib)
+ lib_env.push_cib()
def get_all_alerts(lib_env):
diff --git a/pcs/lib/commands/booth.py b/pcs/lib/commands/booth.py
index 73a3853..debfeb1 100644
--- a/pcs/lib/commands/booth.py
+++ b/pcs/lib/commands/booth.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import base64
@@ -20,13 +19,16 @@ from pcs.lib.booth import (
reports as booth_reports,
resource,
status,
- sync,
)
from pcs.lib.booth.config_parser import parse, build
from pcs.lib.booth.env import get_config_file_name
from pcs.lib.cib.tools import get_resources
+from pcs.lib.communication.booth import (
+ BoothGetConfig,
+ BoothSendConfig,
+)
+from pcs.lib.communication.tools import run_and_raise
from pcs.lib.errors import LibraryError, ReportItemSeverity
-from pcs.lib.node import NodeAddresses
from pcs.lib.resource_agent import find_valid_resource_agent_by_name
@@ -112,9 +114,11 @@ def config_text(env, name, node_name=None):
# TODO add name support
return env.booth.get_config_content()
- remote_data = sync.pull_config_from_node(
- env.node_communicator(), NodeAddresses(node_name), name
- )
+ com_cmd = BoothGetConfig(env.report_processor, name)
+ com_cmd.set_targets([
+ env.get_node_target_factory().get_target_from_hostname(node_name)
+ ])
+ remote_data = run_and_raise(env.get_node_communicator(), com_cmd)[0][1]
try:
return remote_data["config"]["data"]
except KeyError:
@@ -157,8 +161,7 @@ def create_in_cluster(env, name, ip, allow_absent_resource_agent=False):
bool allow_absent_resource_agent is a flag allowing creation of the booth
resource even if its agent is not installed
"""
- cib = env.get_cib()
- resources_section = get_resources(cib)
+ resources_section = get_resources(env.get_cib())
booth_config_file_path = get_config_file_name(name)
if resource.find_for_config(resources_section, booth_config_file_path):
@@ -196,7 +199,7 @@ def create_in_cluster(env, name, ip, allow_absent_resource_agent=False):
instance_attributes={"config": booth_config_file_path},
))
- env.push_cib(cib)
+ env.push_cib()
def remove_from_cluster(env, name, resource_remove, allow_remove_multiple):
#TODO resource_remove is provisional hack until resources are not moved to
@@ -257,17 +260,20 @@ def config_sync(env, name, skip_offline_nodes=False):
authfile_content = config_files.read_authfile(
env.report_processor, authfile_path
)
-
- sync.send_config_to_all_nodes(
- env.node_communicator(),
+ com_cmd = BoothSendConfig(
env.report_processor,
- env.get_corosync_conf().get_nodes(),
name,
config,
authfile=authfile_path,
authfile_data=authfile_content,
- skip_offline=skip_offline_nodes
+ skip_offline_targets=skip_offline_nodes
)
+ com_cmd.set_targets(
+ env.get_node_target_factory().get_target_list(
+ env.get_corosync_conf().get_nodes()
+ )
+ )
+ run_and_raise(env.get_node_communicator(), com_cmd)
def enable_booth(env, name=None):
@@ -363,9 +369,11 @@ def pull_config(env, node_name, name):
env.report_processor.process(
booth_reports.booth_fetching_config_from_node_started(node_name, name)
)
- output = sync.pull_config_from_node(
- env.node_communicator(), NodeAddresses(node_name), name
- )
+ com_cmd = BoothGetConfig(env.report_processor, name)
+ com_cmd.set_targets([
+ env.get_node_target_factory().get_target_from_hostname(node_name)
+ ])
+ output = run_and_raise(env.get_node_communicator(), com_cmd)[0][1]
try:
env.booth.create_config(output["config"]["data"], True)
if (
diff --git a/pcs/lib/commands/cib_options.py b/pcs/lib/commands/cib_options.py
new file mode 100644
index 0000000..d64c887
--- /dev/null
+++ b/pcs/lib/commands/cib_options.py
@@ -0,0 +1,38 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from functools import partial
+
+from pcs.lib import reports
+from pcs.lib.cib import sections
+from pcs.lib.xml_tools import remove_when_pointless
+from pcs.lib.cib.nvpair import arrange_first_meta_attributes
+
+
+def _set_any_defaults(section_name, env, options):
+ """
+ string section_name -- determines the defaults section to modify
+ LibraryEnvironment env -- provides access to the outside environment
+ dict options -- desired options with their values; when a value is
+ empty, the option is removed
+ """
+ env.report_processor.process(reports.defaults_can_be_overriden())
+
+ if not options:
+ return
+
+ defaults_section = sections.get(env.get_cib(), section_name)
+ arrange_first_meta_attributes(
+ defaults_section,
+ options,
+ new_id="{0}-options".format(section_name)
+ )
+ remove_when_pointless(defaults_section)
+
+ env.push_cib()
+
+set_operations_defaults = partial(_set_any_defaults, sections.OP_DEFAULTS)
+set_resources_defaults = partial(_set_any_defaults, sections.RSC_DEFAULTS)
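Both public entry points are partial applications of _set_any_defaults, so usage reduces to the following; the option names are illustrative and not validated by this code:

    from pcs.lib.commands import cib_options

    # writes nvpairs into an nvset with id "op_defaults-options"
    cib_options.set_operations_defaults(env, {"timeout": "60s"})
    # an empty value removes the option, and an emptied defaults
    # section is then dropped by remove_when_pointless
    cib_options.set_resources_defaults(env, {"is-managed": ""})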
diff --git a/pcs/lib/commands/cluster.py b/pcs/lib/commands/cluster.py
index a166ad5..5f1f7a6 100644
--- a/pcs/lib/commands/cluster.py
+++ b/pcs/lib/commands/cluster.py
@@ -2,499 +2,30 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.common import report_codes
-from pcs.lib import reports, nodes_task, node_communication_format
-from pcs.lib.node import(
- NodeAddresses,
- NodeAddressesList,
+from pcs.lib import reports
+from pcs.lib.cib import fencing_topology
+from pcs.lib.cib.tools import (
+ get_fencing_topology,
+ get_resources,
+)
+from pcs.lib.env_tools import get_nodes
+from pcs.lib.errors import LibraryError
+from pcs.lib.node import (
node_addresses_contain_name,
node_addresses_contain_host,
)
-from pcs.lib.tools import generate_key
-from pcs.lib.cib.resource import guest_node, primitive, remote_node
-from pcs.lib.cib.tools import get_resources, find_element_by_tag_and_id
-from pcs.lib.env_tools import get_nodes, get_nodes_remote, get_nodes_guest
-from pcs.lib.errors import LibraryError
-from pcs.lib.pacemaker import state
-from pcs.lib.pacemaker.live import remove_node
-
-def _ensure_can_add_node_to_remote_cluster(
- env, node_addresses, warn_on_communication_exception=False
-):
- report_items = []
- nodes_task.check_can_add_node_to_cluster(
- env.node_communicator(),
- node_addresses,
- report_items,
- check_response=nodes_task.availability_checker_remote_node,
- warn_on_communication_exception=warn_on_communication_exception,
- )
- env.report_processor.process_list(report_items)
-
-def _share_authkey(
- env, current_nodes, candidate_node_addresses,
- skip_offline_nodes=False,
- allow_incomplete_distribution=False
-):
- if env.pacemaker.has_authkey:
- authkey_content = env.pacemaker.get_authkey_content()
- node_addresses_list = NodeAddressesList([candidate_node_addresses])
- else:
- authkey_content = generate_key()
- node_addresses_list = current_nodes + [candidate_node_addresses]
-
- nodes_task.distribute_files(
- env.node_communicator(),
- env.report_processor,
- node_communication_format.pcmk_authkey_file(authkey_content),
- node_addresses_list,
- skip_offline_nodes,
- allow_incomplete_distribution,
- description="remote node configuration files"
- )
-
-def _start_and_enable_pacemaker_remote(
- env, node_list, skip_offline_nodes=False, allow_fails=False
-):
- nodes_task.run_actions_on_multiple_nodes(
- env.node_communicator(),
- env.report_processor,
- node_communication_format.create_pcmk_remote_actions([
- "start",
- "enable",
- ]),
- lambda key, response: response.code == "success",
- node_list,
- skip_offline_nodes,
- allow_fails,
- description="start of service pacemaker_remote"
- )
-
-def _prepare_pacemaker_remote_environment(
- env, current_nodes, node_host, skip_offline_nodes,
- allow_incomplete_distribution, allow_fails
-):
- if not env.is_corosync_conf_live:
- env.report_processor.process_list([
- reports.nolive_skip_files_distribution(
- ["pacemaker authkey"],
- [node_host]
- ),
- reports.nolive_skip_service_command_on_nodes(
- "pacemaker_remote",
- "start",
- [node_host]
- ),
- reports.nolive_skip_service_command_on_nodes(
- "pacemaker_remote",
- "enable",
- [node_host]
- ),
- ])
- return
-
- candidate_node = NodeAddresses(node_host)
- _ensure_can_add_node_to_remote_cluster(
- env,
- candidate_node,
- skip_offline_nodes
- )
- _share_authkey(
- env,
- current_nodes,
- candidate_node,
- skip_offline_nodes,
- allow_incomplete_distribution
- )
- _start_and_enable_pacemaker_remote(
- env,
- [candidate_node],
- skip_offline_nodes,
- allow_fails
- )
-
-def _ensure_resource_running(env, resource_id):
- env.report_processor.process(
- state.ensure_resource_running(env.get_cluster_state(), resource_id)
- )
-
-def _ensure_consistently_live_env(env):
- if env.is_cib_live and env.is_corosync_conf_live:
- return
-
- #we accept it as well, we need it for tests
- if not env.is_cib_live and not env.is_corosync_conf_live:
- return
-
- raise LibraryError(reports.live_environment_required([
- "CIB" if not env.is_cib_live else "COROSYNC_CONF"
- ]))
-
-
-def node_add_remote(
- env, host, node_name, operations, meta_attributes, instance_attributes,
- skip_offline_nodes=False,
- allow_incomplete_distribution=False,
- allow_pacemaker_remote_service_fail=False,
- allow_invalid_operation=False,
- allow_invalid_instance_attributes=False,
- use_default_operations=True,
- wait=False,
-):
- """
- create resource ocf:pacemaker:remote and use it as remote node
-
- LibraryEnvironment env provides all for communication with externals
- list of dict operations contains attributes for each entered operation
- dict meta_attributes contains attributes for primitive/meta_attributes
- dict instance_attributes contains attributes for
- primitive/instance_attributes
- bool skip_offline_nodes -- a flag for ignoring when some nodes are offline
- bool allow_incomplete_distribution -- a flag allowing this command to
- finish successfully even if file distribution did not succeed
- bool allow_pacemaker_remote_service_fail -- a flag allowing this command
- to finish successfully even if starting/enabling pacemaker_remote did
- not succeed
- bool allow_invalid_operation -- a flag allowing the use of operations
- that are not listed in the resource agent metadata
- bool allow_invalid_instance_attributes -- a flag allowing the use of
- instance attributes that are not listed in the resource agent metadata,
- or the omission of instance attributes that the metadata requires
- bool use_default_operations -- a flag for disabling the addition of
- default cib operations (specified in the resource agent)
- mixed wait -- a flag controlling waiting via the pacemaker idle mechanism
- """
- _ensure_consistently_live_env(env)
- env.ensure_wait_satisfiable(wait)
-
- cib = env.get_cib()
- current_nodes = get_nodes(env.get_corosync_conf(), cib)
-
- resource_agent = remote_node.get_agent(
- env.report_processor,
- env.cmd_runner()
- )
-
- report_list = remote_node.validate_create(
- current_nodes,
- resource_agent,
- host,
- node_name,
- instance_attributes
- )
-
- try:
- remote_resource_element = remote_node.create(
- env.report_processor,
- resource_agent,
- get_resources(cib),
- host,
- node_name,
- operations,
- meta_attributes,
- instance_attributes,
- allow_invalid_operation,
- allow_invalid_instance_attributes,
- use_default_operations,
- )
- except LibraryError as e:
- #Check for id conflicts against nodes as well. Until validation and
- #resource create are separated, we need to deduplicate the reports after
- #validation.
- already_exists = []
- unified_report_list = []
- for report in report_list + list(e.args):
- if report.code != report_codes.ID_ALREADY_EXISTS:
- unified_report_list.append(report)
- elif report.info["id"] not in already_exists:
- unified_report_list.append(report)
- already_exists.append(report.info["id"])
- report_list = unified_report_list
-
- env.report_processor.process_list(report_list)
-
- _prepare_pacemaker_remote_environment(
- env,
- current_nodes,
- host,
- skip_offline_nodes,
- allow_incomplete_distribution,
- allow_pacemaker_remote_service_fail,
- )
- env.push_cib(cib, wait)
- if wait:
- _ensure_resource_running(env, remote_resource_element.attrib["id"])
-
-def node_add_guest(
- env, node_name, resource_id, options,
- skip_offline_nodes=False,
- allow_incomplete_distribution=False,
- allow_pacemaker_remote_service_fail=False, wait=False,
-):
-
- """
- setup resource (resource_id) as guest node and setup node as guest
-
- LibraryEnvironment env provides all for communication with externals
- string resource_id -- specifies resource that should be guest node
- dict options could contain keys remote-node, remote-port, remote-addr,
- remote-connect-timeout
- bool skip_offline_nodes -- a flag for ignoring when some nodes are offline
- bool allow_incomplete_distribution -- a flag allowing this command to
- finish successfully even if file distribution did not succeed
- bool allow_pacemaker_remote_service_fail -- a flag allowing this command
- to finish successfully even if starting/enabling pacemaker_remote did
- not succeed
- mixed wait -- a flag controlling waiting via the pacemaker idle mechanism
- """
- _ensure_consistently_live_env(env)
- env.ensure_wait_satisfiable(wait)
-
- cib = env.get_cib()
- current_nodes = get_nodes(env.get_corosync_conf(), cib)
-
- report_list = guest_node.validate_set_as_guest(
- cib,
- current_nodes,
- node_name,
- options
- )
- try:
- resource_element = find_element_by_tag_and_id(
- primitive.TAG,
- get_resources(cib),
- resource_id
- )
- report_list.extend(guest_node.validate_is_not_guest(resource_element))
- except LibraryError as e:
- report_list.extend(e.args)
-
- env.report_processor.process_list(report_list)
-
- guest_node.set_as_guest(
- resource_element,
- node_name,
- options.get("remote-addr", None),
- options.get("remote-port", None),
- options.get("remote-connect-timeout", None),
- )
-
- _prepare_pacemaker_remote_environment(
- env,
- current_nodes,
- guest_node.get_host_from_options(node_name, options),
- skip_offline_nodes,
- allow_incomplete_distribution,
- allow_pacemaker_remote_service_fail,
- )
-
- env.push_cib(cib, wait)
- if wait:
- _ensure_resource_running(env, resource_id)
-
-def _find_resources_to_remove(
- cib, report_processor,
- node_type, node_identifier, allow_remove_multiple_nodes,
- find_resources
-):
- resource_element_list = find_resources(get_resources(cib), node_identifier)
-
- if not resource_element_list:
- raise LibraryError(reports.node_not_found(node_identifier, node_type))
-
- if len(resource_element_list) > 1:
- report_processor.process(
- reports.get_problem_creator(
- report_codes.FORCE_REMOVE_MULTIPLE_NODES,
- allow_remove_multiple_nodes
- )(
- reports.multiple_result_found,
- "resource",
- [resource.attrib["id"] for resource in resource_element_list],
- node_identifier
- )
- )
-
- return resource_element_list
-
-def _get_node_addresses_from_resources(nodes, resource_element_list, get_host):
- node_addresses_set = set()
- for resource_element in resource_element_list:
- for node in nodes:
- #remote nodes use ring0 only
- if get_host(resource_element) == node.ring0:
- node_addresses_set.add(node)
- return sorted(node_addresses_set, key=lambda node: node.ring0)
-
-def _destroy_pcmk_remote_env(
- env, node_addresses_list, skip_offline_nodes, allow_fails
-):
- actions = node_communication_format.create_pcmk_remote_actions([
- "stop",
- "disable",
- ])
- files = {
- "pacemaker_remote authkey": {"type": "pcmk_remote_authkey"},
- }
-
- nodes_task.run_actions_on_multiple_nodes(
- env.node_communicator(),
- env.report_processor,
- actions,
- lambda key, response: response.code == "success",
- node_addresses_list,
- skip_offline_nodes,
- allow_fails,
- description="stop of service pacemaker_remote"
- )
-
- nodes_task.remove_files(
- env.node_communicator(),
- env.report_processor,
- files,
- node_addresses_list,
- skip_offline_nodes,
- allow_fails,
- description="remote node files"
- )
-
-def _report_skip_live_parts_in_remove(node_addresses_list):
- #remote nodes use ring0 only
- node_host_list = [addresses.ring0 for addresses in node_addresses_list]
- return [
- reports.nolive_skip_service_command_on_nodes(
- "pacemaker_remote",
- "stop",
- node_host_list
- ),
- reports.nolive_skip_service_command_on_nodes(
- "pacemaker_remote",
- "disable",
- node_host_list
- ),
- reports.nolive_skip_files_remove(["pacemaker authkey"], node_host_list)
- ]
-
-def node_remove_remote(
- env, node_identifier, remove_resource,
- skip_offline_nodes=False,
- allow_remove_multiple_nodes=False,
- allow_pacemaker_remote_service_fail=False
-):
- """
- remove a resource representing a remote node and destroy the remote node
-
- LibraryEnvironment env provides all for communication with externals
- string node_identifier -- node name or hostname
- callable remove_resource -- function for removing a resource
- bool skip_offline_nodes -- a flag for ignoring when some nodes are offline
- bool allow_remove_multiple_nodes -- a flag allowing removal of unexpected
- multiple occurrences of the remote node for node_identifier
- bool allow_pacemaker_remote_service_fail -- a flag allowing this command
- to finish successfully even if stopping/disabling pacemaker_remote did
- not succeed
- """
-
- _ensure_consistently_live_env(env)
- cib = env.get_cib()
- resource_element_list = _find_resources_to_remove(
- cib,
- env.report_processor,
- "remote",
- node_identifier,
- allow_remove_multiple_nodes,
- remote_node.find_node_resources,
- )
-
- node_addresses_list = _get_node_addresses_from_resources(
- get_nodes_remote(cib),
- resource_element_list,
- remote_node.get_host,
- )
-
- if not env.is_corosync_conf_live:
- env.report_processor.process_list(
- _report_skip_live_parts_in_remove(node_addresses_list)
- )
- else:
- _destroy_pcmk_remote_env(
- env,
- node_addresses_list,
- skip_offline_nodes,
- allow_pacemaker_remote_service_fail
- )
-
- #removing the node from pcmk caches is currently integrated in the
- #remove_resource function
- for resource_element in resource_element_list:
- remove_resource(
- resource_element.attrib["id"],
- is_remove_remote_context=True,
- )
-
-def node_remove_guest(
- env, node_identifier,
- skip_offline_nodes=False,
- allow_remove_multiple_nodes=False,
- allow_pacemaker_remote_service_fail=False,
- wait=False,
-):
- """
- destroy a guest node and unset the guest options on its resource
-
- LibraryEnvironment env provides all for communication with externals
- string node_identifier -- node name, hostname or resource id
- bool skip_offline_nodes -- a flag for ignoring when some nodes are offline
- bool allow_remove_multiple_nodes -- a flag allowing removal of unexpected
- multiple occurrences of the guest node for node_identifier
- bool allow_pacemaker_remote_service_fail -- a flag allowing this command
- to finish successfully even if stopping/disabling pacemaker_remote did
- not succeed
- """
- _ensure_consistently_live_env(env)
- env.ensure_wait_satisfiable(wait)
- cib = env.get_cib()
-
- resource_element_list = _find_resources_to_remove(
- cib,
- env.report_processor,
- "guest",
- node_identifier,
- allow_remove_multiple_nodes,
- guest_node.find_node_resources,
- )
-
- node_addresses_list = _get_node_addresses_from_resources(
- get_nodes_guest(cib),
- resource_element_list,
- guest_node.get_host,
- )
-
- if not env.is_corosync_conf_live:
- env.report_processor.process_list(
- _report_skip_live_parts_in_remove(node_addresses_list)
- )
- else:
- _destroy_pcmk_remote_env(
- env,
- node_addresses_list,
- skip_offline_nodes,
- allow_pacemaker_remote_service_fail
- )
-
- for resource_element in resource_element_list:
- guest_node.unset_guest(resource_element)
-
- env.push_cib(cib, wait)
-
- #remove node from pcmk caches
- if env.is_cib_live:
- for node_addresses in node_addresses_list:
- remove_node(env.cmd_runner(), node_addresses.name)
+from pcs.lib.pacemaker.live import (
+ get_cib,
+ get_cib_xml,
+ get_cib_xml_cmd_results,
+ get_cluster_status_xml,
+ remove_node,
+ verify as verify_cmd,
+)
+from pcs.lib.pacemaker.state import ClusterState
def node_clear(env, node_name, allow_clear_cluster_node=False):
@@ -531,3 +62,39 @@ def node_clear(env, node_name, allow_clear_cluster_node=False):
)
remove_node(env.cmd_runner(), node_name)
+
+def verify(env, verbose=False):
+ runner = env.cmd_runner()
+ dummy_stdout, verify_stderr, verify_returncode = verify_cmd(
+ runner,
+ verbose=verbose,
+ )
+
+ #1) Do not even try to think about upgrading!
+ #2) We do not need cib management in env (no need for push...).
+ #So env.get_cib is not the best choice here (there were considerations to
+ #upgrade the cib at all times inside env.get_cib). Go to a lower level here.
+ if verify_returncode != 0:
+ env.report_processor.append(reports.invalid_cib_content(verify_stderr))
+
+ #Cib is sometimes loadable even if `crm_verify` fails (e.g. when
+ #fencing topology is invalid). On the other hand cib with id duplication
+ #is not loadable.
+ #We try extra checks when the cib can be loaded.
+ cib_xml, dummy_stderr, returncode = get_cib_xml_cmd_results(runner)
+ if returncode != 0:
+ #can raise; raising LibraryError would be better but in this case we
+ #prefer to be consistent with the raising below
+ env.report_processor.send()
+ else:
+ cib_xml = get_cib_xml(runner)
+
+ cib = get_cib(cib_xml)
+ fencing_topology.verify(
+ env.report_processor,
+ get_fencing_topology(cib),
+ get_resources(cib),
+ ClusterState(get_cluster_status_xml(runner)).node_section.nodes
+ )
+ #can raise
+ env.report_processor.send()
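
For orientation, the new verify command can be exercised directly through the
library API; a minimal sketch (the environment construction mirrors the test
helpers touched by this commit, and assumes the mock report processor supports
the append/send interface used above):

    import logging
    from pcs.lib.commands.cluster import verify
    from pcs.lib.env import LibraryEnvironment
    from pcs.test.tools.custom_mock import MockLibraryReportProcessor

    # build a library environment the way the removed common.py fixtures did
    env = LibraryEnvironment(
        logging.getLogger("pcs"), MockLibraryReportProcessor()
    )
    # runs crm_verify and, when the cib is loadable, the extra fencing
    # topology checks; raises LibraryError if any error report was collected
    verify(env, verbose=True)
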
diff --git a/pcs/lib/commands/constraint/colocation.py b/pcs/lib/commands/constraint/colocation.py
index e384867..b7ff402 100644
--- a/pcs/lib/commands/constraint/colocation.py
+++ b/pcs/lib/commands/constraint/colocation.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
diff --git a/pcs/lib/commands/constraint/common.py b/pcs/lib/commands/constraint/common.py
index aef1403..ad5a252 100644
--- a/pcs/lib/commands/constraint/common.py
+++ b/pcs/lib/commands/constraint/common.py
@@ -7,7 +7,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
@@ -68,7 +67,7 @@ def create_with_set(
duplication_alowed=duplication_alowed,
)
- env.push_cib(cib)
+ env.push_cib()
def show(tag_name, is_plain, env):
"""
diff --git a/pcs/lib/commands/constraint/order.py b/pcs/lib/commands/constraint/order.py
index d2b2b33..ecf68b2 100644
--- a/pcs/lib/commands/constraint/order.py
+++ b/pcs/lib/commands/constraint/order.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
diff --git a/pcs/lib/commands/constraint/ticket.py b/pcs/lib/commands/constraint/ticket.py
index a14c5ad..b388769 100644
--- a/pcs/lib/commands/constraint/ticket.py
+++ b/pcs/lib/commands/constraint/ticket.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
@@ -67,7 +66,7 @@ def create(
duplication_alowed=duplication_alowed,
)
- env.push_cib(cib)
+ env.push_cib()
def remove(env, ticket_key, resource_id):
"""
@@ -75,8 +74,7 @@ def remove(env, ticket_key, resource_id):
If the resource is in a resource set with other resources, only the resource
ref is removed. If the resource is alone in the resource set, the whole
constraint is removed.
"""
- cib = env.get_cib()
- constraint_section = get_constraints(cib)
+ constraint_section = get_constraints(env.get_cib())
any_plain_removed = ticket.remove_plain(
constraint_section,
ticket_key,
@@ -88,6 +86,6 @@ def remove(env, ticket_key, resource_id):
resource_id
)
- env.push_cib(cib)
+ env.push_cib()
return any_plain_removed or any_with_resource_set_removed
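
Since remove reports back whether anything was actually deleted, a caller can
branch on the returned boolean; a short usage sketch (ticket and resource
names hypothetical):

    # True if a plain constraint or a resource set reference was removed
    anything_removed = remove(env, "ticketA", "resourceA")
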
diff --git a/pcs/lib/commands/fencing_topology.py b/pcs/lib/commands/fencing_topology.py
index e7d9003..07c5e6b 100644
--- a/pcs/lib/commands/fencing_topology.py
+++ b/pcs/lib/commands/fencing_topology.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.common.fencing_topology import (
@@ -54,7 +53,7 @@ def add_level(
force_node
)
lib_env.report_processor.send()
- lib_env.push_cib(cib)
+ lib_env.push_cib()
def get_config(lib_env):
"""
@@ -73,9 +72,10 @@ def remove_all_levels(lib_env):
Remove all fencing levels
LibraryEnvironment lib_env -- environment
"""
- cib = lib_env.get_cib()
- cib_fencing_topology.remove_all_levels(get_fencing_topology(cib))
- lib_env.push_cib(cib)
+ cib_fencing_topology.remove_all_levels(
+ get_fencing_topology(lib_env.get_cib())
+ )
+ lib_env.push_cib()
def remove_levels_by_params(
lib_env, level=None, target_type=None, target_value=None, devices=None,
@@ -91,10 +91,9 @@ def remove_levels_by_params(
Iterable devices -- list of stonith devices of the fencing level to remove
bool ignore_if_missing -- when True, do not report if level not found
"""
- cib = lib_env.get_cib()
cib_fencing_topology.remove_levels_by_params(
lib_env.report_processor,
- get_fencing_topology(cib),
+ get_fencing_topology(lib_env.get_cib()),
level,
target_type,
target_value,
@@ -102,7 +101,7 @@ def remove_levels_by_params(
ignore_if_missing
)
lib_env.report_processor.send()
- lib_env.push_cib(cib)
+ lib_env.push_cib()
def verify(lib_env):
"""
diff --git a/pcs/lib/commands/node.py b/pcs/lib/commands/node.py
index cc9424f..59ca820 100644
--- a/pcs/lib/commands/node.py
+++ b/pcs/lib/commands/node.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from contextlib import contextmanager
@@ -21,14 +20,13 @@ from pcs.lib.pacemaker.state import ClusterState
def cib_runner_nodes(lib_env, wait):
lib_env.ensure_wait_satisfiable(wait)
runner = lib_env.cmd_runner()
- cib = lib_env.get_cib()
state_nodes = ClusterState(
get_cluster_status_xml(runner)
).node_section.nodes
- yield (cib, runner, state_nodes)
- lib_env.push_cib(cib, wait)
+ yield (lib_env.get_cib(), runner, state_nodes)
+ lib_env.push_cib(wait=wait)
def standby_unstandby_local(lib_env, standby, wait=False):
diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py
index 119c51d..9285f6e 100644
--- a/pcs/lib/commands/qdevice.py
+++ b/pcs/lib/commands/qdevice.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import base64
diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py
index aa98e61..6d0ab38 100644
--- a/pcs/lib/commands/quorum.py
+++ b/pcs/lib/commands/quorum.py
@@ -2,22 +2,21 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.common import report_codes
from pcs.lib import reports, sbd
from pcs.lib.errors import LibraryError, ReportItemSeverity
+from pcs.lib.communication import (
+ qdevice as qdevice_com,
+ qdevice_net as qdevice_net_com,
+)
+from pcs.lib.communication.tools import run_and_raise
from pcs.lib.corosync import (
live as corosync_live,
qdevice_net,
qdevice_client
)
-from pcs.lib.external import (
- NodeCommunicationException,
- node_communicator_exception_to_report_item,
- parallel_nodes_communication_helper,
-)
def get_config(lib_env):
@@ -127,6 +126,9 @@ def add_device(
force_model,
force_options
)
+ target_list = lib_env.get_node_target_factory().get_target_list(
+ cfg.get_nodes()
+ )
# First setup certificates for qdevice, then send corosync.conf to nodes.
# If anything fails, nodes will not have corosync.conf with qdevice in it,
@@ -148,16 +150,11 @@ def add_device(
lib_env.report_processor.process(
reports.service_enable_started("corosync-qdevice")
)
- communicator = lib_env.node_communicator()
- parallel_nodes_communication_helper(
- qdevice_client.remote_client_enable,
- [
- [(lib_env.report_processor, communicator, node), {}]
- for node in cfg.get_nodes()
- ],
- lib_env.report_processor,
- skip_offline_nodes
+ com_cmd = qdevice_com.Enable(
+ lib_env.report_processor, skip_offline_nodes
)
+ com_cmd.set_targets(target_list)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
# everything set up, it's safe to tell the nodes to use qdevice
lib_env.push_corosync_conf(cfg, skip_offline_nodes)
@@ -167,16 +164,11 @@ def add_device(
lib_env.report_processor.process(
reports.service_start_started("corosync-qdevice")
)
- communicator = lib_env.node_communicator()
- parallel_nodes_communication_helper(
- qdevice_client.remote_client_start,
- [
- [(lib_env.report_processor, communicator, node), {}]
- for node in cfg.get_nodes()
- ],
- lib_env.report_processor,
- skip_offline_nodes
+ com_cmd = qdevice_com.Start(
+ lib_env.report_processor, skip_offline_nodes
)
+ com_cmd.set_targets(target_list)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
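
The replacements above all follow the same three-step pattern introduced by
this commit: build a communication command object, attach request targets
produced by the target factory, and execute it. Schematically (a sketch only;
SomeCommand stands in for any class from the pcs.lib.communication modules):

    # generic shape of the new node-communication flow (illustrative)
    com_cmd = SomeCommand(lib_env.report_processor, skip_offline_targets=True)
    com_cmd.set_targets(
        lib_env.get_node_target_factory().get_target_list(node_list)
    )
    # run_and_raise raises LibraryError if any request yielded error reports
    result = run_and_raise(lib_env.get_node_communicator(), com_cmd)
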
def _add_device_model_net(
lib_env, qnetd_host, cluster_name, cluster_nodes, skip_offline_nodes
@@ -188,69 +180,46 @@ def _add_device_model_net(
NodeAddressesList cluster_nodes list of cluster node addresses
bool skip_offline_nodes continue even if not all nodes are accessible
"""
- communicator = lib_env.node_communicator()
runner = lib_env.cmd_runner()
reporter = lib_env.report_processor
+ target_factory = lib_env.get_node_target_factory()
+ qnetd_target = target_factory.get_target_from_hostname(qnetd_host)
+ target_list = target_factory.get_target_list(cluster_nodes)
reporter.process(
reports.qdevice_certificate_distribution_started()
)
# get qnetd CA certificate
- try:
- qnetd_ca_cert = qdevice_net.remote_qdevice_get_ca_certificate(
- communicator,
- qnetd_host
- )
- except NodeCommunicationException as e:
- raise LibraryError(
- node_communicator_exception_to_report_item(e)
- )
+ com_cmd = qdevice_net_com.GetCaCert(reporter)
+ com_cmd.set_targets([qnetd_target])
+ qnetd_ca_cert = run_and_raise(
+ lib_env.get_node_communicator(), com_cmd
+ )[0][1]
# init certificate storage on all nodes
- parallel_nodes_communication_helper(
- qdevice_net.remote_client_setup,
- [
- ((communicator, node, qnetd_ca_cert), {})
- for node in cluster_nodes
- ],
- reporter,
- skip_offline_nodes
+ com_cmd = qdevice_net_com.ClientSetup(
+ reporter, qnetd_ca_cert, skip_offline_nodes
)
+ com_cmd.set_targets(target_list)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
# create client certificate request
cert_request = qdevice_net.client_generate_certificate_request(
runner,
cluster_name
)
# sign the request on qnetd host
- try:
- signed_certificate = qdevice_net.remote_sign_certificate_request(
- communicator,
- qnetd_host,
- cert_request,
- cluster_name
- )
- except NodeCommunicationException as e:
- raise LibraryError(
- node_communicator_exception_to_report_item(e)
- )
+ com_cmd = qdevice_net_com.SignCertificate(reporter)
+ com_cmd.add_request(qnetd_target, cert_request, cluster_name)
+ signed_certificate = run_and_raise(
+ lib_env.get_node_communicator(), com_cmd
+ )[0][1]
# transform the signed certificate to pk12 format which can be sent to nodes
pk12 = qdevice_net.client_cert_request_to_pk12(runner, signed_certificate)
# distribute final certificate to nodes
- def do_and_report(reporter, communicator, node, pk12):
- qdevice_net.remote_client_import_certificate_and_key(
- communicator, node, pk12
- )
- reporter.process(
- reports.qdevice_certificate_accepted_by_node(node.label)
- )
- parallel_nodes_communication_helper(
- do_and_report,
- [
- ((reporter, communicator, node, pk12), {})
- for node in cluster_nodes
- ],
- reporter,
- skip_offline_nodes
+ com_cmd = qdevice_net_com.ClientImportCertificateAndKey(
+ reporter, pk12, skip_offline_nodes
)
+ com_cmd.set_targets(target_list)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
def update_device(
lib_env, model_options, generic_options, force_options=False,
@@ -285,7 +254,9 @@ def remove_device(lib_env, skip_offline_nodes=False):
cfg.remove_quorum_device()
if lib_env.is_corosync_conf_live:
- communicator = lib_env.node_communicator()
+ target_list = lib_env.get_node_target_factory().get_target_list(
+ cfg.get_nodes()
+ )
# fix quorum options for SBD to work properly
if sbd.atb_has_to_be_enabled(lib_env.cmd_runner(), cfg):
lib_env.report_processor.process(reports.sbd_requires_atb())
@@ -297,28 +268,20 @@ def remove_device(lib_env, skip_offline_nodes=False):
lib_env.report_processor.process(
reports.service_disable_started("corosync-qdevice")
)
- parallel_nodes_communication_helper(
- qdevice_client.remote_client_disable,
- [
- [(lib_env.report_processor, communicator, node), {}]
- for node in cfg.get_nodes()
- ],
- lib_env.report_processor,
- skip_offline_nodes
+ com_cmd = qdevice_com.Disable(
+ lib_env.report_processor, skip_offline_nodes
)
+ com_cmd.set_targets(target_list)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
# stop qdevice
lib_env.report_processor.process(
reports.service_stop_started("corosync-qdevice")
)
- parallel_nodes_communication_helper(
- qdevice_client.remote_client_stop,
- [
- [(lib_env.report_processor, communicator, node), {}]
- for node in cfg.get_nodes()
- ],
- lib_env.report_processor,
- skip_offline_nodes
+ com_cmd = qdevice_com.Stop(
+ lib_env.report_processor, skip_offline_nodes
)
+ com_cmd.set_targets(target_list)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
# handle model specific configuration
if model == "net":
_remove_device_model_net(
@@ -336,25 +299,15 @@ def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes):
bool skip_offline_nodes continue even if not all nodes are accessible
"""
reporter = lib_env.report_processor
- communicator = lib_env.node_communicator()
reporter.process(
reports.qdevice_certificate_removal_started()
)
- def do_and_report(reporter, communicator, node):
- qdevice_net.remote_client_destroy(communicator, node)
- reporter.process(
- reports.qdevice_certificate_removed_from_node(node.label)
- )
- parallel_nodes_communication_helper(
- do_and_report,
- [
- [(reporter, communicator, node), {}]
- for node in cluster_nodes
- ],
- lib_env.report_processor,
- skip_offline_nodes
+ com_cmd = qdevice_net_com.ClientDestroy(reporter)
+ com_cmd.set_targets(
+ lib_env.get_node_target_factory().get_target_list(cluster_nodes)
)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
def set_expected_votes_live(lib_env, expected_votes):
"""
diff --git a/pcs/lib/commands/cluster.py b/pcs/lib/commands/remote_node.py
similarity index 85%
copy from pcs/lib/commands/cluster.py
copy to pcs/lib/commands/remote_node.py
index a166ad5..da87bb7 100644
--- a/pcs/lib/commands/cluster.py
+++ b/pcs/lib/commands/remote_node.py
@@ -2,20 +2,25 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.common import report_codes
-from pcs.lib import reports, nodes_task, node_communication_format
+from pcs.lib import reports, node_communication_format
from pcs.lib.node import(
NodeAddresses,
NodeAddressesList,
- node_addresses_contain_name,
- node_addresses_contain_host,
)
from pcs.lib.tools import generate_key
from pcs.lib.cib.resource import guest_node, primitive, remote_node
from pcs.lib.cib.tools import get_resources, find_element_by_tag_and_id
+from pcs.lib.communication.nodes import (
+ availability_checker_remote_node,
+ DistributeFiles,
+ PrecheckNewNode,
+ RemoveFiles,
+ ServiceAction,
+)
+from pcs.lib.communication.tools import run, run_and_raise
from pcs.lib.env_tools import get_nodes, get_nodes_remote, get_nodes_guest
from pcs.lib.errors import LibraryError
from pcs.lib.pacemaker import state
@@ -25,13 +30,15 @@ def _ensure_can_add_node_to_remote_cluster(
env, node_addresses, warn_on_communication_exception=False
):
report_items = []
- nodes_task.check_can_add_node_to_cluster(
- env.node_communicator(),
- node_addresses,
+ com_cmd = PrecheckNewNode(
report_items,
- check_response=nodes_task.availability_checker_remote_node,
- warn_on_communication_exception=warn_on_communication_exception,
+ availability_checker_remote_node,
+ skip_offline_targets=warn_on_communication_exception,
+ )
+ com_cmd.add_request(
+ env.get_node_target_factory().get_target(node_addresses)
)
+ run(env.get_node_communicator(), com_cmd)
env.report_processor.process_list(report_items)
def _share_authkey(
@@ -46,32 +53,35 @@ def _share_authkey(
authkey_content = generate_key()
node_addresses_list = current_nodes + [candidate_node_addresses]
- nodes_task.distribute_files(
- env.node_communicator(),
+ com_cmd = DistributeFiles(
env.report_processor,
node_communication_format.pcmk_authkey_file(authkey_content),
- node_addresses_list,
- skip_offline_nodes,
- allow_incomplete_distribution,
- description="remote node configuration files"
+ skip_offline_targets=skip_offline_nodes,
+ allow_fails=allow_incomplete_distribution,
+ description="remote node configuration files",
+ )
+ com_cmd.set_targets(
+ env.get_node_target_factory().get_target_list(node_addresses_list)
)
+ run_and_raise(env.get_node_communicator(), com_cmd)
def _start_and_enable_pacemaker_remote(
env, node_list, skip_offline_nodes=False, allow_fails=False
):
- nodes_task.run_actions_on_multiple_nodes(
- env.node_communicator(),
+ com_cmd = ServiceAction(
env.report_processor,
node_communication_format.create_pcmk_remote_actions([
"start",
"enable",
]),
- lambda key, response: response.code == "success",
- node_list,
- skip_offline_nodes,
- allow_fails,
+ skip_offline_targets=skip_offline_nodes,
+ allow_fails=allow_fails,
description="start of service pacemaker_remote"
)
+ com_cmd.set_targets(
+ env.get_node_target_factory().get_target_list(node_list)
+ )
+ run_and_raise(env.get_node_communicator(), com_cmd)
def _prepare_pacemaker_remote_environment(
env, current_nodes, node_host, skip_offline_nodes,
@@ -225,7 +235,7 @@ def node_add_remote(
allow_incomplete_distribution,
allow_pacemaker_remote_service_fail,
)
- env.push_cib(cib, wait)
+ env.push_cib(wait=wait)
if wait:
_ensure_resource_running(env, remote_resource_element.attrib["id"])
@@ -292,7 +302,7 @@ def node_add_guest(
allow_pacemaker_remote_service_fail,
)
- env.push_cib(cib, wait)
+ env.push_cib(wait=wait)
if wait:
_ensure_resource_running(env, resource_id)
@@ -340,27 +350,30 @@ def _destroy_pcmk_remote_env(
files = {
"pacemaker_remote authkey": {"type": "pcmk_remote_authkey"},
}
+ target_list = env.get_node_target_factory().get_target_list(
+ node_addresses_list
+ )
- nodes_task.run_actions_on_multiple_nodes(
- env.node_communicator(),
+ com_cmd = ServiceAction(
env.report_processor,
actions,
- lambda key, response: response.code == "success",
- node_addresses_list,
- skip_offline_nodes,
- allow_fails,
- description="stop of service pacemaker_remote"
+ skip_offline_targets=skip_offline_nodes,
+ allow_fails=allow_fails,
+ description="stop of service pacemaker_remote",
)
+ com_cmd.set_targets(target_list)
+ run_and_raise(env.get_node_communicator(), com_cmd)
- nodes_task.remove_files(
- env.node_communicator(),
+ com_cmd = RemoveFiles(
env.report_processor,
files,
- node_addresses_list,
- skip_offline_nodes,
- allow_fails,
- description="remote node files"
+ skip_offline_targets=skip_offline_nodes,
+ allow_fails=allow_fails,
+ description="remote node files",
)
+ com_cmd.set_targets(target_list)
+ run_and_raise(env.get_node_communicator(), com_cmd)
+
def _report_skip_live_parts_in_remove(node_addresses_list):
#remote nodes use ring0 only
@@ -489,45 +502,9 @@ def node_remove_guest(
for resource_element in resource_element_list:
guest_node.unset_guest(resource_element)
- env.push_cib(cib, wait)
+ env.push_cib(wait=wait)
#remove node from pcmk caches
if env.is_cib_live:
for node_addresses in node_addresses_list:
remove_node(env.cmd_runner(), node_addresses.name)
-
-
-def node_clear(env, node_name, allow_clear_cluster_node=False):
- """
- Remove specified node from various cluster caches.
-
- LibraryEnvironment env provides all for communication with externals
- string node_name
- bool allow_clear_cluster_node -- flag allows to clear node even if it's
- still in a cluster
- """
- mocked_envs = []
- if not env.is_cib_live:
- mocked_envs.append("CIB")
- if not env.is_corosync_conf_live:
- mocked_envs.append("COROSYNC_CONF")
- if mocked_envs:
- raise LibraryError(reports.live_environment_required(mocked_envs))
-
- current_nodes = get_nodes(env.get_corosync_conf(), env.get_cib())
- if(
- node_addresses_contain_name(current_nodes, node_name)
- or
- node_addresses_contain_host(current_nodes, node_name)
- ):
- env.report_processor.process(
- reports.get_problem_creator(
- report_codes.FORCE_CLEAR_CLUSTER_NODE,
- allow_clear_cluster_node
- )(
- reports.node_to_clear_is_still_in_cluster,
- node_name
- )
- )
-
- remove_node(env.cmd_runner(), node_name)
diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
index 0c5f682..8cda310 100644
--- a/pcs/lib/commands/resource.py
+++ b/pcs/lib/commands/resource.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from contextlib import contextmanager
@@ -39,9 +38,8 @@ def resource_environment(
required_cib_version=None
):
env.ensure_wait_satisfiable(wait)
- cib = env.get_cib(required_cib_version)
- yield get_resources(cib)
- env.push_cib(cib, wait)
+ yield get_resources(env.get_cib(required_cib_version))
+ env.push_cib(wait=wait)
if wait is not False and wait_for_resource_ids:
state = env.get_cluster_state()
env.report_processor.process_list([
diff --git a/pcs/lib/commands/resource_agent.py b/pcs/lib/commands/resource_agent.py
index e3c6acf..f2f2312 100644
--- a/pcs/lib/commands/resource_agent.py
+++ b/pcs/lib/commands/resource_agent.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.lib import resource_agent
diff --git a/pcs/lib/commands/sbd.py b/pcs/lib/commands/sbd.py
index 6d0a4f3..8dc315c 100644
--- a/pcs/lib/commands/sbd.py
+++ b/pcs/lib/commands/sbd.py
@@ -2,37 +2,38 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os
-import json
from pcs import settings
-from pcs.common import (
- tools,
- report_codes,
+from pcs.common import report_codes
+from pcs.lib.communication.sbd import (
+ CheckSbd,
+ DisableSbdService,
+ EnableSbdService,
+ GetSbdConfig,
+ GetSbdStatus,
+ RemoveStonithWatchdogTimeout,
+ SetSbdConfig,
+ SetStonithWatchdogTimeoutToZero,
+)
+from pcs.lib.communication.nodes import GetOnlineTargets
+from pcs.lib.communication.corosync import CheckCorosyncOffline
+from pcs.lib.communication.tools import (
+ run as run_com,
+ run_and_raise,
)
from pcs.lib import (
sbd,
reports,
- nodes_task,
)
from pcs.lib.tools import environment_file_to_dict
from pcs.lib.errors import (
LibraryError,
ReportItemSeverity as Severities
)
-from pcs.lib.external import (
- node_communicator_exception_to_report_item,
- NodeCommunicationException,
- NodeConnectionException,
- NodeCommandUnsuccessfulException,
-)
-from pcs.lib.node import (
- NodeAddressesList,
- NodeNotFound
-)
+from pcs.lib.node import NodeNotFound
from pcs.lib.validate import (
names_in,
run_collection_of_option_validators,
@@ -110,21 +111,21 @@ def _validate_device_dict(node_device_dict):
devices as values
"""
report_item_list = []
- for node, device_list in node_device_dict.items():
+ for node_label, device_list in node_device_dict.items():
if not device_list:
report_item_list.append(
- reports.sbd_no_device_for_node(node.label)
+ reports.sbd_no_device_for_node(node_label)
)
continue
elif len(device_list) > settings.sbd_max_device_num:
report_item_list.append(reports.sbd_too_many_devices_for_node(
- node.label, device_list, settings.sbd_max_device_num
+ node_label, device_list, settings.sbd_max_device_num
))
continue
for device in device_list:
if not device or not os.path.isabs(device):
report_item_list.append(
- reports.sbd_device_path_not_absolute(device, node.label)
+ reports.sbd_device_path_not_absolute(device, node_label)
)
return report_item_list
@@ -148,19 +149,19 @@ def _check_node_names_in_cluster(node_list, node_name_list):
return [reports.node_not_found(node) for node in not_existing_node_set]
-def _get_full_node_dict(node_list, node_value_dict, default_value):
+def _get_full_target_dict(target_list, node_value_dict, default_value):
"""
- Returns dictionary where keys NodeAdressesof all nodes in cluster and value
- is obtained from node_value_dict for node name, or default+value if node
- nade is not specified in node_value_dict.
+ Returns dictionary where keys are labels of all nodes in cluster and value
+ is obtained from node_value_dict for node name, or default value if node
+ is not specified in node_value_dict.
- node_list -- NodeAddressesList
+ list target_list -- list of cluster nodes (RequestTarget object)
node_value_dict -- dictionary, keys: node names, values: some value
default_value -- some default value
"""
return dict([
- (node, node_value_dict.get(node.label, default_value))
- for node in node_list
+ (target.label, node_value_dict.get(target.label, default_value))
+ for target in target_list
])
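
A small worked example of the rewritten helper, assuming two targets labeled
node1 and node2 (all values hypothetical):

    # _get_full_target_dict(targets, {"node1": "/dev/watchdog1"}, "/dev/watchdog0")
    # -> {"node1": "/dev/watchdog1", "node2": "/dev/watchdog0"}
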
@@ -185,6 +186,7 @@ def enable_sbd(
ignore_offline_nodes -- if True, omit offline nodes
"""
node_list = _get_cluster_nodes(lib_env)
+ target_list = lib_env.get_node_target_factory().get_target_list(node_list)
using_devices = not (
default_device_list is None and node_device_dict is None
)
@@ -196,11 +198,11 @@ def enable_sbd(
default_watchdog = settings.sbd_watchdog_default
sbd_options = dict([(opt.upper(), val) for opt, val in sbd_options.items()])
- full_watchdog_dict = _get_full_node_dict(
- node_list, watchdog_dict, default_watchdog
+ full_watchdog_dict = _get_full_target_dict(
+ target_list, watchdog_dict, default_watchdog
)
- full_device_dict = _get_full_node_dict(
- node_list, node_device_dict, default_device_list
+ full_device_dict = _get_full_target_dict(
+ target_list, node_device_dict, default_device_list
)
lib_env.report_processor.process_list(
@@ -215,23 +217,23 @@ def enable_sbd(
_validate_sbd_options(sbd_options, allow_unknown_opts)
)
- online_nodes = _get_online_nodes(lib_env, node_list, ignore_offline_nodes)
-
- node_data_dict = {}
- for node in online_nodes:
- node_data_dict[node] = {
- "watchdog": full_watchdog_dict[node],
- "device_list": full_device_dict[node] if using_devices else [],
- }
+ com_cmd = GetOnlineTargets(
+ lib_env.report_processor, ignore_offline_targets=ignore_offline_nodes,
+ )
+ com_cmd.set_targets(target_list)
+ online_targets = run_and_raise(lib_env.get_node_communicator(), com_cmd)
# check if SBD can be enabled
- sbd.check_sbd_on_all_nodes(
- lib_env.report_processor,
- lib_env.node_communicator(),
- node_data_dict,
- )
+ com_cmd = CheckSbd(lib_env.report_processor)
+ for target in online_targets:
+ com_cmd.add_request(
+ target,
+ full_watchdog_dict[target.label],
+ full_device_dict[target.label] if using_devices else [],
+ )
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
- # enable ATB if needed
+ # enable ATB if needed
if not lib_env.is_cman_cluster and not using_devices:
corosync_conf = lib_env.get_corosync_conf()
if sbd.atb_has_to_be_enabled_pre_enable_check(corosync_conf):
@@ -244,24 +246,28 @@ def enable_sbd(
# distribute SBD configuration
config = sbd.get_default_sbd_config()
config.update(sbd_options)
- sbd.set_sbd_config_on_all_nodes(
- lib_env.report_processor,
- lib_env.node_communicator(),
- online_nodes,
- config,
- full_watchdog_dict,
- full_device_dict,
- )
+ com_cmd = SetSbdConfig(lib_env.report_processor)
+ for target in online_targets:
+ com_cmd.add_request(
+ target,
+ sbd.create_sbd_config(
+ config,
+ target.label,
+ full_watchdog_dict[target.label],
+ full_device_dict[target.label]
+ )
+ )
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
# remove cluster prop 'stonith_watchdog_timeout'
- sbd.remove_stonith_watchdog_timeout_on_all_nodes(
- lib_env.node_communicator(), online_nodes
- )
+ com_cmd = RemoveStonithWatchdogTimeout(lib_env.report_processor)
+ com_cmd.set_targets(online_targets)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
# enable SBD service an all nodes
- sbd.enable_sbd_service_on_all_nodes(
- lib_env.report_processor, lib_env.node_communicator(), online_nodes
- )
+ com_cmd = EnableSbdService(lib_env.report_processor)
+ com_cmd.set_targets(online_targets)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
lib_env.report_processor.process(
reports.cluster_restart_required_to_apply_changes()
@@ -275,26 +281,30 @@ def disable_sbd(lib_env, ignore_offline_nodes=False):
lib_env -- LibraryEnvironment
ignore_offline_nodes -- if True, omit offline nodes
"""
- node_list = _get_online_nodes(
- lib_env, _get_cluster_nodes(lib_env), ignore_offline_nodes
+ com_cmd = GetOnlineTargets(
+ lib_env.report_processor, ignore_offline_targets=ignore_offline_nodes,
+ )
+ com_cmd.set_targets(
+ lib_env.get_node_target_factory().get_target_list(
+ _get_cluster_nodes(lib_env)
+ )
)
+ online_nodes = run_and_raise(lib_env.get_node_communicator(), com_cmd)
if lib_env.is_cman_cluster:
- nodes_task.check_corosync_offline_on_nodes(
- lib_env.node_communicator(),
- lib_env.report_processor,
- node_list,
- ignore_offline_nodes
+ com_cmd = CheckCorosyncOffline(
+ lib_env.report_processor, skip_offline_targets=ignore_offline_nodes,
)
+ com_cmd.set_targets(online_nodes)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
- sbd.set_stonith_watchdog_timeout_to_zero_on_all_nodes(
- lib_env.node_communicator(), node_list
- )
- sbd.disable_sbd_service_on_all_nodes(
- lib_env.report_processor,
- lib_env.node_communicator(),
- node_list
- )
+ com_cmd = SetStonithWatchdogTimeoutToZero(lib_env.report_processor)
+ com_cmd.set_targets(online_nodes)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
+
+ com_cmd = DisableSbdService(lib_env.report_processor)
+ com_cmd.set_targets(online_nodes)
+ run_and_raise(lib_env.get_node_communicator(), com_cmd)
if not lib_env.is_cman_cluster:
lib_env.report_processor.process(
@@ -302,39 +312,6 @@ def disable_sbd(lib_env, ignore_offline_nodes=False):
)
-def _get_online_nodes(lib_env, node_list, ignore_offline_nodes=False):
- """
- Returns NodeAddressesList of online nodes.
- Raises LibraryError on any failure.
-
- lib_env -- LibraryEnvironment
- node_list -- NodeAddressesList
- ignore_offline_nodes -- if True offline nodes are just omitted from
- returned list.
- """
- to_raise = []
- online_node_list = NodeAddressesList()
-
- def is_node_online(node):
- try:
- nodes_task.node_check_auth(lib_env.node_communicator(), node)
- online_node_list.append(node)
- except NodeConnectionException as e:
- if ignore_offline_nodes:
- to_raise.append(reports.omitting_node(node.label))
- else:
- to_raise.append(node_communicator_exception_to_report_item(
- e, Severities.ERROR, report_codes.SKIP_OFFLINE_NODES
- ))
- except NodeCommunicationException as e:
- to_raise.append(node_communicator_exception_to_report_item(e))
-
- tools.run_parallel(is_node_online, [([node], {}) for node in node_list])
-
- lib_env.report_processor.process_list(to_raise)
- return online_node_list
-
-
def get_cluster_sbd_status(lib_env):
"""
Returns status of SBD service in cluster in dictionary with format:
@@ -349,51 +326,13 @@ def get_cluster_sbd_status(lib_env):
lib_env -- LibraryEnvironment
"""
- node_list = _get_cluster_nodes(lib_env)
- report_item_list = []
- successful_node_list = []
- status_list = []
-
- def get_sbd_status(node):
- try:
- status_list.append({
- "node": node.label,
- "status": json.loads(
- # here we just need info about sbd service,
- # therefore watchdog and device list is empty
- sbd.check_sbd(lib_env.node_communicator(), node, "", [])
- )["sbd"]
- })
- successful_node_list.append(node)
- except NodeCommunicationException as e:
- report_item_list.append(node_communicator_exception_to_report_item(
- e,
- severity=Severities.WARNING
- ))
- report_item_list.append(reports.unable_to_get_sbd_status(
- node.label,
- "", #reason is in previous report item
- #warning is there implicit
- ))
- except (ValueError, KeyError) as e:
- report_item_list.append(reports.unable_to_get_sbd_status(
- node.label, str(e)
- ))
-
- tools.run_parallel(get_sbd_status, [([node], {}) for node in node_list])
- lib_env.report_processor.process_list(report_item_list)
-
- for node in node_list:
- if node not in successful_node_list:
- status_list.append({
- "node": node.label,
- "status": {
- "installed": None,
- "enabled": None,
- "running": None
- }
- })
- return status_list
+ com_cmd = GetSbdStatus(lib_env.report_processor)
+ com_cmd.set_targets(
+ lib_env.get_node_target_factory().get_target_list(
+ _get_cluster_nodes(lib_env)
+ )
+ )
+ return run_com(lib_env.get_node_communicator(), com_cmd)
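
The returned value should keep the shape the removed code assembled by hand:
one entry per cluster node, with None for each field when a node could not be
queried (values illustrative):

    # [
    #     {"node": "node1",
    #      "status": {"installed": True, "enabled": True, "running": False}},
    #     {"node": "node2",
    #      "status": {"installed": None, "enabled": None, "running": None}},
    # ]
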
def get_cluster_sbd_config(lib_env):
@@ -412,51 +351,13 @@ def get_cluster_sbd_config(lib_env):
lib_env -- LibraryEnvironment
"""
- node_list = _get_cluster_nodes(lib_env)
- config_list = []
- successful_node_list = []
- report_item_list = []
-
- def get_sbd_config(node):
- try:
- config_list.append({
- "node": node.label,
- "config": environment_file_to_dict(
- sbd.get_sbd_config(lib_env.node_communicator(), node)
- )
- })
- successful_node_list.append(node)
- except NodeCommandUnsuccessfulException as e:
- report_item_list.append(reports.unable_to_get_sbd_config(
- node.label,
- e.reason,
- Severities.WARNING
- ))
- except NodeCommunicationException as e:
- report_item_list.append(node_communicator_exception_to_report_item(
- e,
- severity=Severities.WARNING
- ))
- report_item_list.append(reports.unable_to_get_sbd_config(
- node.label,
- "", #reason is in previous report item
- Severities.WARNING
- ))
-
- tools.run_parallel(get_sbd_config, [([node], {}) for node in node_list])
- lib_env.report_processor.process_list(report_item_list)
-
- if not len(config_list):
- return []
-
- for node in node_list:
- if node not in successful_node_list:
- config_list.append({
- "node": node.label,
- "config": None
- })
- return config_list
-
+ com_cmd = GetSbdConfig(lib_env.report_processor)
+ com_cmd.set_targets(
+ lib_env.get_node_target_factory().get_target_list(
+ _get_cluster_nodes(lib_env)
+ )
+ )
+ return run_com(lib_env.get_node_communicator(), com_cmd)
def get_local_sbd_config(lib_env):
"""
@@ -470,8 +371,7 @@ def get_local_sbd_config(lib_env):
def _get_cluster_nodes(lib_env):
if lib_env.is_cman_cluster:
return lib_env.get_cluster_conf().get_nodes()
- else:
- return lib_env.get_corosync_conf().get_nodes()
+ return lib_env.get_corosync_conf().get_nodes()
def initialize_block_devices(lib_env, device_list, option_dict):
@@ -573,4 +473,3 @@ def set_message(lib_env, device, node_name, message):
)
lib_env.report_processor.process_list(report_item_list)
sbd.set_message(lib_env.cmd_runner(), device, node_name, message)
-
diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py
index a3c3ad5..bb9fb98 100644
--- a/pcs/lib/commands/stonith.py
+++ b/pcs/lib/commands/stonith.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.lib.resource_agent import find_valid_stonith_agent_by_name as get_agent
diff --git a/pcs/lib/commands/stonith_agent.py b/pcs/lib/commands/stonith_agent.py
index ee4bc26..3c9a950 100644
--- a/pcs/lib/commands/stonith_agent.py
+++ b/pcs/lib/commands/stonith_agent.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.lib import resource_agent
diff --git a/pcs/lib/commands/test/cib_options/__init__.py b/pcs/lib/commands/test/cib_options/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/commands/test/cib_options/test_operations_defaults.py b/pcs/lib/commands/test/cib_options/test_operations_defaults.py
new file mode 100644
index 0000000..21fff6e
--- /dev/null
+++ b/pcs/lib/commands/test/cib_options/test_operations_defaults.py
@@ -0,0 +1,105 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.lib.commands import cib_options
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools import fixture
+from pcs.common import report_codes
+
+FIXTURE_INITIAL_DEFAULTS = """
+ <op_defaults>
+ <meta_attributes id="op_defaults-options">
+ <nvpair id="op_defaults-options-a" name="a" value="b"/>
+ <nvpair id="op_defaults-options-b" name="b" value="c"/>
+ </meta_attributes>
+ </op_defaults>
+"""
+
+class SetOperationsDefaults(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(optional_in_conf=FIXTURE_INITIAL_DEFAULTS)
+
+ def tearDown(self):
+ self.env_assist.assert_reports([
+ fixture.warn(report_codes.DEFAULTS_CAN_BE_OVERRIDEN)
+ ])
+
+ def assert_options_produces_op_defaults_xml(self, options, op_defaults_xml):
+ self.config.env.push_cib(
+ replace={
+ "./configuration/op_defaults/meta_attributes": op_defaults_xml
+ }
+ )
+ cib_options.set_operations_defaults(self.env_assist.get_env(), options)
+
+ def test_change(self):
+ self.assert_options_produces_op_defaults_xml(
+ {
+ "a": "B",
+ "b": "C",
+ },
+ """
+ <meta_attributes id="op_defaults-options">
+ <nvpair id="op_defaults-options-a" name="a" value="B"/>
+ <nvpair id="op_defaults-options-b" name="b" value="C"/>
+ </meta_attributes>
+ """
+ )
+
+ def test_add(self):
+ self.assert_options_produces_op_defaults_xml(
+ {"c": "d"},
+ """
+ <meta_attributes id="op_defaults-options">
+ <nvpair id="op_defaults-options-a" name="a" value="b"/>
+ <nvpair id="op_defaults-options-b" name="b" value="c"/>
+ <nvpair id="op_defaults-options-c" name="c" value="d"/>
+ </meta_attributes>
+ """
+ )
+
+ def test_remove(self):
+ self.config.env.push_cib(
+ remove=
+ "./configuration/op_defaults/meta_attributes/nvpair[@name='a']"
+ )
+ cib_options.set_operations_defaults(
+ self.env_assist.get_env(),
+ {"a": ""},
+ )
+
+ def test_add_when_section_does_not_exists(self):
+ (self.config
+ .remove("runner.cib.load")
+ .runner.cib.load()
+ .env.push_cib(
+ optional_in_conf="""
+ <op_defaults>
+ <meta_attributes id="op_defaults-options">
+ <nvpair id="op_defaults-options-a" name="a"
+ value="b"
+ />
+ </meta_attributes>
+ </op_defaults>
+ """
+ )
+ )
+ cib_options.set_operations_defaults(
+ self.env_assist.get_env(),
+ {"a": "b"},
+ )
+
+ def test_remove_section_when_empty(self):
+ self.config.env.push_cib(remove="./configuration/op_defaults")
+ cib_options.set_operations_defaults(
+ self.env_assist.get_env(),
+ {
+ "a": "",
+ "b": "",
+ }
+ )
diff --git a/pcs/lib/commands/test/cib_options/test_resources_defaults.py b/pcs/lib/commands/test/cib_options/test_resources_defaults.py
new file mode 100644
index 0000000..a9819c3
--- /dev/null
+++ b/pcs/lib/commands/test/cib_options/test_resources_defaults.py
@@ -0,0 +1,109 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.lib.commands import cib_options
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools import fixture
+from pcs.common import report_codes
+
+FIXTURE_INITIAL_DEFAULTS = """
+ <rsc_defaults>
+ <meta_attributes id="rsc_defaults-options">
+ <nvpair id="rsc_defaults-options-a" name="a" value="b"/>
+ <nvpair id="rsc_defaults-options-b" name="b" value="c"/>
+ </meta_attributes>
+ </rsc_defaults>
+"""
+
+class SetResourcesDefaults(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(optional_in_conf=FIXTURE_INITIAL_DEFAULTS)
+
+ def tearDown(self):
+ self.env_assist.assert_reports([
+ fixture.warn(report_codes.DEFAULTS_CAN_BE_OVERRIDEN)
+ ])
+
+ def assert_options_produces_rsc_defaults_xml(
+ self, options, rsc_defaults_xml
+ ):
+ self.config.env.push_cib(
+ replace={
+ "./configuration/rsc_defaults/meta_attributes": rsc_defaults_xml
+ }
+ )
+ cib_options.set_resources_defaults(self.env_assist.get_env(), options)
+
+ def test_change(self):
+ self.assert_options_produces_rsc_defaults_xml(
+ {
+ "a": "B",
+ "b": "C",
+ },
+ """
+ <meta_attributes id="rsc_defaults-options">
+ <nvpair id="rsc_defaults-options-a" name="a" value="B"/>
+ <nvpair id="rsc_defaults-options-b" name="b" value="C"/>
+ </meta_attributes>
+ """
+ )
+
+ def test_add(self):
+ self.assert_options_produces_rsc_defaults_xml(
+ {"c": "d"},
+ """
+ <meta_attributes id="rsc_defaults-options">
+ <nvpair id="rsc_defaults-options-a" name="a" value="b"/>
+ <nvpair id="rsc_defaults-options-b" name="b" value="c"/>
+ <nvpair id="rsc_defaults-options-c" name="c" value="d"/>
+ </meta_attributes>
+ """
+ )
+
+ def test_remove(self):
+ self.config.env.push_cib(
+ remove=
+ "./configuration/rsc_defaults/meta_attributes/nvpair[@name='a']"
+ )
+ cib_options.set_resources_defaults(
+ self.env_assist.get_env(),
+ {"a": ""},
+ )
+
+ def test_add_when_section_does_not_exists(self):
+ (self.config
+ .remove("runner.cib.load")
+ .runner.cib.load()
+ .env.push_cib(
+ optional_in_conf="""
+ <rsc_defaults>
+ <meta_attributes id="rsc_defaults-options">
+ <nvpair id="rsc_defaults-options-a" name="a"
+ value="b"
+ />
+ </meta_attributes>
+ </rsc_defaults>
+ """
+ )
+ )
+ cib_options.set_resources_defaults(
+ self.env_assist.get_env(),
+ {"a": "b"},
+ )
+
+ def test_remove_section_when_empty(self):
+ (self.config
+ .env.push_cib(remove="./configuration/rsc_defaults")
+ )
+ cib_options.set_resources_defaults(
+ self.env_assist.get_env(),
+ {
+ "a": "",
+ "b": "",
+ }
+ )
diff --git a/pcs/lib/commands/test/cluster/__init__.py b/pcs/lib/commands/test/cluster/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/commands/test/cluster/verify.py b/pcs/lib/commands/test/cluster/verify.py
new file mode 100644
index 0000000..cb4208b
--- /dev/null
+++ b/pcs/lib/commands/test/cluster/verify.py
@@ -0,0 +1,119 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.common import report_codes
+from pcs.lib.commands.cluster import verify
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase
+
+
+CRM_VERIFY_ERROR_REPORT = "something wrong\nsomething else wrong"
+
+BAD_FENCING_TOPOLOGY = """
+ <fencing-topology>
+ <fencing-level devices="FX" index="2" target="node1" id="fl-node1-2"/>
+ </fencing-topology>
+"""
+
+BAD_FENCING_TOPOLOGY_REPORTS = [
+ fixture.error(
+ report_codes.STONITH_RESOURCES_DO_NOT_EXIST,
+ stonith_ids=["FX"],
+ ),
+ fixture.error(
+ report_codes.NODE_NOT_FOUND,
+ node="node1",
+ searched_types=[],
+ ),
+]
+
+
+class CibAsWholeValid(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.pcmk.verify()
+
+ def test_success_on_valid(self):
+ (self.config
+ .runner.cib.load()
+ .runner.pcmk.load_state()
+ )
+ verify(self.env_assist.get_env())
+
+ def test_fail_on_invalid_fence_topology(self):
+ (self.config
+ .runner.cib.load(optional_in_conf=BAD_FENCING_TOPOLOGY)
+ .runner.pcmk.load_state()
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: verify(self.env_assist.get_env()),
+ list(BAD_FENCING_TOPOLOGY_REPORTS)
+ )
+
+
+class CibAsWholeInvalid(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.pcmk.verify(stderr=CRM_VERIFY_ERROR_REPORT)
+
+ def assert_raises_invalid_cib_content(self, extra_reports=None):
+ extra_reports = extra_reports if extra_reports else []
+ self.env_assist.assert_raise_library_error(
+ lambda: verify(self.env_assist.get_env()),
+ [
+ fixture.error(
+ report_codes.INVALID_CIB_CONTENT,
+ report=CRM_VERIFY_ERROR_REPORT,
+ ),
+ ] + extra_reports,
+ )
+
+ def test_fail_immediately_on_unloadable_cib(self):
+ self.config.runner.cib.load(returncode=1)
+ self.assert_raises_invalid_cib_content()
+
+ def test_continue_on_loadable_cib(self):
+ (self.config
+ .runner.cib.load()
+ .runner.pcmk.load_state()
+ )
+ self.assert_raises_invalid_cib_content()
+
+ def test_add_following_errors(self):
+ #More fencing topology tests are provided by tests of
+ #pcs.lib.commands.fencing_topology
+ (self.config
+ .runner.cib.load(optional_in_conf=BAD_FENCING_TOPOLOGY)
+ .runner.pcmk.load_state()
+ )
+ self.assert_raises_invalid_cib_content(
+ list(BAD_FENCING_TOPOLOGY_REPORTS)
+ )
+
+class CibIsMocked(TestCase):
+ def test_success_on_valid_cib(self):
+ cib_tempfile = "/fake/tmp/file"
+ env_assist, config = get_env_tools(test_case=self)
+ (config
+ .env.set_cib_data("<cib/>", cib_tempfile=cib_tempfile)
+ .runner.pcmk.verify(cib_tempfile=cib_tempfile)
+ .runner.cib.load()
+ .runner.pcmk.load_state()
+ )
+ verify(env_assist.get_env())
+
+class VerboseMode(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.pcmk.verify(verbose=True)
+
+ def test_success_on_valid_cib(self):
+ (self.config
+ .runner.cib.load()
+ .runner.pcmk.load_state()
+ )
+ verify(self.env_assist.get_env(), verbose=True)
diff --git a/pcs/lib/commands/test/resource/common.py b/pcs/lib/commands/test/resource/common.py
deleted file mode 100644
index ac8cb24..0000000
--- a/pcs/lib/commands/test/resource/common.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from __future__ import (
- absolute_import,
- division,
- print_function,
- unicode_literals,
-)
-
-import logging
-
-import pcs.lib.commands.test.resource.fixture as fixture
-from pcs.lib.env import LibraryEnvironment
-from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.integration_lib import Runner
-from pcs.test.tools.misc import get_test_resource as rc
-from pcs.test.tools.pcs_unittest import TestCase, mock
-
-class CommonResourceTest(TestCase):
- @classmethod
- def setUpClass(cls):
- cls.runner = Runner()
- cls.patcher = mock.patch.object(
- LibraryEnvironment,
- "cmd_runner",
- lambda self: cls.runner
- )
- cls.patcher.start()
-
- cls.patcher_corosync = mock.patch.object(
- LibraryEnvironment,
- "get_corosync_conf_data",
- lambda self: open(rc("corosync.conf")).read()
- )
- cls.patcher_corosync.start()
-
- @classmethod
- def tearDownClass(cls):
- cls.patcher.stop()
- cls.patcher_corosync.stop()
-
- def setUp(self):
- self.env = LibraryEnvironment(
- mock.MagicMock(logging.Logger),
- MockLibraryReportProcessor()
- )
- self.cib_base_file = "cib-empty.xml"
-
-
-class ResourceWithoutStateTest(CommonResourceTest):
- def assert_command_effect(self, cib_pre, cmd, cib_post, reports=None):
- self.runner.set_runs(
- fixture.calls_cib(
- cib_pre,
- cib_post,
- cib_base_file=self.cib_base_file
- )
- )
- cmd()
- self.env.report_processor.assert_reports(reports if reports else [])
- self.runner.assert_everything_launched()
-
-
-class ResourceWithStateTest(CommonResourceTest):
- def assert_command_effect(
- self, cib_pre, status, cmd, cib_post, reports=None
- ):
- self.runner.set_runs(
- fixture.calls_cib_and_status(
- cib_pre,
- status,
- cib_post,
- cib_base_file=self.cib_base_file
- )
- )
- cmd()
- self.env.report_processor.assert_reports(reports if reports else [])
- self.runner.assert_everything_launched()
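The helpers deleted above (the cmd_runner patching and assert_command_effect) are superseded by the command_env fixtures used throughout the rewritten tests below. Roughly, the old and new test shapes compare as follows; this is a sketch, with cib_pre, cib_post and some_command as placeholders rather than real fixtures:

    # old style: enqueue canned runner calls, run the command, then
    # assert that every enqueued call was consumed
    self.assert_command_effect(
        cib_pre,
        lambda: some_command(self.env),
        cib_post,
    )

    # new style: declare the expected CIB load and push on the config,
    # then run the command against the assisted environment
    self.config.runner.cib.load(resources=cib_pre)
    self.config.env.push_cib(resources=cib_post)
    some_command(self.env_assist.get_env())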
diff --git a/pcs/lib/commands/test/resource/test_bundle_create.py b/pcs/lib/commands/test/resource/test_bundle_create.py
index 3bdeee9..e9f6c10 100644
--- a/pcs/lib/commands/test/resource/test_bundle_create.py
+++ b/pcs/lib/commands/test/resource/test_bundle_create.py
@@ -2,98 +2,104 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
+from functools import partial
from textwrap import dedent
from pcs.common import report_codes
+from pcs.lib import reports
from pcs.lib.commands import resource
-from pcs.lib.commands.test.resource.common import ResourceWithoutStateTest
-import pcs.lib.commands.test.resource.fixture as fixture
-from pcs.lib.errors import ReportItemSeverity as severities
-from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.lib.errors import (
+ LibraryError,
+ ReportItemSeverity as severities,
+)
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
from pcs.test.tools.misc import skip_unless_pacemaker_supports_bundle
+from pcs.test.tools.pcs_unittest import TestCase
-class CommonTest(ResourceWithoutStateTest):
- fixture_cib_pre = "<resources />"
- fixture_resources_bundle_simple = """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" />
- </bundle>
- </resources>
- """
+TIMEOUT = 10
- def setUp(self):
- super(CommonTest, self).setUp()
- self.cib_base_file = "cib-empty-2.8.xml"
+get_env_tools = partial(
+ get_env_tools,
+ base_cib_filename="cib-empty-2.8.xml"
+)
- def fixture_cib_resources(self, cib):
- return fixture.cib_resources(cib, cib_base_file=self.cib_base_file)
+def simple_bundle_create(env, wait=TIMEOUT, disabled=False):
+ return resource.bundle_create(
+ env, "B1", "docker",
+ container_options={"image": "pcs:test"},
+ ensure_disabled=disabled,
+ wait=wait,
+ )
-class MinimalCreate(CommonTest):
- def test_success(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- container_options={"image": "pcs:test", }
- ),
- self.fixture_resources_bundle_simple
+fixture_cib_pre = "<resources />"
+fixture_resources_bundle_simple = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+"""
+
+class MinimalCreate(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ (self.config
+ .runner.cib.load()
+ .env.push_cib(resources=fixture_resources_bundle_simple)
)
+ def test_success(self):
+ simple_bundle_create(self.env_assist.get_env(), wait=False)
+
def test_errors(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_pre)
- )
- )
- assert_raise_library_error(
- lambda: resource.bundle_create(self.env, "B#1", "nonsense"),
- (
- severities.ERROR,
- report_codes.INVALID_ID,
- {
- "invalid_character": "#",
- "id": "B#1",
- "id_description": "bundle name",
- "is_first_char": False,
- },
- None
- ),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION_VALUE,
- {
- "option_name": "container type",
- "option_value": "nonsense",
- "allowed_values": ("docker", ),
- },
- None
+ self.config.remove("env.push_cib")
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.bundle_create(
+ self.env_assist.get_env(), "B#1", "nonsense"
),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_ID,
+ {
+ "invalid_character": "#",
+ "id": "B#1",
+ "id_description": "bundle name",
+ "is_first_char": False,
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "container type",
+ "option_value": "nonsense",
+ "allowed_values": ("docker", ),
+ },
+ None
+ ),
+ ]
)
- self.runner.assert_everything_launched()
def test_cib_upgrade(self):
- self.runner.set_runs(
- fixture.calls_cib_load_and_upgrade(self.fixture_cib_pre)
- +
- fixture.calls_cib(
- self.fixture_cib_pre,
- self.fixture_resources_bundle_simple,
- cib_base_file=self.cib_base_file
+ (self.config
+ .runner.cib.load(
+ name="load_cib_old_version",
+ filename="cib-empty.xml",
+ before="runner.cib.load"
)
+ .runner.cib.upgrade(before="runner.cib.load")
)
- resource.bundle_create(
- self.env, "B1", "docker",
- container_options={"image": "pcs:test", }
- )
+ simple_bundle_create(self.env_assist.get_env(), wait=False)
- self.env.report_processor.assert_reports([
+ self.env_assist.assert_reports([
(
severities.INFO,
report_codes.CIB_UPGRADE_SUCCESSFUL,
@@ -102,11 +108,9 @@ class MinimalCreate(CommonTest):
None
),
])
- self.runner.assert_everything_launched()
-
-class CreateDocker(CommonTest):
+class CreateDocker(TestCase):
allowed_options = [
"image",
"masters",
@@ -117,32 +121,17 @@ class CreateDocker(CommonTest):
"run-command",
]
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(resources=fixture_cib_pre)
+
def test_minimal(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- container_options={"image": "pcs:test", }
- ),
- self.fixture_resources_bundle_simple
- )
+ self.config.env.push_cib(resources=fixture_resources_bundle_simple)
+ simple_bundle_create(self.env_assist.get_env(), wait=False)
def test_all_options(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- container_options={
- "image": "pcs:test",
- "masters": "0",
- "network": "extra network settings",
- "options": "extra options",
- "run-command": "/bin/true",
- "replicas": "4",
- "replicas-per-host": "2",
- }
- ),
- """
+ self.config.env.push_cib(
+ resources="""
<resources>
<bundle id="B1">
<docker
@@ -158,16 +147,23 @@ class CreateDocker(CommonTest):
</resources>
"""
)
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ container_options={
+ "image": "pcs:test",
+ "masters": "0",
+ "network": "extra network settings",
+ "options": "extra options",
+ "run-command": "/bin/true",
+ "replicas": "4",
+ "replicas-per-host": "2",
+ }
+ )
def test_options_errors(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_pre)
- )
- )
- assert_raise_library_error(
+ self.env_assist.assert_raise_library_error(
lambda: resource.bundle_create(
- self.env, "B1", "docker",
+ self.env_assist.get_env(), "B1", "docker",
container_options={
"replicas-per-host": "0",
"replicas": "0",
@@ -175,136 +171,128 @@ class CreateDocker(CommonTest):
},
force_options=True
),
- (
- severities.ERROR,
- report_codes.REQUIRED_OPTION_IS_MISSING,
- {
- "option_type": "container",
- "option_names": ["image", ],
- },
- None
- ),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION_VALUE,
- {
- "option_name": "masters",
- "option_value": "-1",
- "allowed_values": "a non-negative integer",
- },
- None
- ),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION_VALUE,
- {
- "option_name": "replicas",
- "option_value": "0",
- "allowed_values": "a positive integer",
- },
- None
- ),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION_VALUE,
- {
- "option_name": "replicas-per-host",
- "option_value": "0",
- "allowed_values": "a positive integer",
- },
- None
- ),
+ [
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_type": "container",
+ "option_names": ["image", ],
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "masters",
+ "option_value": "-1",
+ "allowed_values": "a non-negative integer",
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "replicas",
+ "option_value": "0",
+ "allowed_values": "a positive integer",
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "replicas-per-host",
+ "option_value": "0",
+ "allowed_values": "a positive integer",
+ },
+ None
+ ),
+ ]
)
- self.runner.assert_everything_launched()
def test_empty_image(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_pre)
- )
- )
- assert_raise_library_error(
+ self.env_assist.assert_raise_library_error(
lambda: resource.bundle_create(
- self.env, "B1", "docker",
+ self.env_assist.get_env(), "B1", "docker",
container_options={
"image": "",
},
force_options=True
),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION_VALUE,
- {
- "option_name": "image",
- "option_value": "",
- "allowed_values": "image name",
- },
- None
- ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "image",
+ "option_value": "",
+ "allowed_values": "image name",
+ },
+ None
+ ),
+ ]
)
- self.runner.assert_everything_launched()
def test_unknow_option(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_pre)
- )
- )
- assert_raise_library_error(
+ self.env_assist.assert_raise_library_error(
lambda: resource.bundle_create(
- self.env, "B1", "docker",
+ self.env_assist.get_env(), "B1", "docker",
container_options={
"image": "pcs:test",
"extra": "option",
}
),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION,
- {
- "option_names": ["extra", ],
- "option_type": "container",
- "allowed": self.allowed_options,
- },
- report_codes.FORCE_OPTIONS
- ),
- )
- self.runner.assert_everything_launched()
-
- def test_unknow_option_forced(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- container_options={
- "image": "pcs:test",
- "extra": "option",
- },
- force_options=True
- ),
- """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" extra="option" />
- </bundle>
- </resources>
- """,
[
(
- severities.WARNING,
+ severities.ERROR,
report_codes.INVALID_OPTION,
{
"option_names": ["extra", ],
"option_type": "container",
"allowed": self.allowed_options,
},
- None
+ report_codes.FORCE_OPTIONS
),
]
)
+ def test_unknow_option_forced(self):
+ self.config.env.push_cib(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" extra="option" />
+ </bundle>
+ </resources>
+ """
+ )
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ container_options={
+ "image": "pcs:test",
+ "extra": "option",
+ },
+ force_options=True
+ )
+ self.env_assist.assert_reports([
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "container",
+ "allowed": self.allowed_options,
+ },
+ None
+ ),
+ ])
+
-class CreateWithNetwork(CommonTest):
+class CreateWithNetwork(TestCase):
allowed_options = [
"control-port",
"host-interface",
@@ -312,31 +300,22 @@ class CreateWithNetwork(CommonTest):
"ip-range-start",
]
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(resources=fixture_cib_pre)
+
+
def test_no_options(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- {"image": "pcs:test", },
- network_options={}
- ),
- self.fixture_resources_bundle_simple
+ self.config.env.push_cib(resources=fixture_resources_bundle_simple)
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ {"image": "pcs:test", },
+ network_options={}
)
def test_all_options(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- {"image": "pcs:test", },
- network_options={
- "control-port": "12345",
- "host-interface": "eth0",
- "host-netmask": "24",
- "ip-range-start": "192.168.100.200",
- }
- ),
- """
+ self.config.env.push_cib(
+ resources="""
<resources>
<bundle id="B1">
<docker image="pcs:test" />
@@ -350,16 +329,21 @@ class CreateWithNetwork(CommonTest):
</resources>
"""
)
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ {"image": "pcs:test", },
+ network_options={
+ "control-port": "12345",
+ "host-interface": "eth0",
+ "host-netmask": "24",
+ "ip-range-start": "192.168.100.200",
+ }
+ )
def test_options_errors(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_pre)
- )
- )
- assert_raise_library_error(
+ self.env_assist.assert_raise_library_error(
lambda: resource.bundle_create(
- self.env, "B1", "docker",
+ self.env_assist.get_env(), "B1", "docker",
{"image": "pcs:test", },
network_options={
"control-port": "0",
@@ -367,87 +351,88 @@ class CreateWithNetwork(CommonTest):
"extra": "option",
}
),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION_VALUE,
- {
- "option_name": "control-port",
- "option_value": "0",
- "allowed_values": "a port number (1-65535)",
- },
- None
- ),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION_VALUE,
- {
- "option_name": "host-netmask",
- "option_value": "abc",
- "allowed_values": "a number of bits of the mask (1-32)",
- },
- report_codes.FORCE_OPTIONS
- ),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION,
- {
- "option_names": ["extra", ],
- "option_type": "network",
- "allowed": self.allowed_options,
- },
- report_codes.FORCE_OPTIONS
- ),
- )
- self.runner.assert_everything_launched()
-
- def test_options_forced(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- {
- "image": "pcs:test",
- },
- network_options={
- "host-netmask": "abc",
- "extra": "option",
- },
- force_options=True
- ),
- """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" />
- <network host-netmask="abc" extra="option" />
- </bundle>
- </resources>
- """,
[
(
- severities.WARNING,
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "control-port",
+ "option_value": "0",
+ "allowed_values": "a port number (1-65535)",
+ },
+ None
+ ),
+ (
+ severities.ERROR,
report_codes.INVALID_OPTION_VALUE,
{
"option_name": "host-netmask",
"option_value": "abc",
"allowed_values": "a number of bits of the mask (1-32)",
},
- None
+ report_codes.FORCE_OPTIONS
),
(
- severities.WARNING,
+ severities.ERROR,
report_codes.INVALID_OPTION,
{
"option_names": ["extra", ],
"option_type": "network",
"allowed": self.allowed_options,
},
- None
+ report_codes.FORCE_OPTIONS
),
]
)
+ def test_options_forced(self):
+ self.config.env.push_cib(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-netmask="abc" extra="option" />
+ </bundle>
+ </resources>
+ """
+ )
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ {
+ "image": "pcs:test",
+ },
+ network_options={
+ "host-netmask": "abc",
+ "extra": "option",
+ },
+ force_options=True
+ )
+
+ self.env_assist.assert_reports([
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "host-netmask",
+ "option_value": "abc",
+ "allowed_values": "a number of bits of the mask (1-32)",
+ },
+ None
+ ),
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "network",
+ "allowed": self.allowed_options,
+ },
+ None
+ ),
+ ])
-class CreateWithPortMap(CommonTest):
+
+class CreateWithPortMap(TestCase):
allowed_options = [
"id",
"internal-port",
@@ -455,39 +440,21 @@ class CreateWithPortMap(CommonTest):
"range",
]
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(resources=fixture_cib_pre)
+
def test_no_options(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- {"image": "pcs:test", },
- port_map=[]
- ),
- self.fixture_resources_bundle_simple
+ self.config.env.push_cib(resources=fixture_resources_bundle_simple)
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ {"image": "pcs:test", },
+ port_map=[]
)
def test_several_mappings_and_handle_their_ids(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- {"image": "pcs:test", },
- port_map=[
- {
- "port": "1001",
- },
- {
- # use an autogenerated id of the previous item
- "id": "B1-port-map-1001",
- "port": "2000",
- "internal-port": "2002",
- },
- {
- "range": "3000-3300",
- },
- ]
- ),
- """
+ self.config.env.push_cib(
+ resources="""
<resources>
<bundle id="B1">
<docker image="pcs:test" />
@@ -507,16 +474,29 @@ class CreateWithPortMap(CommonTest):
</resources>
"""
)
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ {"image": "pcs:test", },
+ port_map=[
+ {
+ "port": "1001",
+ },
+ {
+ # use an autogenerated id of the previous item
+ "id": "B1-port-map-1001",
+ "port": "2000",
+ "internal-port": "2002",
+ },
+ {
+ "range": "3000-3300",
+ },
+ ]
+ )
def test_options_errors(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_pre)
- )
- )
- assert_raise_library_error(
+ self.env_assist.assert_raise_library_error(
lambda: resource.bundle_create(
- self.env, "B1", "docker",
+ self.env_assist.get_env(), "B1", "docker",
{"image": "pcs:test", },
port_map=[
{
@@ -538,187 +518,186 @@ class CreateWithPortMap(CommonTest):
],
force_options=True
),
- # first
- (
- severities.ERROR,
- report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
- {
- "option_type": "port-map",
- "option_names": ["port", "range"],
- },
- None
- ),
- # second
- (
- severities.ERROR,
- report_codes.INVALID_ID,
- {
- "invalid_character": "#",
- "id": "not#valid",
- "id_description": "port-map id",
- "is_first_char": False,
- },
- None
- ),
- (
- severities.ERROR,
- report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
- {
- "option_type": "port-map",
- "option_names": ["port", "range"],
- },
- None
- ),
- # third
- (
- severities.ERROR,
- report_codes.PREREQUISITE_OPTION_IS_MISSING,
- {
- "option_type": "port-map",
- "option_name": "internal-port",
- "prerequisite_type": "port-map",
- "prerequisite_name": "port",
- },
- None
- ),
- (
- severities.ERROR,
- report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
- {
- "option_type": "port-map",
- "option_names": ["port", "range"],
- },
- None
- ),
- # fourth
- (
- severities.ERROR,
- report_codes.INVALID_OPTION_VALUE,
- {
- "option_name": "port",
- "option_value": "abc",
- "allowed_values": "a port number (1-65535)",
- },
- None
- ),
- # fifth
- (
- severities.ERROR,
- report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
- {
- "option_names": ["port", "range", ],
- "option_type": "port-map",
- },
- None
- ),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION_VALUE,
- {
- "option_name": "internal-port",
- "option_value": "def",
- "allowed_values": "a port number (1-65535)",
- },
- None
- ),
- )
- self.runner.assert_everything_launched()
-
- def test_forceable_options_errors(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_pre)
- )
- )
- assert_raise_library_error(
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- {"image": "pcs:test", },
- port_map=[
+ [
+ # first
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
{
- "range": "3000",
- "extra": "option",
+ "option_type": "port-map",
+ "option_names": ["port", "range"],
},
- ]
- ),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION,
- {
- "option_names": ["extra", ],
- "option_type": "port-map",
- "allowed": self.allowed_options,
- },
- report_codes.FORCE_OPTIONS
- ),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION_VALUE,
- {
- "option_name": "range",
- "option_value": "3000",
- "allowed_values": "port-port",
- },
- report_codes.FORCE_OPTIONS
- ),
- )
-
- def test_forceable_options_errors_forced(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- {
- "image": "pcs:test",
- },
- port_map=[
+ None
+ ),
+ # second
+ (
+ severities.ERROR,
+ report_codes.INVALID_ID,
{
- "range": "3000",
- "extra": "option",
+ "invalid_character": "#",
+ "id": "not#valid",
+ "id_description": "port-map id",
+ "is_first_char": False,
},
- ],
- force_options=True
- ),
- """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" />
- <network>
- <port-mapping
- id="B1-port-map-3000"
- extra="option"
- range="3000"
- />
- </network>
- </bundle>
- </resources>
- """,
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
+ {
+ "option_type": "port-map",
+ "option_names": ["port", "range"],
+ },
+ None
+ ),
+ # third
+ (
+ severities.ERROR,
+ report_codes.PREREQUISITE_OPTION_IS_MISSING,
+ {
+ "option_type": "port-map",
+ "option_name": "internal-port",
+ "prerequisite_type": "port-map",
+ "prerequisite_name": "port",
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
+ {
+ "option_type": "port-map",
+ "option_names": ["port", "range"],
+ },
+ None
+ ),
+ # fourth
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "port",
+ "option_value": "abc",
+ "allowed_values": "a port number (1-65535)",
+ },
+ None
+ ),
+ # fifth
+ (
+ severities.ERROR,
+ report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
+ {
+ "option_names": ["port", "range", ],
+ "option_type": "port-map",
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "internal-port",
+ "option_value": "def",
+ "allowed_values": "a port number (1-65535)",
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_forceable_options_errors(self):
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ {"image": "pcs:test", },
+ port_map=[
+ {
+ "range": "3000",
+ "extra": "option",
+ },
+ ]
+ ),
[
(
- severities.WARNING,
+ severities.ERROR,
report_codes.INVALID_OPTION,
{
"option_names": ["extra", ],
"option_type": "port-map",
"allowed": self.allowed_options,
},
- None
+ report_codes.FORCE_OPTIONS
),
(
- severities.WARNING,
+ severities.ERROR,
report_codes.INVALID_OPTION_VALUE,
{
"option_name": "range",
"option_value": "3000",
"allowed_values": "port-port",
},
- None
+ report_codes.FORCE_OPTIONS
),
]
)
+ def test_forceable_options_errors_forced(self):
+ self.config.env.push_cib(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network>
+ <port-mapping
+ id="B1-port-map-3000"
+ extra="option"
+ range="3000"
+ />
+ </network>
+ </bundle>
+ </resources>
+ """,
+ )
+
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ {
+ "image": "pcs:test",
+ },
+ port_map=[
+ {
+ "range": "3000",
+ "extra": "option",
+ },
+ ],
+ force_options=True
+ )
+
+ self.env_assist.assert_reports([
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "port-map",
+ "allowed": self.allowed_options,
+ },
+ None
+ ),
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "range",
+ "option_value": "3000",
+ "allowed_values": "port-port",
+ },
+ None
+ ),
+ ])
+
-class CreateWithStorageMap(CommonTest):
+class CreateWithStorageMap(TestCase):
allowed_options = [
"id",
"options",
@@ -727,38 +706,14 @@ class CreateWithStorageMap(CommonTest):
"target-dir",
]
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(resources=fixture_cib_pre)
+
+
def test_several_mappings_and_handle_their_ids(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- {"image": "pcs:test", },
- storage_map=[
- {
- "source-dir": "/tmp/docker1a",
- "target-dir": "/tmp/docker1b",
- },
- {
- # use an autogenerated id of the previous item
- "id": "B1-storage-map",
- "source-dir": "/tmp/docker2a",
- "target-dir": "/tmp/docker2b",
- "options": "extra options 1"
- },
- {
- "source-dir-root": "/tmp/docker3a",
- "target-dir": "/tmp/docker3b",
- },
- {
- # use an autogenerated id of the previous item
- "id": "B1-storage-map-2",
- "source-dir-root": "/tmp/docker4a",
- "target-dir": "/tmp/docker4b",
- "options": "extra options 2"
- },
- ]
- ),
- """
+ self.config.env.push_cib(
+ resources="""
<resources>
<bundle id="B1">
<docker image="pcs:test" />
@@ -790,16 +745,39 @@ class CreateWithStorageMap(CommonTest):
</resources>
"""
)
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ {"image": "pcs:test", },
+ storage_map=[
+ {
+ "source-dir": "/tmp/docker1a",
+ "target-dir": "/tmp/docker1b",
+ },
+ {
+ # use an autogenerated id of the previous item
+ "id": "B1-storage-map",
+ "source-dir": "/tmp/docker2a",
+ "target-dir": "/tmp/docker2b",
+ "options": "extra options 1"
+ },
+ {
+ "source-dir-root": "/tmp/docker3a",
+ "target-dir": "/tmp/docker3b",
+ },
+ {
+ # use an autogenerated id of the previous item
+ "id": "B1-storage-map-2",
+ "source-dir-root": "/tmp/docker4a",
+ "target-dir": "/tmp/docker4b",
+ "options": "extra options 2"
+ },
+ ]
+ )
def test_options_errors(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_pre)
- )
- )
- assert_raise_library_error(
+ self.env_assist.assert_raise_library_error(
lambda: resource.bundle_create(
- self.env, "B1", "docker",
+ self.env_assist.get_env(), "B1", "docker",
{"image": "pcs:test", },
storage_map=[
{
@@ -813,57 +791,54 @@ class CreateWithStorageMap(CommonTest):
],
force_options=True
),
- # first
- (
- severities.ERROR,
- report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
- {
- "option_type": "storage-map",
- "option_names": ["source-dir", "source-dir-root"],
- },
- None
- ),
- (
- severities.ERROR,
- report_codes.REQUIRED_OPTION_IS_MISSING,
- {
- "option_type": "storage-map",
- "option_names": ["target-dir", ],
- },
- None
- ),
- # second
- (
- severities.ERROR,
- report_codes.INVALID_ID,
- {
- "invalid_character": "#",
- "id": "not#valid",
- "id_description": "storage-map id",
- "is_first_char": False,
- },
- None
- ),
- (
- severities.ERROR,
- report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
- {
- "option_type": "storage-map",
- "option_names": ["source-dir", "source-dir-root"],
- },
- None
- ),
+ [
+ # first
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
+ {
+ "option_type": "storage-map",
+ "option_names": ["source-dir", "source-dir-root"],
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_type": "storage-map",
+ "option_names": ["target-dir", ],
+ },
+ None
+ ),
+ # second
+ (
+ severities.ERROR,
+ report_codes.INVALID_ID,
+ {
+ "invalid_character": "#",
+ "id": "not#valid",
+ "id_description": "storage-map id",
+ "is_first_char": False,
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
+ {
+ "option_type": "storage-map",
+ "option_names": ["source-dir", "source-dir-root"],
+ },
+ None
+ ),
+ ]
)
def test_forceable_options_errors(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_pre)
- )
- )
- assert_raise_library_error(
+ self.env_assist.assert_raise_library_error(
lambda: resource.bundle_create(
- self.env, "B1", "docker",
+ self.env_assist.get_env(), "B1", "docker",
{"image": "pcs:test", },
storage_map=[
{
@@ -873,36 +848,23 @@ class CreateWithStorageMap(CommonTest):
},
]
),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION,
- {
- "option_names": ["extra", ],
- "option_type": "storage-map",
- "allowed": self.allowed_options,
- },
- report_codes.FORCE_OPTIONS
- ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "storage-map",
+ "allowed": self.allowed_options,
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ ]
)
def test_forceable_options_errors_forced(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- {
- "image": "pcs:test",
- },
- storage_map=[
- {
- "source-dir": "/tmp/docker1a",
- "target-dir": "/tmp/docker1b",
- "extra": "option",
- },
- ],
- force_options=True
- ),
- """
+ self.config.env.push_cib(
+ resources="""
<resources>
<bundle id="B1">
<docker image="pcs:test" />
@@ -917,6 +879,24 @@ class CreateWithStorageMap(CommonTest):
</bundle>
</resources>
""",
+ )
+
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ {
+ "image": "pcs:test",
+ },
+ storage_map=[
+ {
+ "source-dir": "/tmp/docker1a",
+ "target-dir": "/tmp/docker1b",
+ "extra": "option",
+ },
+ ],
+ force_options=True
+ )
+
+ self.env_assist.assert_reports(
[
(
severities.WARNING,
@@ -932,19 +912,14 @@ class CreateWithStorageMap(CommonTest):
)
-class CreateWithMeta(CommonTest):
+class CreateWithMeta(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(resources=fixture_cib_pre)
+
def test_success(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- container_options={"image": "pcs:test", },
- meta_attributes={
- "target-role": "Stopped",
- "is-managed": "false",
- }
- ),
- """
+ self.config.env.push_cib(
+ resources="""
<resources>
<bundle id="B1">
<docker image="pcs:test" />
@@ -958,16 +933,18 @@ class CreateWithMeta(CommonTest):
</resources>
"""
)
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ container_options={"image": "pcs:test", },
+ meta_attributes={
+ "target-role": "Stopped",
+ "is-managed": "false",
+ }
+ )
def test_disabled(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- container_options={"image": "pcs:test", },
- ensure_disabled=True
- ),
- """
+ self.config.env.push_cib(
+ resources="""
<resources>
<bundle id="B1">
<meta_attributes id="B1-meta_attributes">
@@ -979,68 +956,20 @@ class CreateWithMeta(CommonTest):
</resources>
"""
)
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ container_options={"image": "pcs:test", },
+ ensure_disabled=True
+ )
+
+class CreateWithAllOptions(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(resources=fixture_cib_pre)
-class CreateWithAllOptions(CommonTest):
def test_success(self):
- self.assert_command_effect(
- self.fixture_cib_pre,
- lambda: resource.bundle_create(
- self.env, "B1", "docker",
- container_options={
- "image": "pcs:test",
- "masters": "0",
- "network": "extra network settings",
- "options": "extra options",
- "run-command": "/bin/true",
- "replicas": "4",
- "replicas-per-host": "2",
- },
- network_options={
- "control-port": "12345",
- "host-interface": "eth0",
- "host-netmask": "24",
- "ip-range-start": "192.168.100.200",
- },
- port_map=[
- {
- "port": "1001",
- },
- {
- # use an autogenerated id of the previous item
- "id": "B1-port-map-1001",
- "port": "2000",
- "internal-port": "2002",
- },
- {
- "range": "3000-3300",
- },
- ],
- storage_map=[
- {
- "source-dir": "/tmp/docker1a",
- "target-dir": "/tmp/docker1b",
- },
- {
- # use an autogenerated id of the previous item
- "id": "B1-storage-map",
- "source-dir": "/tmp/docker2a",
- "target-dir": "/tmp/docker2b",
- "options": "extra options 1"
- },
- {
- "source-dir-root": "/tmp/docker3a",
- "target-dir": "/tmp/docker3b",
- },
- {
- # use an autogenerated id of the previous item
- "id": "B1-port-map-1001-1",
- "source-dir-root": "/tmp/docker4a",
- "target-dir": "/tmp/docker4b",
- "options": "extra options 2"
- },
- ]
- ),
- """
+ self.config.env.push_cib(
+ resources="""
<resources>
<bundle id="B1">
<docker
@@ -1097,9 +1026,65 @@ class CreateWithAllOptions(CommonTest):
</resources>
"""
)
+ resource.bundle_create(
+ self.env_assist.get_env(), "B1", "docker",
+ container_options={
+ "image": "pcs:test",
+ "masters": "0",
+ "network": "extra network settings",
+ "options": "extra options",
+ "run-command": "/bin/true",
+ "replicas": "4",
+ "replicas-per-host": "2",
+ },
+ network_options={
+ "control-port": "12345",
+ "host-interface": "eth0",
+ "host-netmask": "24",
+ "ip-range-start": "192.168.100.200",
+ },
+ port_map=[
+ {
+ "port": "1001",
+ },
+ {
+ # use an autogenerated id of the previous item
+ "id": "B1-port-map-1001",
+ "port": "2000",
+ "internal-port": "2002",
+ },
+ {
+ "range": "3000-3300",
+ },
+ ],
+ storage_map=[
+ {
+ "source-dir": "/tmp/docker1a",
+ "target-dir": "/tmp/docker1b",
+ },
+ {
+ # use an autogenerated id of the previous item
+ "id": "B1-storage-map",
+ "source-dir": "/tmp/docker2a",
+ "target-dir": "/tmp/docker2b",
+ "options": "extra options 1"
+ },
+ {
+ "source-dir-root": "/tmp/docker3a",
+ "target-dir": "/tmp/docker3b",
+ },
+ {
+ # use an autogenerated id of the previous item
+ "id": "B1-port-map-1001-1",
+ "source-dir-root": "/tmp/docker4a",
+ "target-dir": "/tmp/docker4b",
+ "options": "extra options 2"
+ },
+ ]
+ )
-class Wait(CommonTest):
+class Wait(TestCase):
fixture_status_running = """
<resources>
<bundle id="B1" managed="true">
@@ -1142,132 +1127,104 @@ class Wait(CommonTest):
</resources>
"""
- timeout = 10
-
- def simple_bundle_create(self, wait=False, disabled=False):
- return resource.bundle_create(
- self.env, "B1", "docker",
- container_options={"image": "pcs:test"},
- ensure_disabled=disabled,
- wait=wait,
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ (self.config
+ .runner.pcmk.can_wait()
+ .runner.cib.load(resources=fixture_cib_pre)
)
def test_wait_fail(self):
- fixture_wait_timeout_error = dedent(
+ wait_error_message = dedent(
"""\
Pending actions:
Action 12: B1-node2-stop on node2
Error performing operation: Timer expired
"""
- )
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib(
- self.fixture_cib_pre,
- self.fixture_resources_bundle_simple,
- cib_base_file=self.cib_base_file,
+ ).strip()
+ self.config.env.push_cib(
+ resources=fixture_resources_bundle_simple,
+ wait=TIMEOUT,
+ exception=LibraryError(
+ reports.wait_for_idle_timed_out(wait_error_message)
)
- +
- fixture.call_wait(self.timeout, 62, fixture_wait_timeout_error)
)
- assert_raise_library_error(
- lambda: self.simple_bundle_create(self.timeout),
- fixture.report_wait_for_idle_timed_out(
- fixture_wait_timeout_error
- ),
+ self.env_assist.assert_raise_library_error(
+ lambda: simple_bundle_create(self.env_assist.get_env()),
+ [
+ fixture.report_wait_for_idle_timed_out(wait_error_message)
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
@skip_unless_pacemaker_supports_bundle
def test_wait_ok_run_ok(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib(
- self.fixture_cib_pre,
- self.fixture_resources_bundle_simple,
- cib_base_file=self.cib_base_file,
+ (self.config
+ .env.push_cib(
+ resources=fixture_resources_bundle_simple,
+ wait=TIMEOUT
)
- +
- fixture.call_wait(self.timeout)
- +
- fixture.call_status(fixture.state_complete(
- self.fixture_status_running
- ))
+ .runner.pcmk.load_state(resources=self.fixture_status_running)
)
- self.simple_bundle_create(self.timeout)
- self.env.report_processor.assert_reports([
+ simple_bundle_create(self.env_assist.get_env())
+ self.env_assist.assert_reports([
fixture.report_resource_running(
"B1", {"Started": ["node1", "node2"]}
),
])
- self.runner.assert_everything_launched()
@skip_unless_pacemaker_supports_bundle
def test_wait_ok_run_fail(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib(
- self.fixture_cib_pre,
- self.fixture_resources_bundle_simple,
- cib_base_file=self.cib_base_file,
+ (self.config
+ .env.push_cib(
+ resources=fixture_resources_bundle_simple,
+ wait=TIMEOUT
)
- +
- fixture.call_wait(self.timeout)
- +
- fixture.call_status(fixture.state_complete(
- self.fixture_status_not_running
- ))
+ .runner.pcmk.load_state(resources=self.fixture_status_not_running)
)
- assert_raise_library_error(
- lambda: self.simple_bundle_create(self.timeout),
- fixture.report_resource_not_running("B1", severities.ERROR),
+ self.env_assist.assert_raise_library_error(
+ lambda: simple_bundle_create(self.env_assist.get_env()),
+ [
+ fixture.report_resource_not_running("B1", severities.ERROR),
+ ]
)
- self.runner.assert_everything_launched()
@skip_unless_pacemaker_supports_bundle
def test_disabled_wait_ok_run_ok(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib(
- self.fixture_cib_pre,
- self.fixture_resources_bundle_simple_disabled,
- cib_base_file=self.cib_base_file,
+ (self.config
+ .env.push_cib(
+ resources=self.fixture_resources_bundle_simple_disabled,
+ wait=TIMEOUT
)
- +
- fixture.call_wait(self.timeout)
- +
- fixture.call_status(fixture.state_complete(
- self.fixture_status_not_running
- ))
+ .runner.pcmk.load_state(resources=self.fixture_status_not_running)
)
- self.simple_bundle_create(self.timeout, disabled=True)
- self.runner.assert_everything_launched()
+ simple_bundle_create(self.env_assist.get_env(), disabled=True)
+ self.env_assist.assert_reports([
+ (
+ severities.INFO,
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ {
+ "resource_id": "B1"
+ },
+ None
+ )
+ ])
@skip_unless_pacemaker_supports_bundle
def test_disabled_wait_ok_run_fail(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib(
- self.fixture_cib_pre,
- self.fixture_resources_bundle_simple_disabled,
- cib_base_file=self.cib_base_file,
+ (self.config
+ .env.push_cib(
+ resources=self.fixture_resources_bundle_simple_disabled,
+ wait=TIMEOUT
)
- +
- fixture.call_wait(self.timeout)
- +
- fixture.call_status(fixture.state_complete(
- self.fixture_status_running
- ))
+ .runner.pcmk.load_state(resources=self.fixture_status_running)
)
- assert_raise_library_error(
- lambda: self.simple_bundle_create(self.timeout, disabled=True),
- fixture.report_resource_running(
- "B1", {"Started": ["node1", "node2"]}, severities.ERROR
- )
+ self.env_assist.assert_raise_library_error(
+ lambda:
+ simple_bundle_create(self.env_assist.get_env(), disabled=True),
+ [
+ fixture.report_resource_running(
+ "B1", {"Started": ["node1", "node2"]}, severities.ERROR
+ )
+ ]
)
- self.runner.assert_everything_launched()
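One detail worth noting in the Wait tests above: a timed-out crm_resource --wait is no longer modelled by enqueuing a canned wait call; the expected push_cib declaration now carries the failure itself via its exception argument. A condensed sketch of that declaration, with a placeholder resources string standing in for the real fixture:

    from pcs.lib import reports
    from pcs.lib.errors import LibraryError

    # declare that pushing the CIB with a wait fails as if pacemaker
    # reported "Timer expired" while waiting for the cluster to settle
    self.config.env.push_cib(
        resources="<resources/>",  # placeholder, not the real fixture
        wait=10,
        exception=LibraryError(
            reports.wait_for_idle_timed_out(
                "Error performing operation: Timer expired"
            )
        ),
    )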
diff --git a/pcs/lib/commands/test/resource/test_bundle_update.py b/pcs/lib/commands/test/resource/test_bundle_update.py
index 7a1ee49..8993909 100644
--- a/pcs/lib/commands/test/resource/test_bundle_update.py
+++ b/pcs/lib/commands/test/resource/test_bundle_update.py
@@ -2,121 +2,128 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
+from functools import partial
from textwrap import dedent
from pcs.common import report_codes
+from pcs.lib import reports
from pcs.lib.commands import resource
-from pcs.lib.commands.test.resource.common import ResourceWithoutStateTest
-import pcs.lib.commands.test.resource.fixture as fixture
-from pcs.lib.errors import ReportItemSeverity as severities
-from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.lib.errors import (
+ LibraryError,
+ ReportItemSeverity as severities,
+)
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
from pcs.test.tools.misc import skip_unless_pacemaker_supports_bundle
+from pcs.test.tools.pcs_unittest import TestCase
-class CommonTest(ResourceWithoutStateTest):
- fixture_cib_minimal = """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" />
- </bundle>
- </resources>
- """
- def setUp(self):
- super(CommonTest, self).setUp()
- self.cib_base_file = "cib-empty-2.8.xml"
+TIMEOUT = 10
+
+get_env_tools = partial(
+ get_env_tools,
+ base_cib_filename="cib-empty-2.8.xml"
+)
+
+def simple_bundle_update(env, wait=TIMEOUT):
+ return resource.bundle_update(env, "B1", {"image": "new:image"}, wait=wait)
- def fixture_cib_resources(self, cib):
- return fixture.cib_resources(cib, cib_base_file=self.cib_base_file)
+fixture_resources_minimal = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+"""
+
+class Basics(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class Basics(CommonTest):
def test_nonexisting_id(self):
- fixture_cib_pre = "<resources />"
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(fixture_cib_pre)
- )
- )
- assert_raise_library_error(
- lambda: resource.bundle_update(self.env, "B1"),
- (
- severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "B1",
- "id_description": "bundle",
- "context_type": "resources",
- "context_id": "",
- },
- None
- ),
+ self.config.runner.cib.load()
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.bundle_update(self.env_assist.get_env(), "B1"),
+ [
+ (
+ severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "id": "B1",
+ "id_description": "bundle",
+ "context_type": "resources",
+ "context_id": "",
+ },
+ None
+ ),
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
def test_not_bundle_id(self):
- fixture_cib_pre = """
- <resources>
- <primitive id="B1" />
- </resources>
- """
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(fixture_cib_pre)
- )
+ self.config.runner.cib.load(
+ resources="""
+ <resources>
+ <primitive id="B1" />
+ </resources>
+ """
)
- assert_raise_library_error(
- lambda: resource.bundle_update(self.env, "B1"),
- (
- severities.ERROR,
- report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
- {
- "id": "B1",
- "expected_types": ["bundle"],
- "current_type": "primitive",
- },
- None
- ),
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.bundle_update(self.env_assist.get_env(), "B1"),
+ [
+ (
+ severities.ERROR,
+ report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
+ {
+ "id": "B1",
+ "expected_types": ["bundle"],
+ "current_type": "primitive",
+ },
+ None
+ ),
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
def test_no_updates(self):
- fixture_cib_pre = """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" />
- </bundle>
- </resources>
- """
- self.assert_command_effect(
- fixture_cib_pre,
- lambda: resource.bundle_update(self.env, "B1"),
- fixture_cib_pre
+ (self.config
+ .runner.cib.load(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+ """
+ )
+ .env.push_cib()
)
+ resource.bundle_update(self.env_assist.get_env(), "B1")
+
def test_cib_upgrade(self):
- fixture_cib_pre = """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" />
- </bundle>
- </resources>
- """
- self.runner.set_runs(
- fixture.calls_cib_load_and_upgrade(fixture_cib_pre)
- +
- fixture.calls_cib(
- fixture_cib_pre,
- fixture_cib_pre,
- cib_base_file=self.cib_base_file
+ (self.config
+ .runner.cib.load(
+ filename="cib-empty.xml",
+ name="load_cib_old_version"
+ )
+ .runner.cib.upgrade()
+ .runner.cib.load(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+ """
)
+ .env.push_cib()
)
-
- resource.bundle_update(self.env, "B1")
-
- self.env.report_processor.assert_reports([
+ resource.bundle_update(self.env_assist.get_env(), "B1")
+ self.env_assist.assert_reports([
(
severities.INFO,
report_codes.CIB_UPGRADE_SUCCESSFUL,
@@ -125,10 +132,8 @@ class Basics(CommonTest):
None
),
])
- self.runner.assert_everything_launched()
-
-class ContainerDocker(CommonTest):
+class ContainerDocker(TestCase):
allowed_options = [
"image",
"masters",
@@ -147,127 +152,134 @@ class ContainerDocker(CommonTest):
</resources>
"""
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_success(self):
- fixture_cib_pre = """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" masters="3" replicas="6"/>
- </bundle>
- </resources>
- """
- fixture_cib_post = """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" options="test" replicas="3" />
- </bundle>
- </resources>
- """
- self.assert_command_effect(
- fixture_cib_pre,
- lambda: resource.bundle_update(
- self.env, "B1",
- container_options={
- "options": "test",
- "replicas": "3",
- "masters": "",
- }
- ),
- fixture_cib_post
+ (self.config
+ .runner.cib.load(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" masters="3" replicas="6"/>
+ </bundle>
+ </resources>
+ """
+ )
+ .env.push_cib(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" options="test" replicas="3"
+ />
+ </bundle>
+ </resources>
+ """
+ )
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ container_options={
+ "options": "test",
+ "replicas": "3",
+ "masters": "",
+ }
)
def test_cannot_remove_required_options(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_minimal)
- )
- )
- assert_raise_library_error(
+ self.config.runner.cib.load(resources=fixture_resources_minimal)
+ self.env_assist.assert_raise_library_error(
lambda: resource.bundle_update(
- self.env, "B1",
+ self.env_assist.get_env(),
+ "B1",
container_options={
"image": "",
"options": "test",
},
force_options=True
),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION_VALUE,
- {
- "option_name": "image",
- "option_value": "",
- "allowed_values": "image name",
- },
- None
- ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "image",
+ "option_value": "",
+ "allowed_values": "image name",
+ },
+ None
+ ),
+ ]
)
- self.runner.assert_everything_launched()
def test_unknow_option(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_minimal)
- )
- )
- assert_raise_library_error(
+ self.config.runner.cib.load(resources=fixture_resources_minimal)
+ self.env_assist.assert_raise_library_error(
lambda: resource.bundle_update(
- self.env, "B1",
+ self.env_assist.get_env(),
+ "B1",
container_options={
"extra": "option",
}
),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION,
- {
- "option_names": ["extra", ],
- "option_type": "container",
- "allowed": self.allowed_options,
- },
- report_codes.FORCE_OPTIONS
- ),
- )
- self.runner.assert_everything_launched()
-
- def test_unknow_option_forced(self):
- self.assert_command_effect(
- self.fixture_cib_minimal,
- lambda: resource.bundle_update(
- self.env, "B1",
- container_options={
- "extra": "option",
- },
- force_options=True
- ),
- self.fixture_cib_extra_option,
[
(
- severities.WARNING,
+ severities.ERROR,
report_codes.INVALID_OPTION,
{
"option_names": ["extra", ],
"option_type": "container",
"allowed": self.allowed_options,
},
- None
+ report_codes.FORCE_OPTIONS
),
]
)
- def test_unknown_option_remove(self):
- self.assert_command_effect(
- self.fixture_cib_extra_option,
- lambda: resource.bundle_update(
- self.env, "B1",
- container_options={
- "extra": "",
- }
+ def test_unknow_option_forced(self):
+ (self.config
+ .runner.cib.load(resources=fixture_resources_minimal)
+ .env.push_cib(resources=self.fixture_cib_extra_option)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ container_options={
+ "extra": "option",
+ },
+ force_options=True
+ )
+
+ self.env_assist.assert_reports([
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "container",
+ "allowed": self.allowed_options,
+ },
+ None
),
- self.fixture_cib_minimal,
+ ])
+
+ def test_unknown_option_remove(self):
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_extra_option)
+ .env.push_cib(resources=fixture_resources_minimal)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ container_options={
+ "extra": "",
+ },
+ force_options=True
)
-class Network(CommonTest):
+class Network(TestCase):
allowed_options = [
"control-port",
"host-interface",
@@ -293,128 +305,143 @@ class Network(CommonTest):
</resources>
"""
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_add_network(self):
- self.assert_command_effect(
- self.fixture_cib_minimal,
- lambda: resource.bundle_update(
- self.env, "B1",
- network_options={
- "host-interface": "eth0",
- }
- ),
- self.fixture_cib_interface
+ (self.config
+ .runner.cib.load(resources=fixture_resources_minimal)
+ .env.push_cib(resources=self.fixture_cib_interface)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ network_options={
+ "host-interface": "eth0",
+ }
)
def test_remove_network(self):
- self.assert_command_effect(
- self.fixture_cib_interface,
- lambda: resource.bundle_update(
- self.env, "B1",
- network_options={
- "host-interface": "",
- }
- ),
- self.fixture_cib_minimal
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_interface)
+ .env.push_cib(resources=fixture_resources_minimal)
+ )
+
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ network_options={
+ "host-interface": "",
+ }
)
def test_keep_network_when_port_map_set(self):
- fixture_cib_pre = """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" />
- <network host-interface="eth0">
- <something />
- </network>
- </bundle>
- </resources>
- """
- fixture_cib_post = """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" />
- <network>
- <something />
- </network>
- </bundle>
- </resources>
- """
- self.assert_command_effect(
- fixture_cib_pre,
- lambda: resource.bundle_update(
- self.env, "B1",
- network_options={
- "host-interface": "",
- }
- ),
- fixture_cib_post
+ (self.config
+ .runner.cib.load(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-interface="eth0">
+ <something />
+ </network>
+ </bundle>
+ </resources>
+ """
+ )
+ .env.push_cib(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network>
+ <something />
+ </network>
+ </bundle>
+ </resources>
+ """
+ )
)
- def test_success(self):
- fixture_cib_pre = """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" />
- <network host-interface="eth0" control-port="12345" />
- </bundle>
- </resources>
- """
- fixture_cib_post = """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" />
- <network host-interface="eth0" host-netmask="24" />
- </bundle>
- </resources>
- """
- self.assert_command_effect(
- fixture_cib_pre,
- lambda: resource.bundle_update(
- self.env, "B1",
- network_options={
- "control-port": "",
- "host-netmask": "24",
- }
- ),
- fixture_cib_post
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ network_options={
+ "host-interface": "",
+ }
)
- def test_unknow_option(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_interface)
+ def test_success(self):
+ (self.config
+ .runner.cib.load(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-interface="eth0" control-port="12345"
+ />
+ </bundle>
+ </resources>
+ """
)
+ .env.push_cib(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-interface="eth0" host-netmask="24" />
+ </bundle>
+ </resources>
+ """
+ )
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ network_options={
+ "control-port": "",
+ "host-netmask": "24",
+ }
)
- assert_raise_library_error(
+
+ def test_unknow_option(self):
+ (self.config.runner.cib.load(resources=self.fixture_cib_interface))
+ self.env_assist.assert_raise_library_error(
lambda: resource.bundle_update(
- self.env, "B1",
+ self.env_assist.get_env(),
+ "B1",
network_options={
"extra": "option",
}
),
- (
- severities.ERROR,
- report_codes.INVALID_OPTION,
- {
- "option_names": ["extra", ],
- "option_type": "network",
- "allowed": self.allowed_options,
- },
- report_codes.FORCE_OPTIONS
- ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "network",
+ "allowed": self.allowed_options,
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ ]
)
- self.runner.assert_everything_launched()
def test_unknow_option_forced(self):
- self.assert_command_effect(
- self.fixture_cib_interface,
- lambda: resource.bundle_update(
- self.env, "B1",
- network_options={
- "extra": "option",
- },
- force_options=True
- ),
- self.fixture_cib_extra_option,
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_interface)
+ .env.push_cib(resources=self.fixture_cib_extra_option)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ network_options={
+ "extra": "option",
+ },
+ force_options=True
+ )
+ self.env_assist.assert_reports(
[
(
severities.WARNING,
@@ -430,19 +457,19 @@ class Network(CommonTest):
)
def test_unknown_option_remove(self):
- self.assert_command_effect(
- self.fixture_cib_extra_option,
- lambda: resource.bundle_update(
- self.env, "B1",
- network_options={
- "extra": "",
- }
- ),
- self.fixture_cib_interface,
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_extra_option)
+ .env.push_cib(resources=self.fixture_cib_interface)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ network_options={
+ "extra": "",
+ }
)
-
-class PortMap(CommonTest):
+class PortMap(TestCase):
allowed_options = [
"id",
"port",
@@ -473,117 +500,126 @@ class PortMap(CommonTest):
</resources>
"""
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_add_network(self):
- self.assert_command_effect(
- self.fixture_cib_minimal,
- lambda: resource.bundle_update(
- self.env, "B1",
- port_map_add=[
- {
- "port": "80",
- }
- ]
- ),
- self.fixture_cib_port_80
+ (self.config
+ .runner.cib.load(resources=fixture_resources_minimal)
+ .env.push_cib(resources=self.fixture_cib_port_80)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ port_map_add=[
+ {
+ "port": "80",
+ }
+ ]
)
def test_remove_network(self):
- self.assert_command_effect(
- self.fixture_cib_port_80,
- lambda: resource.bundle_update(
- self.env, "B1",
- port_map_remove=[
- "B1-port-map-80",
- ]
- ),
- self.fixture_cib_minimal
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_port_80)
+ .env.push_cib(resources=fixture_resources_minimal)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ port_map_remove=[
+ "B1-port-map-80",
+ ]
)
def test_keep_network_when_options_set(self):
- fixture_cib_pre = """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" />
- <network host-interface="eth0">
- <port-mapping id="B1-port-map-80" port="80" />
- </network>
- </bundle>
- </resources>
- """
- fixture_cib_post = """
- <resources>
- <bundle id="B1">
- <docker image="pcs:test" />
- <network host-interface="eth0" />
- </bundle>
- </resources>
- """
- self.assert_command_effect(
- fixture_cib_pre,
- lambda: resource.bundle_update(
- self.env, "B1",
- port_map_remove=[
- "B1-port-map-80",
- ]
- ),
- fixture_cib_post
+ (self.config
+ .runner.cib.load(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-interface="eth0">
+ <port-mapping id="B1-port-map-80" port="80" />
+ </network>
+ </bundle>
+ </resources>
+ """
+ )
+ .env.push_cib(
+ resources="""
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-interface="eth0" />
+ </bundle>
+ </resources>
+ """
+ )
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ port_map_remove=[
+ "B1-port-map-80",
+ ]
)
def test_add(self):
- self.assert_command_effect(
- self.fixture_cib_port_80,
- lambda: resource.bundle_update(
- self.env, "B1",
- port_map_add=[
- {
- "port": "8080",
- }
- ]
- ),
- self.fixture_cib_port_80_8080
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_port_80)
+ .env.push_cib(resources=self.fixture_cib_port_80_8080)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ port_map_add=[
+ {
+ "port": "8080",
+ }
+ ]
)
def test_remove(self):
- self.assert_command_effect(
- self.fixture_cib_port_80_8080,
- lambda: resource.bundle_update(
- self.env, "B1",
- port_map_remove=[
- "B1-port-map-8080",
- ]
- ),
- self.fixture_cib_port_80
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_port_80_8080)
+ .env.push_cib(resources=self.fixture_cib_port_80)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ port_map_remove=[
+ "B1-port-map-8080",
+ ]
)
def test_remove_missing(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_port_80)
- )
- )
- assert_raise_library_error(
+ self.config.runner.cib.load(resources=self.fixture_cib_port_80)
+
+ self.env_assist.assert_raise_library_error(
lambda: resource.bundle_update(
- self.env, "B1",
+ self.env_assist.get_env(),
+ "B1",
port_map_remove=[
"B1-port-map-8080",
]
),
- (
- severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "B1-port-map-8080",
- "id_description": "port-map",
- "context_type": "bundle",
- "context_id": "B1",
- },
- None
- ),
+ [
+ (
+ severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "id": "B1-port-map-8080",
+ "id_description": "port-map",
+ "context_type": "bundle",
+ "context_id": "B1",
+ },
+ None
+ ),
+ ]
)
- self.runner.assert_everything_launched()
-class StorageMap(CommonTest):
+class StorageMap(TestCase):
allowed_options = [
"id",
"options",
@@ -627,89 +663,95 @@ class StorageMap(CommonTest):
</resources>
"""
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_add_storage(self):
- self.assert_command_effect(
- self.fixture_cib_minimal,
- lambda: resource.bundle_update(
- self.env, "B1",
- storage_map_add=[
- {
- "source-dir": "/tmp/docker1a",
- "target-dir": "/tmp/docker1b",
- }
- ]
- ),
- self.fixture_cib_storage_1
+ (self.config
+ .runner.cib.load(resources=fixture_resources_minimal)
+ .env.push_cib(resources=self.fixture_cib_storage_1)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ storage_map_add=[
+ {
+ "source-dir": "/tmp/docker1a",
+ "target-dir": "/tmp/docker1b",
+ }
+ ]
)
def test_remove_storage(self):
- self.assert_command_effect(
- self.fixture_cib_storage_1,
- lambda: resource.bundle_update(
- self.env, "B1",
- storage_map_remove=[
- "B1-storage-map",
- ]
- ),
- self.fixture_cib_minimal
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_storage_1)
+ .env.push_cib(resources=fixture_resources_minimal)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ storage_map_remove=[
+ "B1-storage-map",
+ ]
)
def test_add(self):
- self.assert_command_effect(
- self.fixture_cib_storage_1,
- lambda: resource.bundle_update(
- self.env, "B1",
- storage_map_add=[
- {
- "source-dir": "/tmp/docker2a",
- "target-dir": "/tmp/docker2b",
- }
- ]
- ),
- self.fixture_cib_storage_1_2
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_storage_1)
+ .env.push_cib(resources=self.fixture_cib_storage_1_2)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ storage_map_add=[
+ {
+ "source-dir": "/tmp/docker2a",
+ "target-dir": "/tmp/docker2b",
+ }
+ ]
)
def test_remove(self):
- self.assert_command_effect(
- self.fixture_cib_storage_1_2,
- lambda: resource.bundle_update(
- self.env, "B1",
- storage_map_remove=[
- "B1-storage-map-1",
- ]
- ),
- self.fixture_cib_storage_1
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_storage_1_2)
+ .env.push_cib(resources=self.fixture_cib_storage_1)
+ )
+
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ storage_map_remove=[
+ "B1-storage-map-1",
+ ]
)
def test_remove_missing(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- self.fixture_cib_resources(self.fixture_cib_storage_1)
- )
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_storage_1)
)
- assert_raise_library_error(
+ self.env_assist.assert_raise_library_error(
lambda: resource.bundle_update(
- self.env, "B1",
+ self.env_assist.get_env(), "B1",
storage_map_remove=[
"B1-storage-map-1",
]
),
- (
- severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "B1-storage-map-1",
- "id_description": "storage-map",
- "context_type": "bundle",
- "context_id": "B1",
- },
- None
- ),
+ [
+ (
+ severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "id": "B1-storage-map-1",
+ "id_description": "storage-map",
+ "context_type": "bundle",
+ "context_id": "B1",
+ },
+ None
+ )
+ ]
)
- self.runner.assert_everything_launched()
-
-class Meta(CommonTest):
+class Meta(TestCase):
fixture_no_meta = """
<resources>
<bundle id="B1">
@@ -730,28 +772,33 @@ class Meta(CommonTest):
</resources>
"""
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_add_meta_element(self):
- self.assert_command_effect(
- self.fixture_no_meta,
- lambda: resource.bundle_update(
- self.env, "B1",
- meta_attributes={
- "target-role": "Stopped",
- }
- ),
- self.fixture_meta_stopped
+ (self.config
+ .runner.cib.load(resources=self.fixture_no_meta)
+ .env.push_cib(resources=self.fixture_meta_stopped)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ meta_attributes={
+ "target-role": "Stopped",
+ }
)
def test_remove_meta_element(self):
- self.assert_command_effect(
- self.fixture_meta_stopped,
- lambda: resource.bundle_update(
- self.env, "B1",
- meta_attributes={
- "target-role": "",
- }
- ),
- self.fixture_no_meta
+ (self.config
+ .runner.cib.load(resources=self.fixture_meta_stopped)
+ .env.push_cib(resources=self.fixture_no_meta)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(),
+ "B1",
+ meta_attributes={
+ "target-role": "",
+ }
)
def test_change_meta(self):
@@ -785,21 +832,21 @@ class Meta(CommonTest):
</bundle>
</resources>
"""
- self.assert_command_effect(
- fixture_cib_pre,
- lambda: resource.bundle_update(
- self.env, "B1",
- meta_attributes={
- "priority": "10",
- "resource-stickiness": "100",
- "is-managed": "",
- }
- ),
- fixture_cib_post
+ (self.config
+ .runner.cib.load(resources=fixture_cib_pre)
+ .env.push_cib(resources=fixture_cib_post)
+ )
+ resource.bundle_update(
+ self.env_assist.get_env(), "B1",
+ meta_attributes={
+ "priority": "10",
+ "resource-stickiness": "100",
+ "is-managed": "",
+ }
)
-class Wait(CommonTest):
+class Wait(TestCase):
fixture_status_running = """
<resources>
<bundle id="B1" managed="true" image="new:image">
@@ -846,71 +893,61 @@ class Wait(CommonTest):
</resources>
"""
- timeout = 10
- def fixture_calls_initial(self):
- return (
- fixture.call_wait_supported() +
- fixture.calls_cib(
- self.fixture_cib_pre,
- self.fixture_resources_bundle_simple,
- cib_base_file=self.cib_base_file,
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ (self.config
+ .runner.pcmk.can_wait()
+ .runner.cib.load(resources=self.fixture_cib_pre)
+ .env.push_cib(
+ resources=self.fixture_resources_bundle_simple,
+ wait=TIMEOUT
)
)
- def simple_bundle_update(self, wait=False):
- return resource.bundle_update(
- self.env, "B1", {"image": "new:image"}, wait=wait,
- )
-
def test_wait_fail(self):
- fixture_wait_timeout_error = dedent(
+ wait_error_message = dedent(
"""\
Pending actions:
Action 12: B1-node2-stop on node2
Error performing operation: Timer expired
"""
- )
- self.runner.set_runs(
- self.fixture_calls_initial() +
- fixture.call_wait(self.timeout, 62, fixture_wait_timeout_error)
- )
- assert_raise_library_error(
- lambda: self.simple_bundle_update(self.timeout),
- fixture.report_wait_for_idle_timed_out(
- fixture_wait_timeout_error
+ ).strip()
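+        # A descriptive note (inferred from usage in this patch): the
+        # instead="env.push_cib" keyword replaces the successful push_cib
+        # declared in setUp() with this failing variant.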
+ self.config.env.push_cib(
+ resources=self.fixture_resources_bundle_simple,
+ wait=TIMEOUT,
+ exception=LibraryError(
+ reports.wait_for_idle_timed_out(wait_error_message)
),
+ instead="env.push_cib"
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: simple_bundle_update(self.env_assist.get_env()),
+ [
+ fixture.report_wait_for_idle_timed_out(wait_error_message)
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
@skip_unless_pacemaker_supports_bundle
def test_wait_ok_running(self):
- self.runner.set_runs(
- self.fixture_calls_initial() +
- fixture.call_wait(self.timeout) +
- fixture.call_status(fixture.state_complete(
- self.fixture_status_running
- ))
- )
- self.simple_bundle_update(self.timeout)
- self.env.report_processor.assert_reports([
+ (self.config
+ .runner.pcmk.load_state(resources=self.fixture_status_running)
+ )
+ simple_bundle_update(self.env_assist.get_env())
+ self.env_assist.assert_reports([
fixture.report_resource_running(
"B1", {"Started": ["node1", "node2"]}
),
])
- self.runner.assert_everything_launched()
@skip_unless_pacemaker_supports_bundle
def test_wait_ok_not_running(self):
- self.runner.set_runs(
- self.fixture_calls_initial() +
- fixture.call_wait(self.timeout) +
- fixture.call_status(fixture.state_complete(
- self.fixture_status_not_running
- ))
- )
- self.simple_bundle_update(self.timeout)
- self.env.report_processor.assert_reports([
+ (self.config
+ .runner.pcmk.load_state(resources=self.fixture_status_not_running)
+ )
+ simple_bundle_update(self.env_assist.get_env())
+ self.env_assist.assert_reports([
fixture.report_resource_not_running("B1", severities.INFO),
])
- self.runner.assert_everything_launched()
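The pattern repeated throughout this patch: the Runner-based scaffolding
(runner.set_runs(...) followed by runner.assert_everything_launched()) gives
way to the declarative command_env tooling, where a test first declares the
calls it expects on self.config and then simply invokes the library command,
leaving report verification to env_assist. A condensed sketch of
Meta.test_add_meta_element from above, with the XML fixtures elided:

from pcs.lib.commands import resource
from pcs.test.tools.command_env import get_env_tools
from pcs.test.tools.pcs_unittest import TestCase

class Meta(TestCase):
    fixture_no_meta = "..."       # bundle without meta_attributes (see above)
    fixture_meta_stopped = "..."  # same bundle with target-role=Stopped

    def setUp(self):
        # config collects the expected calls; env_assist verifies reports
        # and builds the library environment.
        self.env_assist, self.config = get_env_tools(test_case=self)

    def test_add_meta_element(self):
        # Expected call sequence: read the CIB, then push the updated CIB.
        (self.config
            .runner.cib.load(resources=self.fixture_no_meta)
            .env.push_cib(resources=self.fixture_meta_stopped)
        )
        # Running the command consumes the declared calls; a missing or
        # unexpected call fails the test.
        resource.bundle_update(
            self.env_assist.get_env(), "B1",
            meta_attributes={"target-role": "Stopped"},
        )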
diff --git a/pcs/lib/commands/test/resource/test_resource_create.py b/pcs/lib/commands/test/resource/test_resource_create.py
index 6438a70..f0d7f34 100644
--- a/pcs/lib/commands/test/resource/test_resource_create.py
+++ b/pcs/lib/commands/test/resource/test_resource_create.py
@@ -2,52 +2,125 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
-from functools import partial
-import logging
-
-from lxml import etree
-
-from pcs.test.tools.pcs_unittest import TestCase, mock
from pcs.common import report_codes
-from pcs.lib.env import LibraryEnvironment
+from pcs.lib import reports
from pcs.lib.commands import resource
-from pcs.lib.errors import ReportItemSeverity as severities
-from pcs.lib.commands.test.resource.common import ResourceWithoutStateTest
-import pcs.lib.commands.test.resource.fixture as fixture
-from pcs.test.tools.assertions import assert_raise_library_error
-from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.integration_lib import (
- Call,
- Runner,
-)
+from pcs.lib.errors import LibraryError
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
from pcs.test.tools.misc import (
- get_test_resource as rc,
outdent,
skip_unless_pacemaker_supports_bundle,
)
-from pcs.test.tools.xml import etree_to_str
+from pcs.test.tools.pcs_unittest import TestCase
+
+
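+# Wait timeout in seconds; the old-style tests expressed the same value as
+# "crm_resource --wait --timeout=10".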
+TIMEOUT = 10
+
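+# Thin wrappers around the resource.create* library commands so individual
+# tests only spell out the arguments they actually care about.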
+def create(
+ env, wait=False, disabled=False, meta_attributes=None, operations=None,
+ allow_invalid_operation=False
+):
+ return resource.create(
+ env,
+ "A", "ocf:heartbeat:Dummy",
+ operations=operations if operations else [],
+ meta_attributes=meta_attributes if meta_attributes else {},
+ instance_attributes={},
+ wait=wait,
+ ensure_disabled=disabled,
+ allow_invalid_operation=allow_invalid_operation
+ )
+def create_master(
+ env, wait=TIMEOUT, disabled=False, meta_attributes=None,
+ master_meta_options=None
+):
+ return resource.create_as_master(
+ env,
+ "A", "ocf:heartbeat:Dummy",
+ operations=[],
+ meta_attributes=meta_attributes if meta_attributes else {},
+ instance_attributes={},
+        clone_meta_options=(
+            master_meta_options if master_meta_options else {}
+        ),
+ wait=wait,
+ ensure_disabled=disabled
+ )
+def create_group(env, wait=TIMEOUT, disabled=False, meta_attributes=None):
+ return resource.create_in_group(
+ env,
+ "A", "ocf:heartbeat:Dummy", "G",
+ operations=[],
+ meta_attributes=meta_attributes if meta_attributes else {},
+ instance_attributes={},
+ wait=wait,
+ ensure_disabled=disabled
+ )
-runner = Runner()
+def create_clone(
+ env, wait=TIMEOUT, disabled=False, meta_attributes=None, clone_options=None
+):
+ return resource.create_as_clone(
+ env,
+ "A", "ocf:heartbeat:Dummy",
+ operations=[],
+ meta_attributes=meta_attributes if meta_attributes else {},
+ instance_attributes={},
+ clone_meta_options=clone_options if clone_options else {},
+ wait=wait,
+ ensure_disabled=disabled
+ )
-fixture_cib_resources_xml_simplest = """<resources>
- <primitive class="ocf" id="A" provider="heartbeat"
- type="Dummy"
- >
- <operations>
- <op id="A-monitor-interval-10" interval="10" name="monitor"
- timeout="20"
- />
- <op id="A-start-interval-0s" interval="0s" name="start"
- timeout="20"
- />
- <op id="A-stop-interval-0s" interval="0s" name="stop" timeout="20"/>
- </operations>
- </primitive>
-</resources>"""
+def create_bundle(env, wait=TIMEOUT, disabled=False, meta_attributes=None):
+ return resource.create_into_bundle(
+ env,
+ "A", "ocf:heartbeat:Dummy",
+ operations=[],
+ meta_attributes=meta_attributes if meta_attributes else {},
+ instance_attributes={},
+ bundle_id="B",
+ wait=wait,
+ ensure_disabled=disabled
+ )
+
+wait_error_message = outdent(
+ """\
+ Pending actions:
+ Action 39: stonith-vm-rhel72-1-reboot on vm-rhel72-1
+ Error performing operation: Timer expired
+ """
+).strip()
+
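+# The new-style fixtures list every operation advertised by the Dummy agent
+# (migrate_from, migrate_to and reload included), matching the agent
+# metadata loaded via runner.pcmk.load_agent().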
+fixture_cib_resources_xml_primitive_simplest = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
+ <op id="A-monitor-interval-10" interval="10" name="monitor"
+ timeout="20"
+ />
+ <op id="A-reload-interval-0s" interval="0s" name="reload"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>
+"""
fixture_cib_resources_xml_simplest_disabled = """<resources>
<primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
@@ -57,9 +130,18 @@ fixture_cib_resources_xml_simplest_disabled = """<resources>
/>
</meta_attributes>
<operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s" name="migrate_to"
+ timeout="20"
+ />
<op id="A-monitor-interval-10" interval="10" name="monitor"
timeout="20"
/>
+ <op id="A-reload-interval-0s" interval="0s" name="reload"
+ timeout="20"
+ />
<op id="A-start-interval-0s" interval="0s" name="start"
timeout="20"
/>
@@ -72,9 +154,18 @@ fixture_cib_resources_xml_master_simplest = """<resources>
<master id="A-master">
<primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
<operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="A-monitor-interval-10" interval="10" name="monitor"
timeout="20"
/>
+ <op id="A-reload-interval-0s" interval="0s" name="reload"
+ timeout="20"
+ />
<op id="A-start-interval-0s" interval="0s" name="start"
timeout="20"
/>
@@ -96,9 +187,18 @@ fixture_cib_resources_xml_master_simplest_disabled = """<resources>
</meta_attributes>
<primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
<operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="A-monitor-interval-10" interval="10" name="monitor"
timeout="20"
/>
+ <op id="A-reload-interval-0s" interval="0s" name="reload"
+ timeout="20"
+ />
<op id="A-start-interval-0s" interval="0s" name="start"
timeout="20"
/>
@@ -114,9 +214,18 @@ fixture_cib_resources_xml_master_simplest_disabled_meta_after = """<resources>
<master id="A-master">
<primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
<operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="A-monitor-interval-10" interval="10" name="monitor"
timeout="20"
/>
+ <op id="A-reload-interval-0s" interval="0s" name="reload"
+ timeout="20"
+ />
<op id="A-start-interval-0s" interval="0s" name="start"
timeout="20"
/>
@@ -137,9 +246,18 @@ fixture_cib_resources_xml_group_simplest = """<resources>
<group id="G">
<primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
<operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="A-monitor-interval-10" interval="10" name="monitor"
timeout="20"
/>
+ <op id="A-reload-interval-0s" interval="0s" name="reload"
+ timeout="20"
+ />
<op id="A-start-interval-0s" interval="0s" name="start"
timeout="20"
/>
@@ -161,9 +279,18 @@ fixture_cib_resources_xml_group_simplest_disabled = """<resources>
/>
</meta_attributes>
<operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="A-monitor-interval-10" interval="10" name="monitor"
timeout="20"
/>
+ <op id="A-reload-interval-0s" interval="0s" name="reload"
+ timeout="20"
+ />
<op id="A-start-interval-0s" interval="0s" name="start"
timeout="20"
/>
@@ -180,9 +307,18 @@ fixture_cib_resources_xml_clone_simplest = """<resources>
<clone id="A-clone">
<primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
<operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="A-monitor-interval-10" interval="10" name="monitor"
timeout="20"
/>
+ <op id="A-reload-interval-0s" interval="0s" name="reload"
+ timeout="20"
+ />
<op id="A-start-interval-0s" interval="0s" name="start"
timeout="20"
/>
@@ -204,9 +340,18 @@ fixture_cib_resources_xml_clone_simplest_disabled = """<resources>
</meta_attributes>
<primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
<operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="A-monitor-interval-10" interval="10" name="monitor"
timeout="20"
/>
+ <op id="A-reload-interval-0s" interval="0s" name="reload"
+ timeout="20"
+ />
<op id="A-start-interval-0s" interval="0s" name="start"
timeout="20"
/>
@@ -242,739 +387,1067 @@ def fixture_state_resources_xml(role="Started", failed="false"):
)
)
-def fixture_cib_calls(cib_resources_xml):
- cib_xml = open(rc("cib-empty.xml")).read()
-
- cib = etree.fromstring(cib_xml)
- resources_section = cib.find(".//resources")
- for child in etree.fromstring(cib_resources_xml):
- resources_section.append(child)
-
- return [
- Call("cibadmin --local --query", cib_xml),
- Call(
- "cibadmin --replace --verbose --xml-pipe --scope configuration",
- check_stdin=Call.create_check_stdin_xml(etree_to_str(cib))
- ),
- ]
-
-def fixture_agent_load_calls():
- return [
- Call(
- "crm_resource --show-metadata ocf:heartbeat:Dummy",
- open(rc("resource_agent_ocf_heartbeat_dummy.xml")).read()
- ),
- ]
-
-
-def fixture_pre_timeout_calls(cib_resources_xml):
- return (
- fixture_agent_load_calls()
- +
- [
- Call("crm_resource -?", "--wait"),
- ]
- +
- fixture_cib_calls(cib_resources_xml)
- )
-
-def fixture_wait_and_get_state_calls(state_resource_xml):
- crm_mon = etree.fromstring(open(rc("crm_mon.minimal.xml")).read())
- crm_mon.append(etree.fromstring(state_resource_xml))
-
- return [
- Call("crm_resource --wait --timeout=10"),
- Call(
- "crm_mon --one-shot --as-xml --inactive",
- etree_to_str(crm_mon),
- ),
- ]
-
-def fixture_calls_including_waiting(cib_resources_xml, state_resources_xml):
- return (
- fixture_pre_timeout_calls(cib_resources_xml)
- +
- fixture_wait_and_get_state_calls(state_resources_xml)
- )
-
-class CommonResourceTest(TestCase):
- @classmethod
- def setUpClass(cls):
- cls.patcher = mock.patch.object(
- LibraryEnvironment,
- "cmd_runner",
- lambda self: runner
- )
- cls.patcher.start()
- cls.patcher_corosync = mock.patch.object(
- LibraryEnvironment,
- "get_corosync_conf_data",
- lambda self: open(rc("corosync.conf")).read()
- )
- cls.patcher_corosync.start()
-
- @classmethod
- def tearDownClass(cls):
- cls.patcher.stop()
- cls.patcher_corosync.stop()
+class Create(TestCase):
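+    # Note that the generated op id "A-monitor-interval-20" is sanitized,
+    # while the operation name keeps the raw "moni*tor" value.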
+ fixture_sanitized_operation = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
+ <op id="A-monitor-interval-20" interval="20"
+ name="moni*tor" timeout="20"
+ />
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """
def setUp(self):
- self.env = LibraryEnvironment(
- mock.MagicMock(logging.Logger),
- MockLibraryReportProcessor()
- )
- self.create = partial(self.get_create(), self.env)
-
- def assert_command_effect(self, cmd, cib_resources_xml, reports=None):
- runner.set_runs(
- fixture_agent_load_calls()
- +
- fixture_cib_calls(cib_resources_xml)
- )
- cmd()
- self.env.report_processor.assert_reports(reports if reports else [])
- runner.assert_everything_launched()
-
- def assert_wait_fail(self, command, cib_resources_xml):
- wait_error_message = outdent(
- """\
- Pending actions:
- Action 39: stonith-vm-rhel72-1-reboot on vm-rhel72-1
- Error performing operation: Timer expired
- """
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.cib.load()
)
- runner.set_runs(fixture_pre_timeout_calls(cib_resources_xml) + [
- Call(
- "crm_resource --wait --timeout=10",
- stderr=wait_error_message,
- returncode=62,
- ),
- ])
-
- assert_raise_library_error(
- command,
- (
- severities.ERROR,
- report_codes.WAIT_FOR_IDLE_TIMED_OUT,
- {
- "reason": wait_error_message.strip(),
- },
- None
- )
- )
- runner.assert_everything_launched()
-
- def assert_wait_ok_run_fail(
- self, command, cib_resources_xml, state_resources_xml
- ):
- runner.set_runs(fixture_calls_including_waiting(
- cib_resources_xml,
- state_resources_xml
- ))
-
- assert_raise_library_error(
- command,
- (
- severities.ERROR,
- report_codes.RESOURCE_DOES_NOT_RUN,
- {
- "resource_id": "A",
- },
- None
- )
- )
- runner.assert_everything_launched()
-
- def assert_wait_ok_run_ok(
- self, command, cib_resources_xml, state_resources_xml
- ):
- runner.set_runs(fixture_calls_including_waiting(
- cib_resources_xml,
- state_resources_xml
- ))
- command()
- self.env.report_processor.assert_reports([
- (
- severities.INFO,
- report_codes.RESOURCE_RUNNING_ON_NODES,
- {
- "roles_with_nodes": {"Started": ["node1"]},
- "resource_id": "A",
- },
- None
- ),
- ])
- runner.assert_everything_launched()
-
- def assert_wait_ok_disable_fail(
- self, command, cib_resources_xml, state_resources_xml
- ):
- runner.set_runs(fixture_calls_including_waiting(
- cib_resources_xml,
- state_resources_xml
- ))
-
- assert_raise_library_error(
- command,
- (
- severities.ERROR,
- report_codes.RESOURCE_RUNNING_ON_NODES,
- {
- 'roles_with_nodes': {'Started': ['node1']},
- 'resource_id': 'A'
- },
- None
- )
- )
- runner.assert_everything_launched()
-
- def assert_wait_ok_disable_ok(
- self, command, cib_resources_xml, state_resources_xml
- ):
- runner.set_runs(fixture_calls_including_waiting(
- cib_resources_xml,
- state_resources_xml
- ))
- command()
- self.env.report_processor.assert_reports([
- (
- severities.INFO,
- report_codes.RESOURCE_DOES_NOT_RUN,
- {
- "resource_id": "A",
- },
- None
- ),
- ])
- runner.assert_everything_launched()
+ def test_simplest_resource(self):
+ self.config.env.push_cib(
+ resources=fixture_cib_resources_xml_primitive_simplest
+ )
+ return create(self.env_assist.get_env())
-class Create(CommonResourceTest):
- def get_create(self):
- return resource.create
+ def test_resource_with_operation(self):
+ self.config.env.push_cib(
+ resources="""
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="10s"
+ />
+ <op id="A-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """
+ )
- def simplest_create(self, wait=False, disabled=False, meta_attributes=None):
- return self.create(
- "A", "ocf:heartbeat:Dummy",
- operations=[],
- meta_attributes=meta_attributes if meta_attributes else {},
- instance_attributes={},
- wait=wait,
- ensure_disabled=disabled
+ create(
+ self.env_assist.get_env(),
+ operations=[
+ {"name": "monitor", "timeout": "10s", "interval": "10"}
+ ]
)
- def test_simplest_resource(self):
- self.assert_command_effect(
- self.simplest_create,
- fixture_cib_resources_xml_simplest
+ def test_sanitize_operation_id_from_agent(self):
+ self.config.runner.pcmk.load_agent(
+ instead="runner.pcmk.load_agent",
+ agent_filename="resource_agent_ocf_heartbeat_dummy_insane_action.xml"
+ )
+ self.config.env.push_cib(
+ resources=self.fixture_sanitized_operation
)
+ return create(self.env_assist.get_env())
- def test_resource_with_operation(self):
- self.assert_command_effect(
- lambda: self.create(
- "A", "ocf:heartbeat:Dummy",
- operations=[
- {"name": "monitor", "timeout": "10s", "interval": "10"}
- ],
- meta_attributes={},
- instance_attributes={},
+ def test_sanitize_operation_id_from_user(self):
+ self.config.env.push_cib(
+ resources=self.fixture_sanitized_operation
+ )
+ create(
+ self.env_assist.get_env(),
+ operations=[
+ {"name": "moni*tor", "timeout": "20", "interval": "20"}
+ ],
+ allow_invalid_operation=True
+ )
+ self.env_assist.assert_reports([
+ fixture.warn(
+ report_codes.INVALID_OPTION_VALUE,
+ option_name="operation name",
+ option_value="moni*tor",
+ allowed_values=["start", "stop", "monitor", "reload",
+ "migrate_to", "migrate_from", "meta-data", "validate-all"]
),
- """<resources>
- <primitive class="ocf" id="A" provider="heartbeat"
- type="Dummy"
- >
- <operations>
- <op id="A-monitor-interval-10" interval="10"
- name="monitor" timeout="10s"
- />
- <op id="A-start-interval-0s" interval="0s"
- name="start" timeout="20"
- />
- <op id="A-stop-interval-0s" interval="0s"
- name="stop" timeout="20"
- />
- </operations>
- </primitive>
- </resources>"""
+ ])
+
+
+class CreateWait(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.pcmk.can_wait()
+ .runner.cib.load()
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_primitive_simplest,
+ wait=TIMEOUT
+ )
)
def test_fail_wait(self):
- self.assert_wait_fail(
- lambda: self.simplest_create(wait="10"),
- fixture_cib_resources_xml_simplest,
+ self.config.env.push_cib(
+ resources=fixture_cib_resources_xml_primitive_simplest,
+ wait=TIMEOUT,
+ exception=LibraryError(
+ reports.wait_for_idle_timed_out(wait_error_message)
+ ),
+ instead="env.push_cib"
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: create(self.env_assist.get_env(), wait=TIMEOUT),
+ [
+ fixture.report_wait_for_idle_timed_out(wait_error_message)
+ ],
+ expected_in_processor=False
)
def test_wait_ok_run_fail(self):
- self.assert_wait_ok_run_fail(
- lambda: self.simplest_create(wait="10"),
- fixture_cib_resources_xml_simplest,
- fixture_state_resources_xml(failed="true"),
+ (self.config
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(failed="true")
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: create(self.env_assist.get_env(), wait=TIMEOUT),
+ [
+ fixture.error(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ]
)
def test_wait_ok_run_ok(self):
- self.assert_wait_ok_run_ok(
- lambda: self.simplest_create(wait="10"),
- fixture_cib_resources_xml_simplest,
- fixture_state_resources_xml(),
+ (self.config
+ .runner.pcmk.load_state(resources=fixture_state_resources_xml())
)
+ create(self.env_assist.get_env(), wait=TIMEOUT)
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ roles_with_nodes={"Started": ["node1"]},
+ resource_id="A",
+ ),
+ ])
def test_wait_ok_disable_fail(self):
- self.assert_wait_ok_disable_fail(
- lambda: self.simplest_create(wait="10", disabled=True),
- fixture_cib_resources_xml_simplest_disabled,
- fixture_state_resources_xml(),
+ (self.config
+ .runner.pcmk.load_state(resources=fixture_state_resources_xml())
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_simplest_disabled,
+ wait=TIMEOUT,
+ instead="env.push_cib"
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: create(
+ self.env_assist.get_env(),
+ wait=TIMEOUT,
+ disabled=True
+ ),
+ [
+ fixture.error(
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ roles_with_nodes={"Started": ["node1"]},
+ resource_id="A",
+ ),
+ ]
)
def test_wait_ok_disable_ok(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(wait="10", disabled=True),
- fixture_cib_resources_xml_simplest_disabled,
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_simplest_disabled,
+ wait=TIMEOUT,
+ instead="env.push_cib"
+ )
)
+ create(self.env_assist.get_env(), wait=TIMEOUT, disabled=True)
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
+
def test_wait_ok_disable_ok_by_target_role(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(
- wait="10",
- meta_attributes={"target-role": "Stopped"}
- ),
- fixture_cib_resources_xml_simplest_disabled,
- fixture_state_resources_xml(role="Stopped"),
- )
-
-class CreateAsMaster(CommonResourceTest):
- def get_create(self):
- return resource.create_as_master
-
- def simplest_create(
- self, wait=False, disabled=False, meta_attributes=None,
- master_meta_options=None
- ):
- return self.create(
- "A", "ocf:heartbeat:Dummy",
- operations=[],
- meta_attributes=meta_attributes if meta_attributes else {},
- instance_attributes={},
- clone_meta_options=master_meta_options if master_meta_options
- else {}
- ,
- wait=wait,
- ensure_disabled=disabled
+ (self.config
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_simplest_disabled,
+ wait=TIMEOUT,
+ instead="env.push_cib"
+ )
+ )
+ create(
+ self.env_assist.get_env(),
+ wait=TIMEOUT,
+ meta_attributes={"target-role": "Stopped"}
+ )
+
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
+
+class CreateAsMaster(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.pcmk.can_wait()
+ .runner.cib.load()
)
def test_simplest_resource(self):
- self.assert_command_effect(
- self.simplest_create,
- fixture_cib_resources_xml_master_simplest
+ (self.config
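+            # wait=False means the can_wait check never runs, so drop the
+            # call declared in setUp() from the expected sequence.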
+ .remove(name="runner.pcmk.can_wait")
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_master_simplest
+ )
)
+ create_master(self.env_assist.get_env(), wait=False)
def test_fail_wait(self):
- self.assert_wait_fail(
- lambda: self.simplest_create(wait="10"),
- fixture_cib_resources_xml_master_simplest,
+ self.config.env.push_cib(
+ resources=fixture_cib_resources_xml_master_simplest,
+ wait=TIMEOUT,
+ exception=LibraryError(
+ reports.wait_for_idle_timed_out(wait_error_message)
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: create_master(self.env_assist.get_env()),
+ [
+ fixture.report_wait_for_idle_timed_out(wait_error_message)
+ ],
+ expected_in_processor=False
)
def test_wait_ok_run_fail(self):
- self.assert_wait_ok_run_fail(
- lambda: self.simplest_create(wait="10"),
- fixture_cib_resources_xml_master_simplest,
- fixture_state_resources_xml(failed="true"),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_master_simplest,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(failed="true")
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: create_master(self.env_assist.get_env()),
+ [
+ fixture.error(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A"
+ )
+ ]
)
def test_wait_ok_run_ok(self):
- self.assert_wait_ok_run_ok(
- lambda: self.simplest_create(wait="10"),
- fixture_cib_resources_xml_master_simplest,
- fixture_state_resources_xml(),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_master_simplest,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml()
+ )
)
+ create_master(self.env_assist.get_env())
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ roles_with_nodes={"Started": ["node1"]},
+ resource_id="A",
+ )
+ ])
def test_wait_ok_disable_fail(self):
- self.assert_wait_ok_disable_fail(
- lambda: self.simplest_create(wait="10", disabled=True),
- fixture_cib_resources_xml_master_simplest_disabled,
- fixture_state_resources_xml(),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_master_simplest_disabled,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml()
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: create_master(self.env_assist.get_env(), disabled=True),
+ [
+ fixture.error(
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ roles_with_nodes={'Started': ['node1']},
+ resource_id='A'
+ )
+ ],
)
def test_wait_ok_disable_ok(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(wait="10", disabled=True),
- fixture_cib_resources_xml_master_simplest_disabled,
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_master_simplest_disabled,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
)
+ create_master(self.env_assist.get_env(), disabled=True)
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
def test_wait_ok_disable_ok_by_target_role(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(
- wait="10",
- meta_attributes={"target-role": "Stopped"}
- ),
- """<resources>
- <master id="A-master">
- <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
- <meta_attributes id="A-meta_attributes">
- <nvpair id="A-meta_attributes-target-role"
- name="target-role" value="Stopped"
- />
- </meta_attributes>
- <operations>
- <op id="A-monitor-interval-10" interval="10"
- name="monitor" timeout="20"
- />
- <op id="A-start-interval-0s" interval="0s" name="start"
- timeout="20"
- />
- <op id="A-stop-interval-0s" interval="0s" name="stop"
- timeout="20"
- />
- </operations>
- </primitive>
- </master>
- </resources>""",
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+ .env.push_cib(
+ resources="""
+ <resources>
+ <master id="A-master">
+ <primitive class="ocf" id="A" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="A-migrate_from-interval-0s"
+ interval="0s" name="migrate_from"
+ timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s"
+ interval="0s" name="migrate_to"
+ timeout="20"
+ />
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </master>
+ </resources>
+ """,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
+ )
+ create_master(
+ self.env_assist.get_env(),
+ meta_attributes={"target-role": "Stopped"}
)
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
def test_wait_ok_disable_ok_by_target_role_in_master(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(
- wait="10",
- master_meta_options={"target-role": "Stopped"}
- ),
- fixture_cib_resources_xml_master_simplest_disabled_meta_after,
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+            .env.push_cib(
+                resources=(
+                    fixture_cib_resources_xml_master_simplest_disabled_meta_after
+                ),
+                wait=TIMEOUT
+            )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
+ )
+ create_master(
+ self.env_assist.get_env(),
+ master_meta_options={"target-role": "Stopped"}
)
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
def test_wait_ok_disable_ok_by_clone_max(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(
- wait="10",
- master_meta_options={"clone-max": "0"}
- ),
- """<resources>
- <master id="A-master">
- <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
- <operations>
- <op id="A-monitor-interval-10" interval="10"
- name="monitor"
- timeout="20"
- />
- <op id="A-start-interval-0s" interval="0s" name="start"
- timeout="20"
- />
- <op id="A-stop-interval-0s" interval="0s" name="stop"
- timeout="20"
- />
- </operations>
- </primitive>
- <meta_attributes id="A-master-meta_attributes">
- <nvpair id="A-master-meta_attributes-clone-max"
- name="clone-max" value="0"
- />
- </meta_attributes>
- </master>
- </resources>""",
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+ .env.push_cib(
+ resources="""
+ <resources>
+ <master id="A-master">
+ <primitive class="ocf" id="A" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A-migrate_from-interval-0s"
+ interval="0s" name="migrate_from"
+ timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s"
+ interval="0s" name="migrate_to"
+ timeout="20"
+ />
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="A-master-meta_attributes">
+ <nvpair id="A-master-meta_attributes-clone-max"
+ name="clone-max" value="0"
+ />
+ </meta_attributes>
+ </master>
+ </resources>
+ """,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
+ )
+ create_master(
+ self.env_assist.get_env(),
+ master_meta_options={"clone-max": "0"}
)
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
def test_wait_ok_disable_ok_by_clone_node_max(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(
- wait="10",
- master_meta_options={"clone-node-max": "0"}
- ),
- """<resources>
- <master id="A-master">
- <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
- <operations>
- <op id="A-monitor-interval-10" interval="10"
- name="monitor"
- timeout="20"
- />
- <op id="A-start-interval-0s" interval="0s" name="start"
- timeout="20"
- />
- <op id="A-stop-interval-0s" interval="0s" name="stop"
- timeout="20"
- />
- </operations>
- </primitive>
- <meta_attributes id="A-master-meta_attributes">
- <nvpair id="A-master-meta_attributes-clone-node-max"
- name="clone-node-max" value="0"
- />
- </meta_attributes>
- </master>
- </resources>""",
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+ .env.push_cib(
+ resources="""
+ <resources>
+ <master id="A-master">
+ <primitive class="ocf" id="A" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A-migrate_from-interval-0s"
+ interval="0s" name="migrate_from"
+ timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s"
+ interval="0s" name="migrate_to"
+ timeout="20"
+ />
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="A-master-meta_attributes">
+ <nvpair
+ id="A-master-meta_attributes-clone-node-max"
+ name="clone-node-max" value="0"
+ />
+ </meta_attributes>
+ </master>
+ </resources>
+ """,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
)
+ create_master(
+ self.env_assist.get_env(),
+ master_meta_options={"clone-node-max": "0"}
+ )
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
-class CreateInGroup(CommonResourceTest):
- def get_create(self):
- return resource.create_in_group
-
- def simplest_create(self, wait=False, disabled=False, meta_attributes=None):
- return self.create(
- "A", "ocf:heartbeat:Dummy", "G",
- operations=[],
- meta_attributes=meta_attributes if meta_attributes else {},
- instance_attributes={},
- wait=wait,
- ensure_disabled=disabled
+class CreateInGroup(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.pcmk.can_wait()
+ .runner.cib.load()
)
def test_simplest_resource(self):
- self.assert_command_effect(self.simplest_create, """<resources>
- <group id="G">
- <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
- <operations>
- <op id="A-monitor-interval-10" interval="10"
- name="monitor" timeout="20"
- />
- <op id="A-start-interval-0s" interval="0s" name="start"
- timeout="20"
- />
- <op id="A-stop-interval-0s" interval="0s" name="stop"
- timeout="20"
- />
- </operations>
- </primitive>
- </group>
- </resources>""")
+ (self.config
+ .remove(name="runner.pcmk.can_wait")
+ .env.push_cib(
+ resources="""
+ <resources>
+ <group id="G">
+ <primitive class="ocf" id="A" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A-migrate_from-interval-0s"
+ interval="0s" name="migrate_from"
+ timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s"
+ interval="0s" name="migrate_to"
+ timeout="20"
+ />
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </group>
+ </resources>
+ """
+ )
+ )
+
+ create_group(self.env_assist.get_env(), wait=False)
def test_fail_wait(self):
- self.assert_wait_fail(
- lambda: self.simplest_create(wait="10"),
- fixture_cib_resources_xml_group_simplest,
+ self.config.env.push_cib(
+ resources=fixture_cib_resources_xml_group_simplest,
+ wait=TIMEOUT,
+ exception=LibraryError(
+ reports.wait_for_idle_timed_out(wait_error_message)
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: create_group(self.env_assist.get_env()),
+ [
+ fixture.report_wait_for_idle_timed_out(wait_error_message)
+ ],
+ expected_in_processor=False
)
def test_wait_ok_run_fail(self):
- self.assert_wait_ok_run_fail(
- lambda: self.simplest_create(wait="10"),
- fixture_cib_resources_xml_group_simplest,
- fixture_state_resources_xml(failed="true"),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_group_simplest,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(failed="true")
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: create_group(self.env_assist.get_env()),
+ [
+ fixture.error(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A"
+ )
+ ]
)
def test_wait_ok_run_ok(self):
- self.assert_wait_ok_run_ok(
- lambda: self.simplest_create(wait="10"),
- fixture_cib_resources_xml_group_simplest,
- fixture_state_resources_xml(),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_group_simplest,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml()
+ )
)
+ create_group(self.env_assist.get_env())
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ roles_with_nodes={"Started": ["node1"]},
+ resource_id="A",
+ )
+ ])
def test_wait_ok_disable_fail(self):
- self.assert_wait_ok_disable_fail(
- lambda: self.simplest_create(wait="10", disabled=True),
- fixture_cib_resources_xml_group_simplest_disabled,
- fixture_state_resources_xml(),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_group_simplest_disabled,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml()
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: create_group(self.env_assist.get_env(), disabled=True),
+ [
+ fixture.error(
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ roles_with_nodes={'Started': ['node1']},
+ resource_id='A'
+ )
+ ],
)
def test_wait_ok_disable_ok(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(wait="10", disabled=True),
- fixture_cib_resources_xml_group_simplest_disabled,
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_group_simplest_disabled,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
)
+ create_group(self.env_assist.get_env(), disabled=True)
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
def test_wait_ok_disable_ok_by_target_role(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(
- wait="10",
- meta_attributes={"target-role": "Stopped"}
- ),
- fixture_cib_resources_xml_group_simplest_disabled,
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_group_simplest_disabled,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
)
+ create_group(
+ self.env_assist.get_env(),
+ meta_attributes={"target-role": "Stopped"}
+ )
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
-class CreateAsClone(CommonResourceTest):
- def get_create(self):
- return resource.create_as_clone
-
- def simplest_create(
- self, wait=False, disabled=False, meta_attributes=None,
- clone_options=None
- ):
- return self.create(
- "A", "ocf:heartbeat:Dummy",
- operations=[],
- meta_attributes=meta_attributes if meta_attributes else {},
- instance_attributes={},
- clone_meta_options=clone_options if clone_options else {},
- wait=wait,
- ensure_disabled=disabled
+class CreateAsClone(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.pcmk.can_wait()
+ .runner.cib.load()
)
def test_simplest_resource(self):
- self.assert_command_effect(
- self.simplest_create,
- fixture_cib_resources_xml_clone_simplest
+ (self.config
+ .remove(name="runner.pcmk.can_wait")
+ .env.push_cib(resources=fixture_cib_resources_xml_clone_simplest)
)
+ create_clone(self.env_assist.get_env(), wait=False)
def test_fail_wait(self):
- self.assert_wait_fail(
- lambda: self.simplest_create(wait="10"),
- fixture_cib_resources_xml_clone_simplest,
+ self.config.env.push_cib(
+ resources=fixture_cib_resources_xml_clone_simplest,
+ wait=TIMEOUT,
+ exception=LibraryError(
+ reports.wait_for_idle_timed_out(wait_error_message)
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: create_clone(self.env_assist.get_env()),
+ [
+ fixture.report_wait_for_idle_timed_out(wait_error_message)
+ ],
+ expected_in_processor=False
)
def test_wait_ok_run_fail(self):
- self.assert_wait_ok_run_fail(
- lambda: self.simplest_create(wait="10"),
- fixture_cib_resources_xml_clone_simplest,
- fixture_state_resources_xml(failed="true"),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_clone_simplest,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(failed="true")
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: create_clone(self.env_assist.get_env()),
+ [
+ fixture.error(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A"
+ )
+ ]
)
def test_wait_ok_run_ok(self):
- self.assert_wait_ok_run_ok(
- lambda: self.simplest_create(wait="10"),
- fixture_cib_resources_xml_clone_simplest,
- fixture_state_resources_xml(),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_clone_simplest,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml()
+ )
)
+ create_clone(self.env_assist.get_env())
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ roles_with_nodes={"Started": ["node1"]},
+ resource_id="A",
+ )
+ ])
def test_wait_ok_disable_fail(self):
- self.assert_wait_ok_disable_fail(
- lambda: self.simplest_create(wait="10", disabled=True),
- fixture_cib_resources_xml_clone_simplest_disabled,
- fixture_state_resources_xml(),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_clone_simplest_disabled,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml()
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: create_clone(self.env_assist.get_env(), disabled=True),
+ [
+ fixture.error(
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ roles_with_nodes={'Started': ['node1']},
+ resource_id='A'
+ )
+ ],
)
def test_wait_ok_disable_ok(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(wait="10", disabled=True),
- fixture_cib_resources_xml_clone_simplest_disabled,
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+ .env.push_cib(
+ resources=fixture_cib_resources_xml_clone_simplest_disabled,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
)
+ create_clone(self.env_assist.get_env(), disabled=True)
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
def test_wait_ok_disable_ok_by_target_role(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(
- wait="10",
- meta_attributes={"target-role": "Stopped"}
- ),
- """<resources>
- <clone id="A-clone">
- <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
- <meta_attributes id="A-meta_attributes">
- <nvpair id="A-meta_attributes-target-role"
- name="target-role"
- value="Stopped"
- />
- </meta_attributes>
- <operations>
- <op id="A-monitor-interval-10" interval="10"
- name="monitor" timeout="20"
- />
- <op id="A-start-interval-0s" interval="0s" name="start"
- timeout="20"
- />
- <op id="A-stop-interval-0s" interval="0s" name="stop"
- timeout="20"
- />
- </operations>
- </primitive>
- </clone>
- </resources>"""
- ,
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+ .env.push_cib(
+ resources="""
+ <resources>
+ <clone id="A-clone">
+ <primitive class="ocf" id="A" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role"
+ value="Stopped"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="A-migrate_from-interval-0s"
+ interval="0s" name="migrate_from"
+ timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s"
+ interval="0s" name="migrate_to"
+ timeout="20"
+ />
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ """,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
)
+ create_clone(
+ self.env_assist.get_env(),
+ meta_attributes={"target-role": "Stopped"}
+ )
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
def test_wait_ok_disable_ok_by_target_role_in_clone(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(
- wait="10",
- clone_options={"target-role": "Stopped"}
- ),
- """<resources>
- <clone id="A-clone">
- <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
- <operations>
- <op id="A-monitor-interval-10" interval="10"
- name="monitor"
- timeout="20"
- />
- <op id="A-start-interval-0s" interval="0s" name="start"
- timeout="20"
- />
- <op id="A-stop-interval-0s" interval="0s" name="stop"
- timeout="20"
- />
- </operations>
- </primitive>
- <meta_attributes id="A-clone-meta_attributes">
- <nvpair id="A-clone-meta_attributes-target-role"
- name="target-role" value="Stopped"
- />
- </meta_attributes>
- </clone>
- </resources>""",
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+ .env.push_cib(
+ resources="""
+ <resources>
+ <clone id="A-clone">
+ <primitive class="ocf" id="A" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A-migrate_from-interval-0s"
+ interval="0s" name="migrate_from"
+ timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s"
+ interval="0s" name="migrate_to"
+ timeout="20"
+ />
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-target-role"
+ name="target-role" value="Stopped"
+ />
+ </meta_attributes>
+ </clone>
+ </resources>
+ """,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
+ )
+ create_clone(
+ self.env_assist.get_env(),
+ clone_options={"target-role": "Stopped"}
)
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
def test_wait_ok_disable_ok_by_clone_max(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(
- wait="10",
- clone_options={"clone-max": "0"}
- ),
- """<resources>
- <clone id="A-clone">
- <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
- <operations>
- <op id="A-monitor-interval-10" interval="10"
- name="monitor"
- timeout="20"
- />
- <op id="A-start-interval-0s" interval="0s" name="start"
- timeout="20"
- />
- <op id="A-stop-interval-0s" interval="0s" name="stop"
- timeout="20"
- />
- </operations>
- </primitive>
- <meta_attributes id="A-clone-meta_attributes">
- <nvpair id="A-clone-meta_attributes-clone-max"
- name="clone-max" value="0"
- />
- </meta_attributes>
- </clone>
- </resources>""",
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+ .env.push_cib(
+ resources="""
+ <resources>
+ <clone id="A-clone">
+ <primitive class="ocf" id="A" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A-migrate_from-interval-0s"
+ interval="0s" name="migrate_from"
+ timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s"
+ interval="0s" name="migrate_to"
+ timeout="20"
+ />
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-clone-max"
+ name="clone-max" value="0"
+ />
+ </meta_attributes>
+ </clone>
+ </resources>
+ """,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
)
+ create_clone(
+ self.env_assist.get_env(),
+ clone_options={"clone-max": "0"}
+ )
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
def test_wait_ok_disable_ok_by_clone_node_max(self):
- self.assert_wait_ok_disable_ok(
- lambda: self.simplest_create(
- wait="10",
- clone_options={"clone-node-max": "0"}
- ),
- """<resources>
- <clone id="A-clone">
- <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
- <operations>
- <op id="A-monitor-interval-10" interval="10"
- name="monitor"
- timeout="20"
- />
- <op id="A-start-interval-0s" interval="0s" name="start"
- timeout="20"
- />
- <op id="A-stop-interval-0s" interval="0s" name="stop"
- timeout="20"
- />
- </operations>
- </primitive>
- <meta_attributes id="A-clone-meta_attributes">
- <nvpair id="A-clone-meta_attributes-clone-node-max"
- name="clone-node-max" value="0"
- />
- </meta_attributes>
- </clone>
- </resources>""",
- fixture_state_resources_xml(role="Stopped"),
+ (self.config
+ .env.push_cib(
+ resources="""
+ <resources>
+ <clone id="A-clone">
+ <primitive class="ocf" id="A" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A-migrate_from-interval-0s"
+ interval="0s" name="migrate_from"
+ timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s"
+ interval="0s" name="migrate_to"
+ timeout="20"
+ />
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair
+ id="A-clone-meta_attributes-clone-node-max"
+ name="clone-node-max" value="0"
+ />
+ </meta_attributes>
+ </clone>
+ </resources>
+ """,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_state_resources_xml(role="Stopped")
+ )
)
+ create_clone(
+ self.env_assist.get_env(),
+ clone_options={"clone-node-max": "0"}
+ )
+ self.env_assist.assert_reports([
+ fixture.info(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A",
+ )
+ ])
-
-class CreateInToBundle(ResourceWithoutStateTest):
- upgraded_cib = "cib-empty-2.8.xml"
-
+class CreateInToBundle(TestCase):
fixture_empty_resources = "<resources />"
fixture_resources_pre = """
@@ -990,9 +1463,18 @@ class CreateInToBundle(ResourceWithoutStateTest):
class="ocf" id="A" provider="heartbeat" type="Dummy"
>
<operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="A-monitor-interval-10" interval="10"
name="monitor" timeout="20"
/>
+ <op id="A-reload-interval-0s" interval="0s" name="reload"
+ timeout="20"
+ />
<op id="A-start-interval-0s" interval="0s"
name="start" timeout="20"
/>
@@ -1017,9 +1499,18 @@ class CreateInToBundle(ResourceWithoutStateTest):
/>
</meta_attributes>
<operations>
+ <op id="A-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="A-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="A-monitor-interval-10" interval="10"
name="monitor" timeout="20"
/>
+ <op id="A-reload-interval-0s" interval="0s" name="reload"
+ timeout="20"
+ />
<op id="A-start-interval-0s" interval="0s"
name="start" timeout="20"
/>
@@ -1070,226 +1561,209 @@ class CreateInToBundle(ResourceWithoutStateTest):
</resources>
"""
- fixture_wait_timeout_error = outdent(
- """\
- Pending actions:
- Action 12: B-node2-stop on node2
- Error performing operation: Timer expired
- """
- )
-
- def simplest_create(self, wait=False, disabled=False, meta_attributes=None):
- return resource.create_into_bundle(
- self.env,
- "A", "ocf:heartbeat:Dummy",
- operations=[],
- meta_attributes=meta_attributes if meta_attributes else {},
- instance_attributes={},
- bundle_id="B",
- wait=wait,
- ensure_disabled=disabled
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(
+ test_case=self,
+ base_cib_filename="cib-empty-2.8.xml",
)
def test_upgrade_cib(self):
- self.runner.set_runs(
- fixture_agent_load_calls()
- +
- fixture.calls_cib_load_and_upgrade(self.fixture_empty_resources)
- +
- fixture.calls_cib(
- self.fixture_resources_pre,
- self.fixture_resources_post_simple,
- self.upgraded_cib,
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.cib.load(
+ filename="cib-empty.xml",
+ name="load_cib_old_version"
)
+ .runner.cib.upgrade()
+ .runner.cib.load(resources=self.fixture_resources_pre)
+ .env.push_cib(resources=self.fixture_resources_post_simple)
)
- self.simplest_create()
- self.runner.assert_everything_launched()
+ create_bundle(self.env_assist.get_env(), wait=False)
+ self.env_assist.assert_reports([
+ fixture.info(report_codes.CIB_UPGRADE_SUCCESSFUL)
+ ])
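
test_upgrade_cib above pins down the expected sequence for an old CIB:
load the agent, load the CIB, upgrade it because its schema is too old for
bundles, and load it again before pushing. A rough standalone sketch of
that decision, with a hypothetical runner object standing in for the pcs
wrappers:

    def load_cib_upgrading_if_needed(runner, minimum=(2, 8)):
        cib = runner.load_cib()
        if cib["schema"] < minimum:
            runner.upgrade_cib()
            cib = runner.load_cib()  # reload to pick up the new schema
        return cib

    class FakeRunner(object):
        def __init__(self, schema):
            self.schema = schema
        def load_cib(self):
            return {"schema": self.schema}
        def upgrade_cib(self):
            self.schema = (2, 8)

    assert load_cib_upgrading_if_needed(FakeRunner((2, 4)))["schema"] == (2, 8)
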
def test_simplest_resource(self):
- self.runner.set_runs(
- fixture_agent_load_calls()
- +
- fixture.calls_cib(
- self.fixture_resources_pre,
- self.fixture_resources_post_simple,
- self.upgraded_cib,
- )
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.cib.load(resources=self.fixture_resources_pre)
+ .env.push_cib(resources=self.fixture_resources_post_simple)
)
- self.simplest_create()
- self.runner.assert_everything_launched()
+ create_bundle(self.env_assist.get_env(), wait=False)
def test_bundle_doesnt_exist(self):
- self.runner.set_runs(
- fixture_agent_load_calls()
- +
- fixture.call_cib_load(fixture.cib_resources(
- self.fixture_empty_resources, self.upgraded_cib,
- ))
- )
- assert_raise_library_error(
- self.simplest_create,
- (
- severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "B",
- "id_description": "bundle",
- "context_type": "resources",
- "context_id": "",
- }
- )
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.cib.load(resources=self.fixture_empty_resources)
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: create_bundle(self.env_assist.get_env(), wait=False),
+ [
+ fixture.error(
+ report_codes.ID_NOT_FOUND,
+ id="B",
+ id_description="bundle",
+ context_type="resources",
+ context_id="",
+ )
+ ],
+ expected_in_processor=False
)
def test_id_not_bundle(self):
- resources_pre_update = """<resources>
- <primitive id="B"/>
- </resources>"""
- self.runner.set_runs(
- fixture_agent_load_calls()
- +
- fixture.call_cib_load(fixture.cib_resources(
- resources_pre_update, self.upgraded_cib,
- ))
- )
- assert_raise_library_error(
- self.simplest_create,
- (
- severities.ERROR,
- report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
- {
- "id": "B",
- "expected_types": ["bundle"],
- "current_type": "primitive",
- }
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.cib.load(
+ resources="""
+ <resources>
+ <primitive id="B"/>
+ </resources>
+ """
)
)
+ self.env_assist.assert_raise_library_error(
+ lambda: create_bundle(self.env_assist.get_env(), wait=False),
+ [
+ fixture.error(
+ report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
+ id="B",
+ expected_types=["bundle"],
+ current_type="primitive",
+ )
+ ],
+ expected_in_processor=False
+ )
+
def test_bundle_not_empty(self):
- resources_pre_update = """<resources>
- <bundle id="B">
- <primitive id="P"/>
- </bundle>
- </resources>"""
- self.runner.set_runs(
- fixture_agent_load_calls()
- +
- fixture.call_cib_load(fixture.cib_resources(
- resources_pre_update, self.upgraded_cib,
- ))
- )
- assert_raise_library_error(
- self.simplest_create,
- (
- severities.ERROR,
- report_codes.RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE,
- {
- "bundle_id": "B",
- "resource_id": "P",
- }
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.cib.load(
+ resources="""
+ <resources>
+ <bundle id="B">
+ <primitive id="P"/>
+ </bundle>
+ </resources>
+ """
)
)
+ self.env_assist.assert_raise_library_error(
+ lambda: create_bundle(self.env_assist.get_env(), wait=False),
+ [
+ fixture.error(
+ report_codes.RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE,
+ bundle_id="B",
+ resource_id="P",
+ )
+ ],
+ expected_in_processor=False
+ )
def test_wait_fail(self):
- self.runner.set_runs(
- fixture.call_dummy_metadata() +
- fixture.call_wait_supported() +
- fixture.calls_cib(
- self.fixture_resources_pre,
- self.fixture_resources_post_simple,
- cib_base_file=self.upgraded_cib,
- ) +
- fixture.call_wait(10, 62, self.fixture_wait_timeout_error)
- )
- assert_raise_library_error(
- lambda: self.simplest_create(10),
- fixture.report_wait_for_idle_timed_out(
- self.fixture_wait_timeout_error
- ),
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.pcmk.can_wait()
+ .runner.cib.load(resources=self.fixture_resources_pre)
+ .env.push_cib(
+ resources=self.fixture_resources_post_simple,
+ wait=TIMEOUT,
+ exception=LibraryError(
+ reports.wait_for_idle_timed_out(wait_error_message)
+ )
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: create_bundle(self.env_assist.get_env()),
+ [
+ fixture.report_wait_for_idle_timed_out(wait_error_message),
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
@skip_unless_pacemaker_supports_bundle
def test_wait_ok_run_ok(self):
- self.runner.set_runs(
- fixture.call_dummy_metadata() +
- fixture.call_wait_supported() +
- fixture.calls_cib(
- self.fixture_resources_pre,
- self.fixture_resources_post_simple,
- cib_base_file=self.upgraded_cib,
- ) +
- fixture.call_wait(10) +
- fixture.call_status(fixture.state_complete(
- self.fixture_status_running_with_primitive
- ))
- )
- self.simplest_create(10)
- self.env.report_processor.assert_reports([
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.pcmk.can_wait()
+ .runner.cib.load(resources=self.fixture_resources_pre)
+ .env.push_cib(
+ resources=self.fixture_resources_post_simple,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=self.fixture_status_running_with_primitive
+ )
+ )
+ create_bundle(self.env_assist.get_env())
+ self.env_assist.assert_reports([
fixture.report_resource_running("A", {"Started": ["node1"]}),
])
- self.runner.assert_everything_launched()
@skip_unless_pacemaker_supports_bundle
def test_wait_ok_run_fail(self):
- self.runner.set_runs(
- fixture.call_dummy_metadata() +
- fixture.call_wait_supported() +
- fixture.calls_cib(
- self.fixture_resources_pre,
- self.fixture_resources_post_simple,
- cib_base_file=self.upgraded_cib,
- ) +
- fixture.call_wait(10) +
- fixture.call_status(fixture.state_complete(
- self.fixture_status_primitive_not_running
- ))
- )
- assert_raise_library_error(
- lambda: self.simplest_create(10),
- fixture.report_resource_not_running("A", severities.ERROR),
- )
- self.runner.assert_everything_launched()
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.pcmk.can_wait()
+ .runner.cib.load(resources=self.fixture_resources_pre)
+ .env.push_cib(
+ resources=self.fixture_resources_post_simple,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=self.fixture_status_primitive_not_running
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: create_bundle(self.env_assist.get_env()),
+ [
+ fixture.error(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ resource_id="A"
+ )
+ ]
+ )
@skip_unless_pacemaker_supports_bundle
def test_disabled_wait_ok_not_running(self):
- self.runner.set_runs(
- fixture.call_dummy_metadata() +
- fixture.call_wait_supported() +
- fixture.calls_cib(
- self.fixture_resources_pre,
- self.fixture_resources_post_disabled,
- cib_base_file=self.upgraded_cib,
- ) +
- fixture.call_wait(10) +
- fixture.call_status(fixture.state_complete(
- self.fixture_status_primitive_not_running
- ))
- )
- self.simplest_create(10, disabled=True)
- self.env.report_processor.assert_reports([
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.pcmk.can_wait()
+ .runner.cib.load(resources=self.fixture_resources_pre)
+ .env.push_cib(
+ resources=self.fixture_resources_post_disabled,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=self.fixture_status_primitive_not_running
+ )
+ )
+ create_bundle(self.env_assist.get_env(), disabled=True)
+ self.env_assist.assert_reports([
fixture.report_resource_not_running("A")
])
- self.runner.assert_everything_launched()
@skip_unless_pacemaker_supports_bundle
def test_disabled_wait_ok_running(self):
- self.runner.set_runs(
- fixture.call_dummy_metadata() +
- fixture.call_wait_supported() +
- fixture.calls_cib(
- self.fixture_resources_pre,
- self.fixture_resources_post_disabled,
- cib_base_file=self.upgraded_cib,
- ) +
- fixture.call_wait(10) +
- fixture.call_status(fixture.state_complete(
- self.fixture_status_running_with_primitive
- ))
- )
- assert_raise_library_error(
- lambda: self.simplest_create(10, disabled=True),
- fixture.report_resource_running(
- "A", {"Started": ["node1"]}, severities.ERROR
- ),
+ (self.config
+ .runner.pcmk.load_agent()
+ .runner.pcmk.can_wait()
+ .runner.cib.load(resources=self.fixture_resources_pre)
+ .env.push_cib(
+ resources=self.fixture_resources_post_disabled,
+ wait=TIMEOUT
+ )
+ .runner.pcmk.load_state(
+ resources=self.fixture_status_running_with_primitive
+ )
+ )
+ self.env_assist.assert_raise_library_error(
+ lambda: create_bundle(self.env_assist.get_env(), disabled=True),
+ [
+ fixture.error(
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ resource_id="A",
+ roles_with_nodes={"Started": ["node1"]},
+ )
+ ]
)
- self.runner.assert_everything_launched()
diff --git a/pcs/lib/commands/test/resource/test_resource_enable_disable.py b/pcs/lib/commands/test/resource/test_resource_enable_disable.py
index b03740b..e01cf8a 100644
--- a/pcs/lib/commands/test/resource/test_resource_enable_disable.py
+++ b/pcs/lib/commands/test/resource/test_resource_enable_disable.py
@@ -2,20 +2,25 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.common import report_codes
+from pcs.lib import reports
from pcs.lib.commands import resource
-from pcs.lib.commands.test.resource.common import ResourceWithStateTest
-import pcs.lib.commands.test.resource.fixture as fixture
-from pcs.lib.errors import ReportItemSeverity as severities
-from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.lib.errors import (
+ LibraryError,
+ ReportItemSeverity as severities,
+)
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
from pcs.test.tools.misc import (
outdent,
skip_unless_pacemaker_supports_bundle,
)
+from pcs.test.tools.pcs_unittest import TestCase
+
+TIMEOUT=10
fixture_primitive_cib_enabled = """
<resources>
@@ -537,116 +542,117 @@ def fixture_report_unmanaged(resource):
None
)
+class DisablePrimitive(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class DisablePrimitive(ResourceWithStateTest):
def test_nonexistent_resource(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- fixture.cib_resources(fixture_primitive_cib_enabled)
- )
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_enabled)
)
- assert_raise_library_error(
- lambda: resource.disable(self.env, ["B"], False),
- fixture.report_not_found("B", "resources")
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.disable(self.env_assist.get_env(), ["B"], False),
+ [
+ fixture.report_not_found("B", "resources")
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
def test_nonexistent_resource_in_status(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- fixture.cib_resources(fixture_two_primitives_cib_enabled)
- )
- +
- fixture.call_status(
- fixture.state_complete(fixture_primitive_status_managed)
- )
+ (self.config
+ .runner.cib.load(resources=fixture_two_primitives_cib_enabled)
+ .runner.pcmk.load_state(resources=fixture_primitive_status_managed)
)
- assert_raise_library_error(
- lambda: resource.disable(self.env, ["B"], False),
- fixture.report_not_found("B")
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.disable(self.env_assist.get_env(), ["B"], False),
+ [
+ fixture.report_not_found("B")
+ ],
)
- self.runner.assert_everything_launched()
def test_correct_resource(self):
- self.assert_command_effect(
- fixture_two_primitives_cib_enabled,
- fixture_two_primitives_status_managed,
- lambda: resource.disable(self.env, ["A"], False),
- fixture_two_primitives_cib_disabled
+ (self.config
+ .runner.cib.load(resources=fixture_two_primitives_cib_enabled)
+ .runner.pcmk.load_state(
+ resources=fixture_two_primitives_status_managed
+ )
+ .env.push_cib(resources=fixture_two_primitives_cib_disabled)
)
+ resource.disable(self.env_assist.get_env(), ["A"], False)
def test_unmanaged(self):
# The code doesn't care what causes the resource to be unmanaged
# (cluster property, resource's meta-attribute or whatever). It only
# checks the cluster state (crm_mon).
- self.assert_command_effect(
- fixture_primitive_cib_enabled,
- fixture_primitive_status_unmanaged,
- lambda: resource.disable(self.env, ["A"], False),
- fixture_primitive_cib_disabled,
- reports=[
- fixture_report_unmanaged("A"),
- ]
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_enabled)
+ .runner.pcmk.load_state(
+ resources=fixture_primitive_status_unmanaged
+ )
+ .env.push_cib(resources=fixture_primitive_cib_disabled)
)
+ resource.disable(self.env_assist.get_env(), ["A"], False)
+ self.env_assist.assert_reports([fixture_report_unmanaged("A")])
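
The comment in test_unmanaged above states the key invariant: only the
cluster state reported by crm_mon decides whether a resource is unmanaged,
never the CIB configuration that caused it. A self-contained illustration
of reading that flag from crm_mon-style XML (the same shape as the status
fixtures in this file):

    from xml.etree import ElementTree

    state = ElementTree.fromstring(
        '<resources><resource id="A" managed="false" role="Started" /></resources>'
    )
    unmanaged = [
        elem.get("id")
        for elem in state.iter("resource")
        if elem.get("managed") == "false"
    ]
    assert unmanaged == ["A"]
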
-class EnablePrimitive(ResourceWithStateTest):
+class EnablePrimitive(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_nonexistent_resource(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- fixture.cib_resources(fixture_primitive_cib_disabled)
- )
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_disabled)
)
- assert_raise_library_error(
- lambda: resource.enable(self.env, ["B"], False),
- fixture.report_not_found("B", "resources")
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.enable(self.env_assist.get_env(), ["B"], False),
+ [
+ fixture.report_not_found("B", "resources")
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
def test_nonexistent_resource_in_status(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- fixture.cib_resources(fixture_two_primitives_cib_disabled)
- )
- +
- fixture.call_status(
- fixture.state_complete(fixture_primitive_status_managed)
- )
+ (self.config
+ .runner.cib.load(resources=fixture_two_primitives_cib_disabled)
+ .runner.pcmk.load_state(resources=fixture_primitive_status_managed)
)
- assert_raise_library_error(
- lambda: resource.enable(self.env, ["B"], False),
- fixture.report_not_found("B")
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.enable(self.env_assist.get_env(), ["B"], False),
+ [
+ fixture.report_not_found("B")
+ ]
)
- self.runner.assert_everything_launched()
def test_correct_resource(self):
- self.assert_command_effect(
- fixture_two_primitives_cib_disabled_both,
- fixture_two_primitives_status_managed,
- lambda: resource.enable(self.env, ["B"], False),
- fixture_two_primitives_cib_disabled
+ (self.config
+ .runner.cib.load(resources=fixture_two_primitives_cib_disabled_both)
+ .runner.pcmk.load_state(
+ resources=fixture_two_primitives_status_managed
+ )
+ .env.push_cib(resources=fixture_two_primitives_cib_disabled)
)
+ resource.enable(self.env_assist.get_env(), ["B"], False)
def test_unmanaged(self):
# The code doesn't care what causes the resource to be unmanaged
# (cluster property, resource's meta-attribute or whatever). It only
# checks the cluster state (crm_mon).
- self.assert_command_effect(
- fixture_primitive_cib_disabled,
- fixture_primitive_status_unmanaged,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_primitive_cib_enabled,
- reports=[
- fixture_report_unmanaged("A"),
- ]
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_disabled)
+ .runner.pcmk.load_state(
+ resources=fixture_primitive_status_unmanaged
+ )
+ .env.push_cib(resources=fixture_primitive_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A"], False)
+ self.env_assist.assert_reports([fixture_report_unmanaged("A")])
-class MoreResources(ResourceWithStateTest):
+class MoreResources(TestCase):
fixture_cib_enabled = """
<resources>
<primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
@@ -695,6 +701,10 @@ class MoreResources(ResourceWithStateTest):
<resource id="D" managed="false" />
</resources>
"""
+
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_success_enable(self):
fixture_enabled = """
<resources>
@@ -712,16 +722,16 @@ class MoreResources(ResourceWithStateTest):
</primitive>
</resources>
"""
- self.assert_command_effect(
- self.fixture_cib_disabled,
- self.fixture_status,
- lambda: resource.enable(self.env, ["A", "B", "D"], False),
- fixture_enabled,
- reports=[
- fixture_report_unmanaged("B"),
- fixture_report_unmanaged("D"),
- ]
- )
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_disabled)
+ .runner.pcmk.load_state(resources=self.fixture_status)
+ .env.push_cib(resources=fixture_enabled)
+ )
+ resource.enable(self.env_assist.get_env(), ["A", "B", "D"], False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("B"),
+ fixture_report_unmanaged("D"),
+ ])
def test_success_disable(self):
fixture_disabled = """
@@ -748,47 +758,58 @@ class MoreResources(ResourceWithStateTest):
</primitive>
</resources>
"""
- self.assert_command_effect(
- self.fixture_cib_enabled,
- self.fixture_status,
- lambda: resource.disable(self.env, ["A", "B", "D"], False),
- fixture_disabled,
- reports=[
- fixture_report_unmanaged("B"),
- fixture_report_unmanaged("D"),
- ]
- )
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_enabled)
+ .runner.pcmk.load_state(resources=self.fixture_status)
+ .env.push_cib(resources=fixture_disabled)
+ )
+ resource.disable(self.env_assist.get_env(), ["A", "B", "D"], False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("B"),
+ fixture_report_unmanaged("D"),
+ ])
def test_bad_resource_enable(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- fixture.cib_resources(self.fixture_cib_disabled)
- )
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_disabled)
)
- assert_raise_library_error(
- lambda: resource.enable(self.env, ["B", "X", "Y", "A"], False),
- fixture.report_not_found("X", "resources"),
- fixture.report_not_found("Y", "resources"),
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.enable(
+ self.env_assist.get_env(),
+ ["B", "X", "Y", "A"],
+ wait=False
+ ),
+ [
+ fixture.report_not_found("X", "resources"),
+ fixture.report_not_found("Y", "resources"),
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
def test_bad_resource_disable(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- fixture.cib_resources(self.fixture_cib_enabled)
- )
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_enabled)
)
- assert_raise_library_error(
- lambda: resource.disable(self.env, ["B", "X", "Y", "A"], False),
- fixture.report_not_found("X", "resources"),
- fixture.report_not_found("Y", "resources"),
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.disable(
+ self.env_assist.get_env(),
+ ["B", "X", "Y", "A"],
+ wait=False
+ ),
+ [
+ fixture.report_not_found("X", "resources"),
+ fixture.report_not_found("Y", "resources"),
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
+class Wait(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.pcmk.can_wait()
-class Wait(ResourceWithStateTest):
fixture_status_running = """
<resources>
<resource id="A" managed="true" role="Started">
@@ -821,187 +842,179 @@ class Wait(ResourceWithStateTest):
Action 12: B-node2-stop on node2
Error performing operation: Timer expired
"""
- )
+ ).strip()
def test_enable_dont_wait_on_error(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.call_cib_load(
- fixture.cib_resources(fixture_primitive_cib_disabled)
- )
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_disabled)
)
- assert_raise_library_error(
- lambda: resource.enable(self.env, ["B"], 10),
- fixture.report_not_found("B", "resources"),
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.enable(self.env_assist.get_env(), ["B"], TIMEOUT),
+ [
+ fixture.report_not_found("B", "resources"),
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
def test_disable_dont_wait_on_error(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.call_cib_load(
- fixture.cib_resources(fixture_primitive_cib_enabled)
- )
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_enabled)
)
- assert_raise_library_error(
- lambda: resource.disable(self.env, ["B"], 10),
- fixture.report_not_found("B", "resources"),
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.disable(self.env_assist.get_env(), ["B"], TIMEOUT),
+ [
+ fixture.report_not_found("B", "resources"),
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
def test_enable_resource_stopped(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib_and_status(
- fixture_two_primitives_cib_disabled_both,
- self.fixture_status_stopped,
- fixture_two_primitives_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_two_primitives_cib_disabled_both)
+ .runner.pcmk.load_state(resources=self.fixture_status_stopped)
+ .env.push_cib(
+ resources=fixture_two_primitives_cib_enabled,
+ wait=TIMEOUT
)
- +
- fixture.call_wait(10)
- +
- fixture.call_status(
- fixture.state_complete(self.fixture_status_stopped)
+ .runner.pcmk.load_state(
+ name="",
+ resources=self.fixture_status_stopped,
)
)
- assert_raise_library_error(
- lambda: resource.enable(self.env, ["A", "B"], 10),
- fixture.report_resource_not_running("A", severities.ERROR),
- fixture.report_resource_not_running("B", severities.ERROR),
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.enable(
+ self.env_assist.get_env(), ["A", "B"], TIMEOUT
+ ),
+ [
+ fixture.report_resource_not_running("A", severities.ERROR),
+ fixture.report_resource_not_running("B", severities.ERROR),
+ ]
)
- self.runner.assert_everything_launched()
def test_disable_resource_stopped(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib_and_status(
- fixture_two_primitives_cib_enabled,
- self.fixture_status_running,
- fixture_two_primitives_cib_disabled_both
+ (self.config
+ .runner.cib.load(resources=fixture_two_primitives_cib_enabled)
+ .runner.pcmk.load_state(resources=self.fixture_status_running)
+ .env.push_cib(
+ resources=fixture_two_primitives_cib_disabled_both,
+ wait=TIMEOUT
)
- +
- fixture.call_wait(10)
- +
- fixture.call_status(
- fixture.state_complete(self.fixture_status_stopped)
+ .runner.pcmk.load_state(
+ name="",
+ resources=self.fixture_status_stopped,
)
)
- resource.disable(self.env, ["A", "B"], 10)
- self.env.report_processor.assert_reports([
+ resource.disable(self.env_assist.get_env(), ["A", "B"], TIMEOUT)
+ self.env_assist.assert_reports([
fixture.report_resource_not_running("A"),
fixture.report_resource_not_running("B"),
])
- self.runner.assert_everything_launched()
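
All of the Wait tests share one shape: push the CIB with wait=TIMEOUT, then
expect a second crm_mon query (the extra load_state call with name="") from
which per-resource running/not-running reports are produced. Sketched with
stand-in helpers (hypothetical, not pcs internals):

    def reports_after_wait(push_cib, load_state, resource_ids):
        push_cib(wait=10)     # apply changes, wait for the cluster to settle
        state = load_state()  # second crm_mon query, made after the wait
        return [
            (rid, "running" if state.get(rid) else "not running")
            for rid in resource_ids
        ]

    assert reports_after_wait(
        lambda wait: None,
        lambda: {"A": False, "B": False},
        ["A", "B"],
    ) == [("A", "not running"), ("B", "not running")]
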
def test_enable_resource_running(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib_and_status(
- fixture_two_primitives_cib_disabled_both,
- self.fixture_status_stopped,
- fixture_two_primitives_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_two_primitives_cib_disabled_both)
+ .runner.pcmk.load_state(resources=self.fixture_status_stopped)
+ .env.push_cib(
+ resources=fixture_two_primitives_cib_enabled,
+ wait=TIMEOUT
)
- +
- fixture.call_wait(10)
- +
- fixture.call_status(
- fixture.state_complete(self.fixture_status_running)
+ .runner.pcmk.load_state(
+ name="",
+ resources=self.fixture_status_running,
)
)
- resource.enable(self.env, ["A", "B"], 10)
+ resource.enable(self.env_assist.get_env(), ["A", "B"], TIMEOUT)
- self.env.report_processor.assert_reports([
+ self.env_assist.assert_reports([
fixture.report_resource_running("A", {"Started": ["node1"]}),
fixture.report_resource_running("B", {"Started": ["node2"]}),
])
- self.runner.assert_everything_launched()
def test_disable_resource_running(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib_and_status(
- fixture_two_primitives_cib_enabled,
- self.fixture_status_running,
- fixture_two_primitives_cib_disabled_both
+ (self.config
+ .runner.cib.load(resources=fixture_two_primitives_cib_enabled)
+ .runner.pcmk.load_state(resources=self.fixture_status_running)
+ .env.push_cib(
+ resources=fixture_two_primitives_cib_disabled_both,
+ wait=TIMEOUT
)
- +
- fixture.call_wait(10)
- +
- fixture.call_status(
- fixture.state_complete(self.fixture_status_running)
+ .runner.pcmk.load_state(
+ name="",
+ resources=self.fixture_status_running,
)
)
- assert_raise_library_error(
- lambda: resource.disable(self.env, ["A", "B"], 10),
- fixture.report_resource_running(
- "A", {"Started": ["node1"]}, severities.ERROR
- ),
- fixture.report_resource_running(
- "B", {"Started": ["node2"]}, severities.ERROR
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.disable(
+ self.env_assist.get_env(), ["A", "B"], TIMEOUT
),
+ [
+ fixture.report_resource_running(
+ "A", {"Started": ["node1"]}, severities.ERROR
+ ),
+ fixture.report_resource_running(
+ "B", {"Started": ["node2"]}, severities.ERROR
+ ),
+ ]
)
- self.runner.assert_everything_launched()
def test_enable_wait_timeout(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib_and_status(
- fixture_primitive_cib_disabled,
- self.fixture_status_stopped,
- fixture_primitive_cib_enabled
- )
- +
- fixture.call_wait(
- 10, retval=62, stderr=self.fixture_wait_timeout_error
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_disabled)
+ .runner.pcmk.load_state(resources=self.fixture_status_stopped)
+ .env.push_cib(
+ resources=fixture_primitive_cib_enabled,
+ wait=TIMEOUT,
+ exception=LibraryError(
+ reports.wait_for_idle_timed_out(
+ self.fixture_wait_timeout_error
+ )
+ )
)
)
- assert_raise_library_error(
- lambda: resource.enable(self.env, ["A"], 10),
- fixture.report_wait_for_idle_timed_out(
- self.fixture_wait_timeout_error
- ),
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.enable(self.env_assist.get_env(), ["A"], TIMEOUT),
+ [
+ fixture.report_wait_for_idle_timed_out(
+ self.fixture_wait_timeout_error
+ )
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
def test_disable_wait_timeout(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib_and_status(
- fixture_primitive_cib_enabled,
- self.fixture_status_running,
- fixture_primitive_cib_disabled
- )
- +
- fixture.call_wait(
- 10, retval=62, stderr=self.fixture_wait_timeout_error
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_enabled)
+ .runner.pcmk.load_state(resources=self.fixture_status_running)
+ .env.push_cib(
+ resources=fixture_primitive_cib_disabled,
+ wait=TIMEOUT,
+ exception=LibraryError(
+ reports.wait_for_idle_timed_out(
+ self.fixture_wait_timeout_error
+ )
+ )
)
)
- assert_raise_library_error(
- lambda: resource.disable(self.env, ["A"], 10),
- fixture.report_wait_for_idle_timed_out(
- self.fixture_wait_timeout_error
- ),
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.disable(self.env_assist.get_env(), ["A"], TIMEOUT),
+ [
+ fixture.report_wait_for_idle_timed_out(
+ self.fixture_wait_timeout_error
+ )
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
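
When the wait itself times out, the new fixtures attach the failure
directly to the push via exception=LibraryError(...), and the command is
expected to surface the same wait_for_idle_timed_out report. A toy version
of raising and inspecting such a timeout, using a hypothetical exception
class rather than pcs.lib.errors:

    class WaitTimeoutError(Exception):
        pass

    def push_cib(wait=None, fail_stderr=None):
        if wait is not None and fail_stderr is not None:
            raise WaitTimeoutError(fail_stderr)

    try:
        push_cib(wait=10, fail_stderr="Error performing operation: Timer expired")
    except WaitTimeoutError as exc:
        assert "Timer expired" in str(exc)
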
-class WaitClone(ResourceWithStateTest):
+class WaitClone(TestCase):
fixture_status_running = """
<resources>
<clone id="A-clone" managed="true" multi_state="false" unique="false">
@@ -1024,25 +1037,27 @@ class WaitClone(ResourceWithStateTest):
</clone>
</resources>
"""
+
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.pcmk.can_wait()
+
def test_disable_clone(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib_and_status(
- fixture_clone_cib_enabled,
- self.fixture_status_running,
- fixture_clone_cib_disabled_clone
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_enabled)
+ .runner.pcmk.load_state(resources=self.fixture_status_running)
+ .env.push_cib(
+ resources=fixture_clone_cib_disabled_clone,
+ wait=TIMEOUT
)
- +
- fixture.call_wait(10)
- +
- fixture.call_status(
- fixture.state_complete(self.fixture_status_stopped)
+ .runner.pcmk.load_state(
+ name="",
+ resources=self.fixture_status_stopped,
)
)
- resource.disable(self.env, ["A-clone"], 10)
- self.env.report_processor.assert_reports([
+ resource.disable(self.env_assist.get_env(), ["A-clone"], TIMEOUT)
+ self.env_assist.assert_reports([
(
severities.INFO,
report_codes.RESOURCE_DOES_NOT_RUN,
@@ -1052,28 +1067,23 @@ class WaitClone(ResourceWithStateTest):
None
)
])
- self.runner.assert_everything_launched()
def test_enable_clone(self):
- self.runner.set_runs(
- fixture.call_wait_supported()
- +
- fixture.calls_cib_and_status(
- fixture_clone_cib_disabled_clone,
- self.fixture_status_stopped,
- fixture_clone_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_disabled_clone)
+ .runner.pcmk.load_state(resources=self.fixture_status_stopped)
+ .env.push_cib(
+ resources=fixture_clone_cib_enabled,
+ wait=TIMEOUT
)
- +
- fixture.call_wait(10)
- +
- fixture.call_status(
- fixture.state_complete(self.fixture_status_running)
+ .runner.pcmk.load_state(
+ name="",
+ resources=self.fixture_status_running,
)
)
- resource.enable(self.env, ["A-clone"], 10)
-
- self.env.report_processor.assert_reports([
+ resource.enable(self.env_assist.get_env(), ["A-clone"], TIMEOUT)
+ self.env_assist.assert_reports([
(
severities.INFO,
report_codes.RESOURCE_RUNNING_ON_NODES,
@@ -1084,495 +1094,557 @@ class WaitClone(ResourceWithStateTest):
None
)
])
- self.runner.assert_everything_launched()
+class DisableGroup(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(resources=fixture_group_cib_enabled)
-class DisableGroup(ResourceWithStateTest):
def test_primitive(self):
- self.assert_command_effect(
- fixture_group_cib_enabled,
- fixture_group_status_managed,
- lambda: resource.disable(self.env, ["A1"], False),
- fixture_group_cib_disabled_primitive
+ (self.config
+ .runner.pcmk.load_state(resources=fixture_group_status_managed)
+ .env.push_cib(resources=fixture_group_cib_disabled_primitive)
)
+ resource.disable(self.env_assist.get_env(), ["A1"], wait=False)
def test_group(self):
- self.assert_command_effect(
- fixture_group_cib_enabled,
- fixture_group_status_managed,
- lambda: resource.disable(self.env, ["A"], False),
- fixture_group_cib_disabled_group
+ (self.config
+ .runner.pcmk.load_state(resources=fixture_group_status_managed)
+ .env.push_cib(resources=fixture_group_cib_disabled_group)
)
+ resource.disable(self.env_assist.get_env(), ["A"], wait=False)
def test_primitive_unmanaged(self):
- self.assert_command_effect(
- fixture_group_cib_enabled,
- fixture_group_status_unmanaged,
- lambda: resource.disable(self.env, ["A1"], False),
- fixture_group_cib_disabled_primitive,
- reports=[
- fixture_report_unmanaged("A1"),
- ]
+ (self.config
+ .runner.pcmk.load_state(resources=fixture_group_status_unmanaged)
+ .env.push_cib(resources=fixture_group_cib_disabled_primitive)
)
+ resource.disable(self.env_assist.get_env(), ["A1"], wait=False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A1"),
+ ])
def test_group_unmanaged(self):
- self.assert_command_effect(
- fixture_group_cib_enabled,
- fixture_group_status_unmanaged,
- lambda: resource.disable(self.env, ["A"], False),
- fixture_group_cib_disabled_group,
- reports=[
- fixture_report_unmanaged("A"),
- ]
+ (self.config
+ .runner.pcmk.load_state(resources=fixture_group_status_unmanaged)
+ .env.push_cib(resources=fixture_group_cib_disabled_group)
)
+ resource.disable(self.env_assist.get_env(), ["A"], wait=False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A"),
+ ])
+class EnableGroup(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class EnableGroup(ResourceWithStateTest):
def test_primitive(self):
- self.assert_command_effect(
- fixture_group_cib_disabled_primitive,
- fixture_group_status_managed,
- lambda: resource.enable(self.env, ["A1"], False),
- fixture_group_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_group_cib_disabled_primitive)
+ .runner.pcmk.load_state(resources=fixture_group_status_managed)
+ .env.push_cib(resources=fixture_group_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A1"], wait=False)
def test_primitive_disabled_both(self):
- self.assert_command_effect(
- fixture_group_cib_disabled_both,
- fixture_group_status_managed,
- lambda: resource.enable(self.env, ["A1"], False),
- fixture_group_cib_disabled_group
+ (self.config
+ .runner.cib.load(resources=fixture_group_cib_disabled_both)
+ .runner.pcmk.load_state(resources=fixture_group_status_managed)
+ .env.push_cib(resources=fixture_group_cib_disabled_group)
)
+ resource.enable(self.env_assist.get_env(), ["A1"], wait=False)
def test_group(self):
- self.assert_command_effect(
- fixture_group_cib_disabled_group,
- fixture_group_status_managed,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_group_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_group_cib_disabled_group)
+ .runner.pcmk.load_state(resources=fixture_group_status_managed)
+ .env.push_cib(resources=fixture_group_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A"], wait=False)
def test_group_both_disabled(self):
- self.assert_command_effect(
- fixture_group_cib_disabled_both,
- fixture_group_status_managed,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_group_cib_disabled_primitive
+ (self.config
+ .runner.cib.load(resources=fixture_group_cib_disabled_both)
+ .runner.pcmk.load_state(resources=fixture_group_status_managed)
+ .env.push_cib(resources=fixture_group_cib_disabled_primitive)
)
+ resource.enable(self.env_assist.get_env(), ["A"], wait=False)
def test_primitive_unmanaged(self):
- self.assert_command_effect(
- fixture_group_cib_disabled_primitive,
- fixture_group_status_unmanaged,
- lambda: resource.enable(self.env, ["A1"], False),
- fixture_group_cib_enabled,
- reports=[
- fixture_report_unmanaged("A1"),
- ]
- )
+ (self.config
+ .runner.cib.load(resources=fixture_group_cib_disabled_primitive)
+ .runner.pcmk.load_state(resources=fixture_group_status_unmanaged)
+ .env.push_cib(resources=fixture_group_cib_enabled)
+ )
+ resource.enable(self.env_assist.get_env(), ["A1"], wait=False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A1"),
+ ])
def test_group_unmanaged(self):
- self.assert_command_effect(
- fixture_group_cib_disabled_group,
- fixture_group_status_unmanaged,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_group_cib_enabled,
- reports=[
- fixture_report_unmanaged("A"),
- ]
- )
+ (self.config
+ .runner.cib.load(resources=fixture_group_cib_disabled_group)
+ .runner.pcmk.load_state(resources=fixture_group_status_unmanaged)
+ .env.push_cib(resources=fixture_group_cib_enabled)
+ )
+ resource.enable(self.env_assist.get_env(), ["A"], wait=False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A"),
+ ])
-class DisableClone(ResourceWithStateTest):
+class DisableClone(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(resources=fixture_clone_cib_enabled)
+
def test_primitive(self):
- self.assert_command_effect(
- fixture_clone_cib_enabled,
- fixture_clone_status_managed,
- lambda: resource.disable(self.env, ["A"], False),
- fixture_clone_cib_disabled_primitive
+ (self.config
+ .runner.pcmk.load_state(resources=fixture_clone_status_managed)
+ .env.push_cib(resources=fixture_clone_cib_disabled_primitive)
)
+ resource.disable(self.env_assist.get_env(), ["A"], wait=False)
def test_clone(self):
- self.assert_command_effect(
- fixture_clone_cib_enabled,
- fixture_clone_status_managed,
- lambda: resource.disable(self.env, ["A-clone"], False),
- fixture_clone_cib_disabled_clone
+ (self.config
+ .runner.pcmk.load_state(resources=fixture_clone_status_managed)
+ .env.push_cib(resources=fixture_clone_cib_disabled_clone)
)
+ resource.disable(self.env_assist.get_env(), ["A-clone"], wait=False)
def test_primitive_unmanaged(self):
- self.assert_command_effect(
- fixture_clone_cib_enabled,
- fixture_clone_status_unmanaged,
- lambda: resource.disable(self.env, ["A"], False),
- fixture_clone_cib_disabled_primitive,
- reports=[
- fixture_report_unmanaged("A"),
- ]
+ (self.config
+ .runner.pcmk.load_state(resources=fixture_clone_status_unmanaged)
+ .env.push_cib(resources=fixture_clone_cib_disabled_primitive)
)
+ resource.disable(self.env_assist.get_env(), ["A"], wait=False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A"),
+ ])
def test_clone_unmanaged(self):
- self.assert_command_effect(
- fixture_clone_cib_enabled,
- fixture_clone_status_unmanaged,
- lambda: resource.disable(self.env, ["A-clone"], False),
- fixture_clone_cib_disabled_clone,
- reports=[
- fixture_report_unmanaged("A-clone"),
- ]
+ (self.config
+ .runner.pcmk.load_state(resources=fixture_clone_status_unmanaged)
+ .env.push_cib(resources=fixture_clone_cib_disabled_clone)
)
+ resource.disable(self.env_assist.get_env(), ["A-clone"], wait=False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A-clone"),
+ ])
+class EnableClone(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class EnableClone(ResourceWithStateTest):
def test_primitive(self):
- self.assert_command_effect(
- fixture_clone_cib_disabled_primitive,
- fixture_clone_status_managed,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_clone_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_disabled_primitive)
+ .runner.pcmk.load_state(resources=fixture_clone_status_managed)
+ .env.push_cib(resources=fixture_clone_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A"], wait=False)
def test_primitive_disabled_both(self):
- self.assert_command_effect(
- fixture_clone_cib_disabled_both,
- fixture_clone_status_managed,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_clone_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_disabled_both)
+ .runner.pcmk.load_state(resources=fixture_clone_status_managed)
+ .env.push_cib(resources=fixture_clone_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A"], wait=False)
def test_clone(self):
- self.assert_command_effect(
- fixture_clone_cib_disabled_clone,
- fixture_clone_status_managed,
- lambda: resource.enable(self.env, ["A-clone"], False),
- fixture_clone_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_disabled_clone)
+ .runner.pcmk.load_state(resources=fixture_clone_status_managed)
+ .env.push_cib(resources=fixture_clone_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A-clone"], wait=False)
def test_clone_disabled_both(self):
- self.assert_command_effect(
- fixture_clone_cib_disabled_both,
- fixture_clone_status_managed,
- lambda: resource.enable(self.env, ["A-clone"], False),
- fixture_clone_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_disabled_both)
+ .runner.pcmk.load_state(resources=fixture_clone_status_managed)
+ .env.push_cib(resources=fixture_clone_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A-clone"], wait=False)
def test_primitive_unmanaged(self):
- self.assert_command_effect(
- fixture_clone_cib_disabled_primitive,
- fixture_clone_status_unmanaged,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_clone_cib_enabled,
- reports=[
- fixture_report_unmanaged("A-clone"),
- fixture_report_unmanaged("A"),
- ]
- )
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_disabled_primitive)
+ .runner.pcmk.load_state(resources=fixture_clone_status_unmanaged)
+ .env.push_cib(resources=fixture_clone_cib_enabled)
+ )
+ resource.enable(self.env_assist.get_env(), ["A"], wait=False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A-clone"),
+ fixture_report_unmanaged("A"),
+ ])
def test_clone_unmanaged(self):
- self.assert_command_effect(
- fixture_clone_cib_disabled_clone,
- fixture_clone_status_unmanaged,
- lambda: resource.enable(self.env, ["A-clone"], False),
- fixture_clone_cib_enabled,
- reports=[
- fixture_report_unmanaged("A-clone"),
- fixture_report_unmanaged("A"),
- ]
- )
-
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_disabled_clone)
+ .runner.pcmk.load_state(resources=fixture_clone_status_unmanaged)
+ .env.push_cib(resources=fixture_clone_cib_enabled)
+ )
+ resource.enable(self.env_assist.get_env(), ["A-clone"], wait=False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A-clone"),
+ fixture_report_unmanaged("A"),
+ ])
-class DisableMaster(ResourceWithStateTest):
+class DisableMaster(TestCase):
    # Same as for clones; only a minimal set of tests is kept here.
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_enabled)
+ .runner.pcmk.load_state(resources=fixture_master_status_managed)
+ )
+
def test_primitive(self):
- self.assert_command_effect(
- fixture_master_cib_enabled,
- fixture_master_status_managed,
- lambda: resource.disable(self.env, ["A"], False),
- fixture_master_cib_disabled_primitive
+ self.config.env.push_cib(
+ resources=fixture_master_cib_disabled_primitive
)
+ resource.disable(self.env_assist.get_env(), ["A"], False)
def test_master(self):
- self.assert_command_effect(
- fixture_master_cib_enabled,
- fixture_master_status_managed,
- lambda: resource.disable(self.env, ["A-master"], False),
- fixture_master_cib_disabled_master
+ self.config.env.push_cib(
+ resources=fixture_master_cib_disabled_master
)
+ resource.disable(self.env_assist.get_env(), ["A-master"], False)
-
-class EnableMaster(ResourceWithStateTest):
+class EnableMaster(TestCase):
    # Same as for clones; only a minimal set of tests is kept here.
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_primitive(self):
- self.assert_command_effect(
- fixture_master_cib_disabled_primitive,
- fixture_master_status_managed,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_master_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_disabled_primitive)
+ .runner.pcmk.load_state(resources=fixture_master_status_managed)
+ .env.push_cib(resources=fixture_master_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A"], False)
def test_primitive_disabled_both(self):
- self.assert_command_effect(
- fixture_master_cib_disabled_both,
- fixture_master_status_managed,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_master_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_disabled_both)
+ .runner.pcmk.load_state(resources=fixture_master_status_managed)
+ .env.push_cib(resources=fixture_master_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A"], False)
def test_master(self):
- self.assert_command_effect(
- fixture_master_cib_disabled_master,
- fixture_master_status_managed,
- lambda: resource.enable(self.env, ["A-master"], False),
- fixture_master_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_disabled_master)
+ .runner.pcmk.load_state(resources=fixture_master_status_managed)
+ .env.push_cib(resources=fixture_master_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A-master"], False)
def test_master_disabled_both(self):
- self.assert_command_effect(
- fixture_master_cib_disabled_both,
- fixture_master_status_managed,
- lambda: resource.enable(self.env, ["A-master"], False),
- fixture_master_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_disabled_both)
+ .runner.pcmk.load_state(resources=fixture_master_status_managed)
+ .env.push_cib(resources=fixture_master_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A-master"], False)
+
+class DisableClonedGroup(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class DisableClonedGroup(ResourceWithStateTest):
def test_clone(self):
- self.assert_command_effect(
- fixture_clone_group_cib_enabled,
- fixture_clone_group_status_managed,
- lambda: resource.disable(self.env, ["A-clone"], False),
- fixture_clone_group_cib_disabled_clone
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_enabled)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_managed
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_disabled_clone)
)
+ resource.disable(self.env_assist.get_env(), ["A-clone"], False)
def test_group(self):
- self.assert_command_effect(
- fixture_clone_group_cib_enabled,
- fixture_clone_group_status_managed,
- lambda: resource.disable(self.env, ["A"], False),
- fixture_clone_group_cib_disabled_group
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_enabled)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_managed
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_disabled_group)
)
+ resource.disable(self.env_assist.get_env(), ["A"], False)
def test_primitive(self):
- self.assert_command_effect(
- fixture_clone_group_cib_enabled,
- fixture_clone_group_status_managed,
- lambda: resource.disable(self.env, ["A1"], False),
- fixture_clone_group_cib_disabled_primitive
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_enabled)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_managed
+ )
+ .env.push_cib(
+ resources=fixture_clone_group_cib_disabled_primitive
+ )
)
+ resource.disable(self.env_assist.get_env(), ["A1"], False)
def test_clone_unmanaged(self):
- self.assert_command_effect(
- fixture_clone_group_cib_enabled,
- fixture_clone_group_status_unmanaged,
- lambda: resource.disable(self.env, ["A-clone"], False),
- fixture_clone_group_cib_disabled_clone,
- reports=[
- fixture_report_unmanaged("A-clone"),
- ]
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_enabled)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_unmanaged
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_disabled_clone)
)
+ resource.disable(self.env_assist.get_env(), ["A-clone"], False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A-clone"),
+ ])
def test_group_unmanaged(self):
- self.assert_command_effect(
- fixture_clone_group_cib_enabled,
- fixture_clone_group_status_unmanaged,
- lambda: resource.disable(self.env, ["A"], False),
- fixture_clone_group_cib_disabled_group,
- reports=[
- fixture_report_unmanaged("A"),
- ]
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_enabled)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_unmanaged
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_disabled_group)
)
+ resource.disable(self.env_assist.get_env(), ["A"], False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A"),
+ ])
def test_primitive_unmanaged(self):
- self.assert_command_effect(
- fixture_clone_group_cib_enabled,
- fixture_clone_group_status_unmanaged,
- lambda: resource.disable(self.env, ["A1"], False),
- fixture_clone_group_cib_disabled_primitive,
- reports=[
- fixture_report_unmanaged("A1"),
- ]
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_enabled)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_unmanaged
+ )
+ .env.push_cib(
+ resources=fixture_clone_group_cib_disabled_primitive
+ )
)
+ resource.disable(self.env_assist.get_env(), ["A1"], False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A1"),
+ ])
+
+class EnableClonedGroup(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class EnableClonedGroup(ResourceWithStateTest):
def test_clone(self):
- self.assert_command_effect(
- fixture_clone_group_cib_disabled_clone,
- fixture_clone_group_status_managed,
- lambda: resource.enable(self.env, ["A-clone"], False),
- fixture_clone_group_cib_enabled,
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_disabled_clone)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_managed
+ )
+            .env.push_cib(resources=fixture_clone_group_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A-clone"], False)
def test_clone_disabled_all(self):
- self.assert_command_effect(
- fixture_clone_group_cib_disabled_all,
- fixture_clone_group_status_managed,
- lambda: resource.enable(self.env, ["A-clone"], False),
- fixture_clone_group_cib_disabled_primitive
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_disabled_all)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_managed
+ )
+ .env.push_cib(
+ resources=fixture_clone_group_cib_disabled_primitive
+ )
)
+ resource.enable(self.env_assist.get_env(), ["A-clone"], False)
def test_group(self):
- self.assert_command_effect(
- fixture_clone_group_cib_disabled_group,
- fixture_clone_group_status_managed,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_clone_group_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_disabled_group)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_managed
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A"], False)
def test_group_disabled_all(self):
- self.assert_command_effect(
- fixture_clone_group_cib_disabled_all,
- fixture_clone_group_status_managed,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_clone_group_cib_disabled_primitive
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_disabled_all)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_managed
+ )
+ .env.push_cib(
+ resources=fixture_clone_group_cib_disabled_primitive
+ )
)
+ resource.enable(self.env_assist.get_env(), ["A"], False)
def test_primitive(self):
- self.assert_command_effect(
- fixture_clone_group_cib_disabled_primitive,
- fixture_clone_group_status_managed,
- lambda: resource.enable(self.env, ["A1"], False),
- fixture_clone_group_cib_enabled
+ (self.config
+ .runner.cib.load(
+ resources=fixture_clone_group_cib_disabled_primitive
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_managed
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A1"], False)
def test_primitive_disabled_all(self):
- self.assert_command_effect(
- fixture_clone_group_cib_disabled_all,
- fixture_clone_group_status_managed,
- lambda: resource.enable(self.env, ["A1"], False),
- fixture_clone_group_cib_disabled_clone_group
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_disabled_all)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_managed
+ )
+ .env.push_cib(
+ resources=fixture_clone_group_cib_disabled_clone_group
+ )
)
+ resource.enable(self.env_assist.get_env(), ["A1"], False)
def test_clone_unmanaged(self):
- self.assert_command_effect(
- fixture_clone_group_cib_disabled_clone,
- fixture_clone_group_status_unmanaged,
- lambda: resource.enable(self.env, ["A-clone"], False),
- fixture_clone_group_cib_enabled,
- reports=[
- fixture_report_unmanaged("A-clone"),
- fixture_report_unmanaged("A"),
- ]
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_disabled_clone)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_unmanaged
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A-clone"], False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A-clone"),
+ fixture_report_unmanaged("A"),
+ ])
def test_group_unmanaged(self):
- self.assert_command_effect(
- fixture_clone_group_cib_disabled_group,
- fixture_clone_group_status_unmanaged,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_clone_group_cib_enabled,
- reports=[
- fixture_report_unmanaged("A"),
- fixture_report_unmanaged("A-clone"),
- ]
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_disabled_group)
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_unmanaged
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A"], False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A"),
+ fixture_report_unmanaged("A-clone"),
+ ])
def test_primitive_unmanaged(self):
- self.assert_command_effect(
- fixture_clone_group_cib_disabled_primitive,
- fixture_clone_group_status_unmanaged,
- lambda: resource.enable(self.env, ["A1"], False),
- fixture_clone_group_cib_enabled,
- reports=[
- fixture_report_unmanaged("A1"),
- ]
+ (self.config
+ .runner.cib.load(
+ resources=fixture_clone_group_cib_disabled_primitive
+ )
+ .runner.pcmk.load_state(
+ resources=fixture_clone_group_status_unmanaged
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A1"], False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A1"),
+ ])
@skip_unless_pacemaker_supports_bundle
-class DisableBundle(ResourceWithStateTest):
+class DisableBundle(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_primitive(self):
- self.assert_command_effect(
- fixture_bundle_cib_enabled,
- fixture_bundle_status_managed,
- lambda: resource.disable(self.env, ["A"], False),
- fixture_bundle_cib_disabled_primitive
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_enabled)
+ .runner.pcmk.load_state(resources=fixture_bundle_status_managed)
+ .env.push_cib(resources=fixture_bundle_cib_disabled_primitive)
)
+ resource.disable(self.env_assist.get_env(), ["A"], False)
def test_bundle(self):
- self.assert_command_effect(
- fixture_bundle_cib_enabled,
- fixture_bundle_status_managed,
- lambda: resource.disable(self.env, ["A-bundle"], False),
- fixture_bundle_cib_disabled_bundle
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_enabled)
+ .runner.pcmk.load_state(resources=fixture_bundle_status_managed)
+ .env.push_cib(resources=fixture_bundle_cib_disabled_bundle)
)
+ resource.disable(self.env_assist.get_env(), ["A-bundle"], False)
def test_primitive_unmanaged(self):
- self.assert_command_effect(
- fixture_bundle_cib_enabled,
- fixture_bundle_status_unmanaged,
- lambda: resource.disable(self.env, ["A"], False),
- fixture_bundle_cib_disabled_primitive,
- reports=[
- fixture_report_unmanaged("A"),
- ]
- )
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_enabled)
+ .runner.pcmk.load_state(resources=fixture_bundle_status_unmanaged)
+ .env.push_cib(resources=fixture_bundle_cib_disabled_primitive)
+ )
+ resource.disable(self.env_assist.get_env(), ["A"], False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A"),
+ ])
def test_bundle_unmanaged(self):
- self.assert_command_effect(
- fixture_bundle_cib_enabled,
- fixture_bundle_status_unmanaged,
- lambda: resource.disable(self.env, ["A-bundle"], False),
- fixture_bundle_cib_disabled_bundle,
- reports=[
- fixture_report_unmanaged("A-bundle"),
- ]
- )
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_enabled)
+ .runner.pcmk.load_state(resources=fixture_bundle_status_unmanaged)
+ .env.push_cib(resources=fixture_bundle_cib_disabled_bundle)
+ )
+ resource.disable(self.env_assist.get_env(), ["A-bundle"], False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A-bundle"),
+ ])
@skip_unless_pacemaker_supports_bundle
-class EnableBundle(ResourceWithStateTest):
+class EnableBundle(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_primitive(self):
- self.assert_command_effect(
- fixture_bundle_cib_disabled_primitive,
- fixture_bundle_status_managed,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_bundle_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_disabled_primitive)
+ .runner.pcmk.load_state(resources=fixture_bundle_status_managed)
+ .env.push_cib(resources=fixture_bundle_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A"], False)
def test_primitive_disabled_both(self):
- self.assert_command_effect(
- fixture_bundle_cib_disabled_both,
- fixture_bundle_status_managed,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_bundle_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_disabled_both)
+ .runner.pcmk.load_state(resources=fixture_bundle_status_managed)
+ .env.push_cib(resources=fixture_bundle_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A"], False)
def test_bundle(self):
- self.assert_command_effect(
- fixture_bundle_cib_disabled_bundle,
- fixture_bundle_status_managed,
- lambda: resource.enable(self.env, ["A-bundle"], False),
- fixture_bundle_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_disabled_bundle)
+ .runner.pcmk.load_state(resources=fixture_bundle_status_managed)
+ .env.push_cib(resources=fixture_bundle_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A-bundle"], False)
def test_bundle_disabled_both(self):
- self.assert_command_effect(
- fixture_bundle_cib_disabled_both,
- fixture_bundle_status_managed,
- lambda: resource.enable(self.env, ["A-bundle"], False),
- fixture_bundle_cib_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_disabled_both)
+ .runner.pcmk.load_state(resources=fixture_bundle_status_managed)
+ .env.push_cib(resources=fixture_bundle_cib_enabled)
)
+ resource.enable(self.env_assist.get_env(), ["A-bundle"], False)
def test_primitive_unmanaged(self):
- self.assert_command_effect(
- fixture_bundle_cib_disabled_primitive,
- fixture_bundle_status_unmanaged,
- lambda: resource.enable(self.env, ["A"], False),
- fixture_bundle_cib_enabled,
- reports=[
- fixture_report_unmanaged("A"),
- fixture_report_unmanaged("A-bundle"),
- ]
- )
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_disabled_primitive)
+ .runner.pcmk.load_state(resources=fixture_bundle_status_unmanaged)
+ .env.push_cib(resources=fixture_bundle_cib_enabled)
+ )
+ resource.enable(self.env_assist.get_env(), ["A"], False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A"),
+ fixture_report_unmanaged("A-bundle"),
+ ])
def test_bundle_unmanaged(self):
- self.assert_command_effect(
- fixture_bundle_cib_disabled_primitive,
- fixture_bundle_status_unmanaged,
- lambda: resource.enable(self.env, ["A-bundle"], False),
- fixture_bundle_cib_enabled,
- reports=[
- fixture_report_unmanaged("A-bundle"),
- fixture_report_unmanaged("A"),
- ]
- )
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_disabled_primitive)
+ .runner.pcmk.load_state(resources=fixture_bundle_status_unmanaged)
+ .env.push_cib(resources=fixture_bundle_cib_enabled)
+ )
+ resource.enable(self.env_assist.get_env(), ["A-bundle"], False)
+ self.env_assist.assert_reports([
+ fixture_report_unmanaged("A-bundle"),
+ fixture_report_unmanaged("A"),
+ ])
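
The pattern used throughout this refactor: get_env_tools() returns an assistant and a fluent config; each expected runner call and the final CIB push are declared on the config up front, the command then runs against env_assist.get_env(), and any extra reports are checked with assert_reports(). A minimal sketch of the shape, with placeholder fixture strings (FIXTURE_CIB and FIXTURE_STATUS are illustrative, not fixtures from this file):

    from pcs.lib.commands import resource
    from pcs.test.tools.command_env import get_env_tools
    from pcs.test.tools.pcs_unittest import TestCase

    FIXTURE_CIB = "<resources/>"       # placeholder CIB resources section
    FIXTURE_STATUS = "<resources/>"    # placeholder cluster state section

    class ExampleDisable(TestCase):
        def setUp(self):
            # wires a queue of expected calls to a fake library environment
            self.env_assist, self.config = get_env_tools(test_case=self)

        def test_disable(self):
            (self.config
                .runner.cib.load(resources=FIXTURE_CIB)            # expected CIB read
                .runner.pcmk.load_state(resources=FIXTURE_STATUS)  # expected state query
                .env.push_cib(resources=FIXTURE_CIB)               # expected CIB write
            )
            resource.disable(self.env_assist.get_env(), ["A"], False)
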
diff --git a/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py b/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
index 95b44bc..5fbaf76 100644
--- a/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
+++ b/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
@@ -2,16 +2,14 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
-
from pcs.common import report_codes
from pcs.lib.commands import resource
-from pcs.lib.commands.test.resource.common import ResourceWithoutStateTest
-import pcs.lib.commands.test.resource.fixture as fixture
from pcs.lib.errors import ReportItemSeverity as severities
-from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase
fixture_primitive_cib_managed = """
@@ -655,373 +653,443 @@ def fixture_report_no_monitors(resource):
None
)
+class UnmanagePrimitive(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class UnmanagePrimitive(ResourceWithoutStateTest):
def test_nonexistent_resource(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- fixture.cib_resources(fixture_primitive_cib_managed)
- )
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_managed)
)
- assert_raise_library_error(
- lambda: resource.unmanage(self.env, ["B"]),
- fixture.report_not_found("B", "resources")
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.unmanage(self.env_assist.get_env(), ["B"]),
+ [
+ fixture.report_not_found("B", "resources")
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
def test_primitive(self):
- self.assert_command_effect(
- fixture_primitive_cib_managed,
- lambda: resource.unmanage(self.env, ["A"]),
- fixture_primitive_cib_unmanaged
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_managed)
+ .env.push_cib(resources=fixture_primitive_cib_unmanaged)
)
+ resource.unmanage(self.env_assist.get_env(), ["A"])
def test_primitive_unmanaged(self):
- self.assert_command_effect(
- fixture_primitive_cib_unmanaged,
- lambda: resource.unmanage(self.env, ["A"]),
- fixture_primitive_cib_unmanaged
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_unmanaged)
+ .env.push_cib(resources=fixture_primitive_cib_unmanaged)
)
+ resource.unmanage(self.env_assist.get_env(), ["A"])
+
+class ManagePrimitive(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class ManagePrimitive(ResourceWithoutStateTest):
def test_nonexistent_resource(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- fixture.cib_resources(fixture_primitive_cib_unmanaged)
- )
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_unmanaged)
)
- assert_raise_library_error(
- lambda: resource.manage(self.env, ["B"]),
- fixture.report_not_found("B", "resources")
+ self.env_assist.assert_raise_library_error(
+ lambda: resource.manage(self.env_assist.get_env(), ["B"]),
+ [
+ fixture.report_not_found("B", "resources")
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
def test_primitive(self):
- self.assert_command_effect(
- fixture_primitive_cib_unmanaged,
- lambda: resource.manage(self.env, ["A"]),
- fixture_primitive_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_unmanaged)
+ .env.push_cib(resources=fixture_primitive_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_primitive_managed(self):
- self.assert_command_effect(
- fixture_primitive_cib_managed,
- lambda: resource.manage(self.env, ["A"]),
- fixture_primitive_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_managed)
+ .env.push_cib(resources=fixture_primitive_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
-class UnmanageGroup(ResourceWithoutStateTest):
+class UnmanageGroup(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_primitive(self):
- self.assert_command_effect(
- fixture_group_cib_managed,
- lambda: resource.unmanage(self.env, ["A1"]),
- fixture_group_cib_unmanaged_resource
+ (self.config
+ .runner.cib.load(resources=fixture_group_cib_managed)
+ .env.push_cib(resources=fixture_group_cib_unmanaged_resource)
)
+ resource.unmanage(self.env_assist.get_env(), ["A1"])
def test_group(self):
- self.assert_command_effect(
- fixture_group_cib_managed,
- lambda: resource.unmanage(self.env, ["A"]),
- fixture_group_cib_unmanaged_all_resources
+ (self.config
+ .runner.cib.load(resources=fixture_group_cib_managed)
+ .env.push_cib(
+ resources=fixture_group_cib_unmanaged_all_resources
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A"])
-class ManageGroup(ResourceWithoutStateTest):
+class ManageGroup(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_primitive(self):
- self.assert_command_effect(
- fixture_group_cib_unmanaged_all_resources,
- lambda: resource.manage(self.env, ["A2"]),
- fixture_group_cib_unmanaged_resource
+ (self.config
+ .runner.cib.load(
+ resources=fixture_group_cib_unmanaged_all_resources
+ )
+ .env.push_cib(resources=fixture_group_cib_unmanaged_resource)
)
+ resource.manage(self.env_assist.get_env(), ["A2"])
def test_primitive_unmanaged_group(self):
- self.assert_command_effect(
- fixture_group_cib_unmanaged_resource_and_group,
- lambda: resource.manage(self.env, ["A1"]),
- fixture_group_cib_managed
+ (self.config
+ .runner.cib.load(
+ resources=fixture_group_cib_unmanaged_resource_and_group
+ )
+ .env.push_cib(resources=fixture_group_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A1"])
def test_group(self):
- self.assert_command_effect(
- fixture_group_cib_unmanaged_all_resources,
- lambda: resource.manage(self.env, ["A"]),
- fixture_group_cib_managed
+ (self.config
+ .runner.cib.load(
+ resources=fixture_group_cib_unmanaged_all_resources
+ )
+ .env.push_cib(resources=fixture_group_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_group_unmanaged_group(self):
- self.assert_command_effect(
- fixture_group_cib_unmanaged_resource_and_group,
- lambda: resource.manage(self.env, ["A"]),
- fixture_group_cib_managed
+ (self.config
+ .runner.cib.load(
+ resources=fixture_group_cib_unmanaged_resource_and_group
+ )
+ .env.push_cib(resources=fixture_group_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
+
+class UnmanageClone(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class UnmanageClone(ResourceWithoutStateTest):
def test_primitive(self):
- self.assert_command_effect(
- fixture_clone_cib_managed,
- lambda: resource.unmanage(self.env, ["A"]),
- fixture_clone_cib_unmanaged_primitive
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_managed)
+ .env.push_cib(resources=fixture_clone_cib_unmanaged_primitive)
)
+ resource.unmanage(self.env_assist.get_env(), ["A"])
def test_clone(self):
- self.assert_command_effect(
- fixture_clone_cib_managed,
- lambda: resource.unmanage(self.env, ["A-clone"]),
- fixture_clone_cib_unmanaged_primitive
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_managed)
+ .env.push_cib(resources=fixture_clone_cib_unmanaged_primitive)
)
+ resource.unmanage(self.env_assist.get_env(), ["A-clone"])
+
+class ManageClone(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class ManageClone(ResourceWithoutStateTest):
def test_primitive(self):
- self.assert_command_effect(
- fixture_clone_cib_unmanaged_clone,
- lambda: resource.manage(self.env, ["A"]),
- fixture_clone_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_unmanaged_clone)
+ .env.push_cib(resources=fixture_clone_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_primitive_unmanaged_primitive(self):
- self.assert_command_effect(
- fixture_clone_cib_unmanaged_primitive,
- lambda: resource.manage(self.env, ["A"]),
- fixture_clone_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_unmanaged_primitive)
+ .env.push_cib(resources=fixture_clone_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_primitive_unmanaged_both(self):
- self.assert_command_effect(
- fixture_clone_cib_unmanaged_both,
- lambda: resource.manage(self.env, ["A"]),
- fixture_clone_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_unmanaged_both)
+ .env.push_cib(resources=fixture_clone_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_clone(self):
- self.assert_command_effect(
- fixture_clone_cib_unmanaged_clone,
- lambda: resource.manage(self.env, ["A-clone"]),
- fixture_clone_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_unmanaged_clone)
+ .env.push_cib(resources=fixture_clone_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A-clone"])
def test_clone_unmanaged_primitive(self):
- self.assert_command_effect(
- fixture_clone_cib_unmanaged_primitive,
- lambda: resource.manage(self.env, ["A-clone"]),
- fixture_clone_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_unmanaged_primitive)
+ .env.push_cib(resources=fixture_clone_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A-clone"])
def test_clone_unmanaged_both(self):
- self.assert_command_effect(
- fixture_clone_cib_unmanaged_both,
- lambda: resource.manage(self.env, ["A-clone"]),
- fixture_clone_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_unmanaged_both)
+ .env.push_cib(resources=fixture_clone_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A-clone"])
-class UnmanageMaster(ResourceWithoutStateTest):
+class UnmanageMaster(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_primitive(self):
- self.assert_command_effect(
- fixture_master_cib_managed,
- lambda: resource.unmanage(self.env, ["A"]),
- fixture_master_cib_unmanaged_primitive
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_managed)
+ .env.push_cib(resources=fixture_master_cib_unmanaged_primitive)
)
+ resource.unmanage(self.env_assist.get_env(), ["A"])
def test_master(self):
- self.assert_command_effect(
- fixture_master_cib_managed,
- lambda: resource.unmanage(self.env, ["A-master"]),
- fixture_master_cib_unmanaged_primitive
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_managed)
+ .env.push_cib(resources=fixture_master_cib_unmanaged_primitive)
)
+ resource.unmanage(self.env_assist.get_env(), ["A-master"])
+
+class ManageMaster(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class ManageMaster(ResourceWithoutStateTest):
def test_primitive(self):
- self.assert_command_effect(
- fixture_master_cib_unmanaged_primitive,
- lambda: resource.manage(self.env, ["A"]),
- fixture_master_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_unmanaged_primitive)
+ .env.push_cib(resources=fixture_master_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_primitive_unmanaged_master(self):
- self.assert_command_effect(
- fixture_master_cib_unmanaged_master,
- lambda: resource.manage(self.env, ["A"]),
- fixture_master_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_unmanaged_master)
+ .env.push_cib(resources=fixture_master_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_primitive_unmanaged_both(self):
- self.assert_command_effect(
- fixture_master_cib_unmanaged_both,
- lambda: resource.manage(self.env, ["A"]),
- fixture_master_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_unmanaged_both)
+ .env.push_cib(resources=fixture_master_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_master(self):
- self.assert_command_effect(
- fixture_master_cib_unmanaged_master,
- lambda: resource.manage(self.env, ["A-master"]),
- fixture_master_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_unmanaged_master)
+ .env.push_cib(resources=fixture_master_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A-master"])
def test_master_unmanaged_primitive(self):
- self.assert_command_effect(
- fixture_master_cib_unmanaged_primitive,
- lambda: resource.manage(self.env, ["A-master"]),
- fixture_master_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_unmanaged_primitive)
+ .env.push_cib(resources=fixture_master_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A-master"])
def test_master_unmanaged_both(self):
- self.assert_command_effect(
- fixture_master_cib_unmanaged_both,
- lambda: resource.manage(self.env, ["A-master"]),
- fixture_master_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_unmanaged_both)
+ .env.push_cib(resources=fixture_master_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A-master"])
-class UnmanageClonedGroup(ResourceWithoutStateTest):
+class UnmanageClonedGroup(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_primitive(self):
- self.assert_command_effect(
- fixture_clone_group_cib_managed,
- lambda: resource.unmanage(self.env, ["A1"]),
- fixture_clone_group_cib_unmanaged_primitive
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_managed)
+ .env.push_cib(
+ resources=fixture_clone_group_cib_unmanaged_primitive
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A1"])
def test_group(self):
- self.assert_command_effect(
- fixture_clone_group_cib_managed,
- lambda: resource.unmanage(self.env, ["A"]),
- fixture_clone_group_cib_unmanaged_all_primitives
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_managed)
+ .env.push_cib(
+ resources=fixture_clone_group_cib_unmanaged_all_primitives
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A"])
def test_clone(self):
- self.assert_command_effect(
- fixture_clone_group_cib_managed,
- lambda: resource.unmanage(self.env, ["A-clone"]),
- fixture_clone_group_cib_unmanaged_all_primitives
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_managed)
+ .env.push_cib(
+ resources=fixture_clone_group_cib_unmanaged_all_primitives
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A-clone"])
+
+class ManageClonedGroup(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class ManageClonedGroup(ResourceWithoutStateTest):
def test_primitive(self):
- self.assert_command_effect(
- fixture_clone_group_cib_unmanaged_primitive,
- lambda: resource.manage(self.env, ["A1"]),
- fixture_clone_group_cib_managed
+ (self.config
+ .runner.cib.load(
+ resources=fixture_clone_group_cib_unmanaged_primitive
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A1"])
def test_primitive_unmanaged_all(self):
- self.assert_command_effect(
- fixture_clone_group_cib_unmanaged_everything,
- lambda: resource.manage(self.env, ["A2"]),
- fixture_clone_group_cib_unmanaged_primitive
+ (self.config
+ .runner.cib.load(
+ resources=fixture_clone_group_cib_unmanaged_everything
+ )
+ .env.push_cib(
+ resources=fixture_clone_group_cib_unmanaged_primitive
+ )
)
+ resource.manage(self.env_assist.get_env(), ["A2"])
def test_group(self):
- self.assert_command_effect(
- fixture_clone_group_cib_unmanaged_all_primitives,
- lambda: resource.manage(self.env, ["A"]),
- fixture_clone_group_cib_managed
+ (self.config
+ .runner.cib.load(
+ resources=fixture_clone_group_cib_unmanaged_all_primitives
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_group_unmanaged_all(self):
- self.assert_command_effect(
- fixture_clone_group_cib_unmanaged_everything,
- lambda: resource.manage(self.env, ["A"]),
- fixture_clone_group_cib_managed
+ (self.config
+ .runner.cib.load(
+ resources=fixture_clone_group_cib_unmanaged_everything
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_clone(self):
- self.assert_command_effect(
- fixture_clone_group_cib_unmanaged_clone,
- lambda: resource.manage(self.env, ["A-clone"]),
- fixture_clone_group_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_clone_group_cib_unmanaged_clone)
+ .env.push_cib(resources=fixture_clone_group_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A-clone"])
def test_clone_unmanaged_all(self):
- self.assert_command_effect(
- fixture_clone_group_cib_unmanaged_everything,
- lambda: resource.manage(self.env, ["A-clone"]),
- fixture_clone_group_cib_managed
+ (self.config
+ .runner.cib.load(
+ resources=fixture_clone_group_cib_unmanaged_everything
+ )
+ .env.push_cib(resources=fixture_clone_group_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A-clone"])
+
+class UnmanageBundle(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class UnmanageBundle(ResourceWithoutStateTest):
def test_primitive(self):
- self.assert_command_effect(
- fixture_bundle_cib_managed,
- lambda: resource.unmanage(self.env, ["A"]),
- fixture_bundle_cib_unmanaged_primitive
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_managed)
+ .env.push_cib(resources=fixture_bundle_cib_unmanaged_primitive)
)
+ resource.unmanage(self.env_assist.get_env(), ["A"])
def test_bundle(self):
- self.assert_command_effect(
- fixture_bundle_cib_managed,
- lambda: resource.unmanage(self.env, ["A-bundle"]),
- fixture_bundle_cib_unmanaged_both
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_managed)
+ .env.push_cib(resources=fixture_bundle_cib_unmanaged_both)
)
+ resource.unmanage(self.env_assist.get_env(), ["A-bundle"])
def test_bundle_empty(self):
- self.assert_command_effect(
- fixture_bundle_empty_cib_managed,
- lambda: resource.unmanage(self.env, ["A-bundle"]),
- fixture_bundle_empty_cib_unmanaged_bundle
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_empty_cib_managed)
+ .env.push_cib(
+ resources=fixture_bundle_empty_cib_unmanaged_bundle
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A-bundle"])
+
+class ManageBundle(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class ManageBundle(ResourceWithoutStateTest):
def test_primitive(self):
- self.assert_command_effect(
- fixture_bundle_cib_unmanaged_primitive,
- lambda: resource.manage(self.env, ["A"]),
- fixture_bundle_cib_managed,
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_unmanaged_primitive)
+ .env.push_cib(resources=fixture_bundle_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_primitive_unmanaged_bundle(self):
- self.assert_command_effect(
- fixture_bundle_cib_unmanaged_bundle,
- lambda: resource.manage(self.env, ["A"]),
- fixture_bundle_cib_managed,
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_unmanaged_bundle)
+ .env.push_cib(resources=fixture_bundle_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_primitive_unmanaged_both(self):
- self.assert_command_effect(
- fixture_bundle_cib_unmanaged_both,
- lambda: resource.manage(self.env, ["A"]),
- fixture_bundle_cib_managed,
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_unmanaged_both)
+ .env.push_cib(resources=fixture_bundle_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"])
def test_bundle(self):
- self.assert_command_effect(
- fixture_bundle_cib_unmanaged_bundle,
- lambda: resource.manage(self.env, ["A-bundle"]),
- fixture_bundle_cib_managed,
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_unmanaged_bundle)
+ .env.push_cib(resources=fixture_bundle_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A-bundle"])
def test_bundle_unmanaged_primitive(self):
- self.assert_command_effect(
- fixture_bundle_cib_unmanaged_primitive,
- lambda: resource.manage(self.env, ["A-bundle"]),
- fixture_bundle_cib_managed,
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_unmanaged_primitive)
+ .env.push_cib(resources=fixture_bundle_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A-bundle"])
def test_bundle_unmanaged_both(self):
- self.assert_command_effect(
- fixture_bundle_cib_unmanaged_both,
- lambda: resource.manage(self.env, ["A-bundle"]),
- fixture_bundle_cib_managed,
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_unmanaged_both)
+ .env.push_cib(resources=fixture_bundle_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A-bundle"])
def test_bundle_empty(self):
- self.assert_command_effect(
- fixture_bundle_empty_cib_unmanaged_bundle,
- lambda: resource.manage(self.env, ["A-bundle"]),
- fixture_bundle_empty_cib_managed
+ (self.config
+ .runner.cib.load(
+ resources=fixture_bundle_empty_cib_unmanaged_bundle
+ )
+ .env.push_cib(resources=fixture_bundle_empty_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A-bundle"])
+
+class MoreResources(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
-class MoreResources(ResourceWithoutStateTest):
fixture_cib_managed = """
<resources>
<primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
@@ -1074,11 +1142,11 @@ class MoreResources(ResourceWithoutStateTest):
</primitive>
</resources>
"""
- self.assert_command_effect(
- self.fixture_cib_managed,
- lambda: resource.unmanage(self.env, ["A", "C"]),
- fixture_cib_unmanaged
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_managed)
+ .env.push_cib(resources=fixture_cib_unmanaged)
)
+ resource.unmanage(self.env_assist.get_env(), ["A", "C"])
def test_success_manage(self):
fixture_cib_managed = """
@@ -1095,153 +1163,194 @@ class MoreResources(ResourceWithoutStateTest):
</primitive>
</resources>
"""
- self.assert_command_effect(
- self.fixture_cib_unmanaged,
- lambda: resource.manage(self.env, ["A", "C"]),
- fixture_cib_managed
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_unmanaged)
+ .env.push_cib(resources=fixture_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A", "C"])
def test_bad_resource_unmanage(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- fixture.cib_resources(self.fixture_cib_managed)
- )
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_managed)
)
- assert_raise_library_error(
- lambda: resource.unmanage(self.env, ["B", "X", "Y", "A"]),
- fixture.report_not_found("X", "resources"),
- fixture.report_not_found("Y", "resources"),
+ self.env_assist.assert_raise_library_error(
+ lambda:
+ resource.unmanage(self.env_assist.get_env(), ["B", "X", "Y", "A"]),
+ [
+ fixture.report_not_found("X", "resources"),
+ fixture.report_not_found("Y", "resources"),
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
def test_bad_resource_enable(self):
- self.runner.set_runs(
- fixture.call_cib_load(
- fixture.cib_resources(self.fixture_cib_unmanaged)
- )
+ (self.config
+ .runner.cib.load(resources=self.fixture_cib_unmanaged)
)
- assert_raise_library_error(
- lambda: resource.manage(self.env, ["B", "X", "Y", "A"]),
- fixture.report_not_found("X", "resources"),
- fixture.report_not_found("Y", "resources"),
+ self.env_assist.assert_raise_library_error(
+ lambda:
+ resource.manage(self.env_assist.get_env(), ["B", "X", "Y", "A"]),
+ [
+ fixture.report_not_found("X", "resources"),
+ fixture.report_not_found("Y", "resources"),
+ ],
+ expected_in_processor=False
)
- self.runner.assert_everything_launched()
-class WithMonitor(ResourceWithoutStateTest):
+class WithMonitor(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
def test_unmanage_noop(self):
- self.assert_command_effect(
- fixture_primitive_cib_managed,
- lambda: resource.unmanage(self.env, ["A"], True),
- fixture_primitive_cib_unmanaged
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_managed)
+ .env.push_cib(resources=fixture_primitive_cib_unmanaged)
)
+ resource.unmanage(self.env_assist.get_env(), ["A"], True)
def test_manage_noop(self):
- self.assert_command_effect(
- fixture_primitive_cib_unmanaged,
- lambda: resource.manage(self.env, ["A"], True),
- fixture_primitive_cib_managed
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_unmanaged)
+ .env.push_cib(resources=fixture_primitive_cib_managed)
)
+ resource.manage(self.env_assist.get_env(), ["A"], True)
def test_unmanage(self):
- self.assert_command_effect(
- fixture_primitive_cib_managed_op_enabled,
- lambda: resource.unmanage(self.env, ["A"], True),
- fixture_primitive_cib_unmanaged_op_disabled
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_managed_op_enabled)
+ .env.push_cib(
+ resources=fixture_primitive_cib_unmanaged_op_disabled
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A"], True)
def test_manage(self):
- self.assert_command_effect(
- fixture_primitive_cib_unmanaged_op_disabled,
- lambda: resource.manage(self.env, ["A"], True),
- fixture_primitive_cib_managed_op_enabled
+ (self.config
+ .runner.cib.load(
+ resources=fixture_primitive_cib_unmanaged_op_disabled
+ )
+ .env.push_cib(resources=fixture_primitive_cib_managed_op_enabled)
)
+ resource.manage(self.env_assist.get_env(), ["A"], True)
def test_unmanage_enabled_monitors(self):
- self.assert_command_effect(
- fixture_primitive_cib_managed_op_enabled,
- lambda: resource.unmanage(self.env, ["A"], False),
- fixture_primitive_cib_unmanaged_op_enabled
+ (self.config
+ .runner.cib.load(resources=fixture_primitive_cib_managed_op_enabled)
+ .env.push_cib(
+ resources=fixture_primitive_cib_unmanaged_op_enabled
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A"], False)
def test_manage_disabled_monitors(self):
- self.assert_command_effect(
- fixture_primitive_cib_unmanaged_op_disabled,
- lambda: resource.manage(self.env, ["A"], False),
- fixture_primitive_cib_managed_op_disabled,
- [
- fixture_report_no_monitors("A"),
- ]
+ (self.config
+ .runner.cib.load(
+ resources=fixture_primitive_cib_unmanaged_op_disabled
+ )
+ .env.push_cib(
+ resources=fixture_primitive_cib_managed_op_disabled
+ )
)
+ resource.manage(self.env_assist.get_env(), ["A"], False)
+ self.env_assist.assert_reports([
+ fixture_report_no_monitors("A"),
+ ])
def test_unmanage_clone(self):
- self.assert_command_effect(
- fixture_clone_cib_managed_op_enabled,
- lambda: resource.unmanage(self.env, ["A-clone"], True),
- fixture_clone_cib_unmanaged_primitive_op_disabled
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_managed_op_enabled)
+ .env.push_cib(
+ resources=fixture_clone_cib_unmanaged_primitive_op_disabled
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A-clone"], True)
def test_unmanage_in_clone(self):
- self.assert_command_effect(
- fixture_clone_cib_managed_op_enabled,
- lambda: resource.unmanage(self.env, ["A"], True),
- fixture_clone_cib_unmanaged_primitive_op_disabled
+ (self.config
+ .runner.cib.load(resources=fixture_clone_cib_managed_op_enabled)
+ .env.push_cib(
+ resources=fixture_clone_cib_unmanaged_primitive_op_disabled
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A"], True)
def test_unmanage_master(self):
- self.assert_command_effect(
- fixture_master_cib_managed_op_enabled,
- lambda: resource.unmanage(self.env, ["A-master"], True),
- fixture_master_cib_unmanaged_primitive_op_disabled
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_managed_op_enabled)
+ .env.push_cib(
+ resources=fixture_master_cib_unmanaged_primitive_op_disabled
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A-master"], True)
def test_unmanage_in_master(self):
- self.assert_command_effect(
- fixture_master_cib_managed_op_enabled,
- lambda: resource.unmanage(self.env, ["A"], True),
- fixture_master_cib_unmanaged_primitive_op_disabled
+ (self.config
+ .runner.cib.load(resources=fixture_master_cib_managed_op_enabled)
+ .env.push_cib(
+ resources=fixture_master_cib_unmanaged_primitive_op_disabled
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A"], True)
def test_unmanage_clone_with_group(self):
- self.assert_command_effect(
- fixture_clone_group_cib_managed_op_enabled,
- lambda: resource.unmanage(self.env, ["A-clone"], True),
- fixture_clone_group_cib_unmanaged_all_primitives_op_disabled
+ (self.config
+ .runner.cib.load(
+ resources=fixture_clone_group_cib_managed_op_enabled
+ )
+ .env.push_cib(resources=
+ fixture_clone_group_cib_unmanaged_all_primitives_op_disabled
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A-clone"], True)
def test_unmanage_group_in_clone(self):
- self.assert_command_effect(
- fixture_clone_group_cib_managed_op_enabled,
- lambda: resource.unmanage(self.env, ["A"], True),
- fixture_clone_group_cib_unmanaged_all_primitives_op_disabled
+ (self.config
+ .runner.cib.load(
+ resources=fixture_clone_group_cib_managed_op_enabled
+ )
+ .env.push_cib(resources=
+ fixture_clone_group_cib_unmanaged_all_primitives_op_disabled
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A"], True)
def test_unmanage_in_cloned_group(self):
- self.assert_command_effect(
- fixture_clone_group_cib_managed_op_enabled,
- lambda: resource.unmanage(self.env, ["A1"], True),
- fixture_clone_group_cib_unmanaged_primitive_op_disabled
+ (self.config
+ .runner.cib.load(
+ resources=fixture_clone_group_cib_managed_op_enabled
+ )
+ .env.push_cib(resources=
+ fixture_clone_group_cib_unmanaged_primitive_op_disabled
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A1"], True)
def test_unmanage_bundle(self):
- self.assert_command_effect(
- fixture_bundle_cib_managed_op_enabled,
- lambda: resource.unmanage(self.env, ["A-bundle"], True),
- fixture_bundle_cib_unmanaged_both_op_disabled
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_managed_op_enabled)
+ .env.push_cib(
+ resources=fixture_bundle_cib_unmanaged_both_op_disabled
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A-bundle"], True)
def test_unmanage_in_bundle(self):
- self.assert_command_effect(
- fixture_bundle_cib_managed_op_enabled,
- lambda: resource.unmanage(self.env, ["A"], True),
- fixture_bundle_cib_unmanaged_primitive_op_disabled
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_cib_managed_op_enabled)
+ .env.push_cib(
+ resources=fixture_bundle_cib_unmanaged_primitive_op_disabled
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A"], True)
def test_unmanage_bundle_empty(self):
- self.assert_command_effect(
- fixture_bundle_empty_cib_managed,
- lambda: resource.unmanage(self.env, ["A-bundle"], True),
- fixture_bundle_empty_cib_unmanaged_bundle
+ (self.config
+ .runner.cib.load(resources=fixture_bundle_empty_cib_managed)
+ .env.push_cib(
+ resources=fixture_bundle_empty_cib_unmanaged_bundle
+ )
)
+ resource.unmanage(self.env_assist.get_env(), ["A-bundle"], True)
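
Error paths use the assistant's assert_raise_library_error() in place of the old module-level helper: it takes the command as a lambda, the full list of expected reports, and expected_in_processor=False when the reports travel with the raised exception rather than through the report processor. The shape, as used in the tests above:

    self.env_assist.assert_raise_library_error(
        lambda: resource.unmanage(self.env_assist.get_env(), ["B"]),
        [
            fixture.report_not_found("B", "resources"),  # expected error report
        ],
        # reports are raised with the exception, not sent to the processor
        expected_in_processor=False,
    )
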
diff --git a/pcs/lib/commands/test/sbd/__init__.py b/pcs/lib/commands/test/sbd/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/commands/test/sbd/test_disable_sbd.py b/pcs/lib/commands/test/sbd/test_disable_sbd.py
new file mode 100644
index 0000000..ed2bc46
--- /dev/null
+++ b/pcs/lib/commands/test/sbd/test_disable_sbd.py
@@ -0,0 +1,57 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import json
+
+from pcs.common import report_codes
+from pcs.lib.commands.sbd import disable_sbd
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase
+
+
+class DisableSbd(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+
+ def test_base(self):
+ (self.config
+ .runner.corosync.version()
+ .corosync_conf.load(
+ node_name_list=["node-1", "node-2"],
+ )
+ .http.add_communication(
+ "check_auth",
+ [
+ dict(
+ label="node-1",
+ output=json.dumps({"notauthorized": "true"}),
+ response_code=401,
+ ),
+ dict(
+ label="node-2",
+ output=json.dumps({"success": "true"}),
+ response_code=200,
+ ),
+ ],
+ action="remote/check_auth",
+ param_list=[('check_auth_only', 1)]
+ )
+ )
+
+ self.env_assist.assert_raise_library_error(
+ lambda: disable_sbd(self.env_assist.get_env()),
+ [],
+ )
+
+ self.env_assist.assert_reports([
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+ node="node-1",
+ reason="HTTP error: 401",
+ command="remote/check_auth",
+ )
+ ])
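
Each entry passed to http.add_communication() in the new sbd tests describes the outcome for one node: either a simulated HTTP response (output plus response_code) or a connection failure (was_connected=False with a curl errno and error_msg). A sketch combining both outcomes, reusing the values from the test above:

    self.config.http.add_communication(
        "check_auth",                  # name for this expected request batch
        [
            # node-1 responds, but with an authorization error
            dict(
                label="node-1",
                output=json.dumps({"notauthorized": "true"}),
                response_code=401,
            ),
            # node-2 cannot be reached at all
            dict(
                label="node-2",
                was_connected=False,
                errno=7,               # curl: failed to connect
                error_msg="Failed connect to node-2:2224; No route to host",
            ),
        ],
        action="remote/check_auth",
        param_list=[("check_auth_only", 1)],
    )
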
diff --git a/pcs/lib/commands/test/sbd/test_enable_sbd.py b/pcs/lib/commands/test/sbd/test_enable_sbd.py
new file mode 100644
index 0000000..e870c5a
--- /dev/null
+++ b/pcs/lib/commands/test/sbd/test_enable_sbd.py
@@ -0,0 +1,172 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import json
+
+from pcs.common import report_codes
+from pcs.lib.commands.sbd import enable_sbd
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.misc import outdent
+
+class EnableSbd(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+ (self.config
+ .runner.corosync.version()
+ .corosync_conf.load(
+ node_name_list=["node-1", "node-2"],
+ auto_tie_breaker=True,
+ )
+ .http.add_communication(
+ "check_auth",
+ [
+ dict(
+ label="node-1",
+ output=json.dumps({"success": True}),
+ response_code=200,
+ ),
+ dict(
+ label="node-2",
+ was_connected=False,
+ errno=7,
+ error_msg="Failed connect to node-2:2224;"
+ " No route to host"
+ ,
+ ),
+ ],
+ action="remote/check_auth",
+ param_list=[("check_auth_only", 1)],
+ )
+ )
+
+ def test_fail_when_any_node_is_offline(self):
+ self.env_assist.assert_raise_library_error(
+ lambda: enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=None,
+ watchdog_dict={},
+ sbd_options={},
+ ),
+ [],
+ )
+ self.env_assist.assert_reports([
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node="node-2",
+ reason="Failed connect to node-2:2224; No route to host",
+ command="remote/check_auth",
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ )
+ ])
+
+ def test_success_enable(self):
+ (self.config
+ .http.add_communication(
+ "check_sbd",
+ [
+ dict(label="node-1"),
+ ],
+ output=json.dumps({
+ "sbd":{
+ "installed": True,
+ "enabled": False,
+ "running": False
+ },
+ "watchdog":{
+ "path": "/dev/watchdog",
+ "exist": True,
+ }
+ }),
+ response_code=200,
+ action="remote/check_sbd",
+ param_list=[
+ ("watchdog", "/dev/watchdog"),
+ ("device_list", [])
+ ],
+ )
+ .corosync_conf.load(
+ node_name_list=["node-1", "node-2"],
+ auto_tie_breaker=True,
+ name="corosync_conf.load-extra",
+ )
+ .http.add_communication(
+ "set_sbd_config",
+ [
+ dict(label="node-1"),
+ ],
+ output=json.dumps({
+ "sbd":{
+ "installed": True,
+ "enabled": False,
+ "running": False
+ },
+ "watchdog":{
+ "path": "/dev/watchdog",
+ "exist": True,
+ }
+ }),
+ response_code=200,
+ action="remote/set_sbd_config",
+ param_list=[("config", outdent(
+ """\
+ # This file has been generated by pcs.
+ SBD_DELAY_START=no
+ SBD_OPTS="-n node-1"
+ SBD_PACEMAKER=yes
+ SBD_STARTMODE=always
+ SBD_WATCHDOG_DEV=/dev/watchdog
+ SBD_WATCHDOG_TIMEOUT=5
+ """
+ ))],
+ )
+ .http.add_communication(
+ "remove_stonith_watchdog_timeout",
+ [
+ dict(label="node-1"),
+ ],
+ output="OK",
+ response_code=200,
+ action="remote/remove_stonith_watchdog_timeout",
+ )
+ .http.add_communication(
+ "sbd_enable",
+ [
+ dict(label="node-1"),
+ ],
+ output="SBD enabled",
+ response_code=200,
+ action="remote/sbd_enable",
+ )
+ )
+ enable_sbd(
+ self.env_assist.get_env(),
+ default_watchdog=None,
+ watchdog_dict={},
+ sbd_options={},
+ ignore_offline_nodes=True,
+ )
+ self.env_assist.assert_reports([
+ fixture.info(report_codes.SBD_ENABLING_STARTED),
+ fixture.warn(
+ report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
+ ),
+ fixture.warn(report_codes.OMITTING_NODE, node="node-2"),
+ fixture.info(report_codes.SBD_CHECK_STARTED),
+ fixture.info(report_codes.SBD_CHECK_SUCCESS, node="node-1"),
+ fixture.info(report_codes.SBD_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.SBD_CONFIG_ACCEPTED_BY_NODE,
+ node="node-1"
+ ),
+ fixture.info(
+ report_codes.SERVICE_ENABLE_SUCCESS,
+ node="node-1",
+ instance=None,
+ service="sbd",
+ ),
+ ])
diff --git a/pcs/lib/commands/test/sbd/test_get_cluster_sbd_config.py b/pcs/lib/commands/test/sbd/test_get_cluster_sbd_config.py
new file mode 100644
index 0000000..fbdb9f9
--- /dev/null
+++ b/pcs/lib/commands/test/sbd/test_get_cluster_sbd_config.py
@@ -0,0 +1,124 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.common import report_codes
+from pcs.lib.commands.sbd import get_cluster_sbd_config
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.misc import outdent
+from pcs.test.tools.pcs_unittest import TestCase
+
+
+class GetClusterSbdConfig(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+
+ def test_different_responses(self):
+ (self.config
+ .runner.corosync.version()
+ .corosync_conf.load(
+ node_name_list=[
+ "node-1",
+ "node-2",
+ "node-3",
+ "node-4",
+ "node-5",
+ ],
+ auto_tie_breaker=True,
+ )
+ .http.add_communication(
+ "get_sbd_config",
+ [
+ dict(
+ label="node-1",
+ output=outdent(
+ """\
+ # This file has been generated by pcs.
+ SBD_DELAY_START=no
+ SBD_OPTS="-n node-1"
+ SBD_PACEMAKER=yes
+ SBD_STARTMODE=always
+ SBD_WATCHDOG_DEV=/dev/watchdog
+ SBD_WATCHDOG_TIMEOUT=5
+ """
+ ),
+ response_code=200,
+ ),
+ dict(
+ label="node-2",
+ was_connected=False,
+ errno=7,
+ error_msg="Failed connect to node-2:2224;"
+ " No route to host"
+ ,
+ ),
+ dict(
+ label="node-3",
+ output= "OPTION= value",
+ response_code=200,
+ ),
+ dict(
+ label="node-4",
+ output= "# just comment",
+ response_code=200,
+ ),
+ dict(
+ label="node-5",
+ output= "invalid value",
+ response_code=200,
+ ),
+ ],
+ action="remote/get_sbd_config",
+ )
+ )
+ self.assertEqual(
+ get_cluster_sbd_config(self.env_assist.get_env()),
+ [
+ {
+ 'node': 'node-1',
+ 'config': {
+ 'SBD_WATCHDOG_TIMEOUT': '5',
+ 'SBD_WATCHDOG_DEV': '/dev/watchdog',
+ 'SBD_PACEMAKER': 'yes',
+ 'SBD_OPTS': '"-n node-1"',
+ 'SBD_STARTMODE': 'always',
+ 'SBD_DELAY_START': 'no'
+ },
+ },
+ {
+ 'node': 'node-3',
+ 'config': {
+ "OPTION": "value",
+ }
+ },
+ {
+ 'node': 'node-4',
+ 'config': {},
+ },
+ {
+ 'node': 'node-5',
+ 'config': {},
+ },
+ {
+ 'node': 'node-2',
+ 'config': None,
+ },
+ ]
+ )
+
+ self.env_assist.assert_reports([
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node="node-2",
+ reason="Failed connect to node-2:2224; No route to host",
+ command="remote/get_sbd_config",
+ ),
+ fixture.warn(
+ report_codes.UNABLE_TO_GET_SBD_CONFIG,
+ node="node-2",
+ reason="",
+ ),
+ ])
diff --git a/pcs/lib/commands/test/sbd/test_get_cluster_sbd_status.py b/pcs/lib/commands/test/sbd/test_get_cluster_sbd_status.py
new file mode 100644
index 0000000..1d57f91
--- /dev/null
+++ b/pcs/lib/commands/test/sbd/test_get_cluster_sbd_status.py
@@ -0,0 +1,149 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from functools import partial
+import json
+
+from pcs.common import report_codes
+from pcs.lib.commands.sbd import get_cluster_sbd_status
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase
+
+
+warn_unable_to_get_sbd_status = partial(
+ fixture.warn,
+ report_codes.UNABLE_TO_GET_SBD_STATUS,
+)
+
+class GetClusterSbdStatus(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(self)
+
+ def test_default_different_results_on_different_nodes(self):
+ (self.config
+ .runner.corosync.version()
+ .corosync_conf.load(
+ node_name_list=[
+ "node-1",
+ "node-2",
+ "node-3",
+ "node-4",
+ "node-5"
+ ]
+ )
+ .http.add_communication(
+ "check_sbd",
+ [
+ dict(
+ label="node-1",
+ output='{"notauthorized":"true"}',
+ response_code=401,
+ ),
+ dict(
+ label="node-2",
+ was_connected=False,
+ errno=6,
+ error_msg="Could not resolve host: node-2;"
+ " Name or service not known"
+ ,
+ ),
+ dict(
+ label="node-3",
+ output=json.dumps({
+ "sbd":{
+ "installed": True,
+ "enabled": False,
+ "running":False
+ },
+ "watchdog":{
+ "path":"",
+ "exist":False
+ },
+ "device_list":[]
+ }),
+ response_code=200,
+ ),
+ dict(
+ label="node-4",
+ output=json.dumps({
+ "watchdog":{
+ "path":"",
+ "exist":False
+ },
+ "device_list":[]
+ }),
+ response_code=200,
+ ),
+ dict(
+ label="node-5",
+ output="invalid json",
+ response_code=200,
+ ),
+ ],
+ action="remote/check_sbd",
+ param_list=[("watchdog", ""), ("device_list", "[]")],
+ )
+ )
+
+ default_status = {
+ 'running': None,
+ 'enabled': None,
+ 'installed': None,
+ }
+ self.assertEqual(
+ get_cluster_sbd_status(self.env_assist.get_env()),
+ [
+ {
+ 'node': 'node-3',
+ 'status': {
+ 'running': False,
+ 'enabled': False,
+ 'installed': True,
+ }
+ },
+ {
+ 'node': 'node-1',
+ 'status': default_status
+ },
+ {
+ 'node': 'node-2',
+ 'status': default_status
+ },
+ {
+ 'node': 'node-4',
+ 'status': default_status
+ },
+ {
+ 'node': 'node-5',
+ 'status': default_status
+ },
+ ]
+ )
+ self.env_assist.assert_reports([
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+ node="node-1",
+ reason="HTTP error: 401",
+ command="remote/check_sbd",
+ ),
+ warn_unable_to_get_sbd_status(node="node-1", reason=""),
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node="node-2",
+ reason=
+ "Could not resolve host: node-2; Name or service not known"
+ ,
+ command="remote/check_sbd",
+ ),
+ warn_unable_to_get_sbd_status(node="node-2", reason=""),
+ warn_unable_to_get_sbd_status(node="node-4", reason="'sbd'"),
+ warn_unable_to_get_sbd_status(
+ node="node-5",
+ #the reason differs in python3
+ #reason="No JSON object could be decoded",
+ ),
+ ])
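
Warnings that repeat with the same report code are built once with functools.partial over the fixture helper, as warn_unable_to_get_sbd_status does above; each call then supplies only the node-specific fields (warn_sbd_status below is a shorter illustrative name for the same construction):

    from functools import partial
    from pcs.common import report_codes
    from pcs.test.tools import fixture

    warn_sbd_status = partial(
        fixture.warn,
        report_codes.UNABLE_TO_GET_SBD_STATUS,
    )
    # per-call arguments fill in the varying details
    warn_sbd_status(node="node-2", reason="")
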
diff --git a/pcs/lib/commands/test/test_acl.py b/pcs/lib/commands/test/test_acl.py
index c18f63c..827df08 100644
--- a/pcs/lib/commands/test/test_acl.py
+++ b/pcs/lib/commands/test/test_acl.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import pcs.lib.commands.acl as cmd_acl
@@ -27,7 +26,7 @@ class AclCommandsTest(TestCase, ExtendedAssertionsMixin):
self.mock_env.get_cib.assert_called_once_with(REQUIRED_CIB_VERSION)
def assert_same_cib_pushed(self):
- self.mock_env.push_cib.assert_called_once_with(self.cib)
+ self.mock_env.push_cib.assert_called_once_with()
def assert_cib_not_pushed(self):
self.assertEqual(0, self.mock_env.push_cib.call_count)
@@ -40,7 +39,7 @@ class CibAclSection(TestCase):
with cmd_acl.cib_acl_section(env):
pass
env.get_cib.assert_called_once_with(cmd_acl.REQUIRED_CIB_VERSION)
- env.push_cib.assert_called_once_with("cib")
+ env.push_cib.assert_called_once_with()
def test_does_not_push_cib_on_exception(self):
env = mock.MagicMock()
diff --git a/pcs/lib/commands/test/test_alert.py b/pcs/lib/commands/test/test_alert.py
index 294fe00..cba7378 100644
--- a/pcs/lib/commands/test/test_alert.py
+++ b/pcs/lib/commands/test/test_alert.py
@@ -2,84 +2,30 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
+from functools import partial
import logging
-from lxml import etree
-
-from pcs.test.tools.pcs_unittest import TestCase
-
-from pcs.test.tools.pcs_unittest import mock
-from pcs.test.tools.assertions import (
- assert_raise_library_error,
- assert_xml_equal,
- assert_report_item_list_equal,
-)
-from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.common import report_codes
from pcs.lib.errors import ReportItemSeverity as Severities
from pcs.lib.env import LibraryEnvironment
from pcs.lib.external import CommandRunner
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_unittest import mock, TestCase
import pcs.lib.commands.alert as cmd_alert
-@mock.patch("pcs.lib.env.ensure_cib_version")
-class CreateAlertTest(TestCase):
- def setUp(self):
- self.mock_log = mock.MagicMock(spec_set=logging.Logger)
- self.mock_run = mock.MagicMock(spec_set=CommandRunner)
- self.mock_rep = MockLibraryReportProcessor()
- self.mock_env = LibraryEnvironment(
- self.mock_log, self.mock_rep, cib_data="<cib/>"
- )
-
- def test_no_path(self, mock_ensure_cib_version):
- assert_raise_library_error(
- lambda: cmd_alert.create_alert(
- self.mock_env, None, None, None, None
- ),
- (
- Severities.ERROR,
- report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_names": ["path"]}
- )
- )
- mock_ensure_cib_version.assert_not_called()
+get_env_tools = partial(
+ get_env_tools,
+ base_cib_filename="cib-empty-2.5.xml",
+ exception_reports_in_processor_by_default=False,
+)
- def test_upgrade_needed(self, mock_ensure_cib_version):
- original_cib_xml = """
- <cib validate-with="pacemaker-2.4.1">
- <configuration>
- </configuration>
- </cib>
- """
- self.mock_env._push_cib_xml(original_cib_xml)
- mock_ensure_cib_version.return_value = etree.XML(
- """
- <cib validate-with="pacemaker-2.5.0">
- <configuration>
- </configuration>
- </cib>
- """
- )
- cmd_alert.create_alert(
- self.mock_env,
- "my-alert",
- "/my/path",
- {
- "instance": "value",
- "another": "val"
- },
- {"meta1": "val1"},
- "my description"
- )
- assert_xml_equal(
- """
-<cib validate-with="pacemaker-2.5.0">
- <configuration>
+class CreateAlertTest(TestCase):
+ fixture_final_alerts = """
<alerts>
<alert id="my-alert" path="/my/path" description="my description">
<meta_attributes id="my-alert-meta_attributes">
@@ -103,28 +49,76 @@ class CreateAlertTest(TestCase):
</instance_attributes>
</alert>
</alerts>
- </configuration>
-</cib>
- """,
- self.mock_env._get_cib_xml()
+ """
+
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
+ def test_no_path(self):
+ self.env_assist.assert_raise_library_error(
+ lambda: cmd_alert.create_alert(
+ self.env_assist.get_env(), None, None, None, None
+ ),
+ [
+ (
+ Severities.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {"option_names": ["path"]},
+ None
+ ),
+ ],
)
- self.assertEqual(1, mock_ensure_cib_version.call_count)
+ def test_create_no_upgrade(self):
+ (self.config
+ .runner.cib.load()
+ .env.push_cib(optional_in_conf=self.fixture_final_alerts)
+ )
+ cmd_alert.create_alert(
+ self.env_assist.get_env(),
+ "my-alert",
+ "/my/path",
+ {
+ "instance": "value",
+ "another": "val"
+ },
+ {"meta1": "val1"},
+ "my description"
+ )
-class UpdateAlertTest(TestCase):
- def setUp(self):
- self.mock_log = mock.MagicMock(spec_set=logging.Logger)
- self.mock_run = mock.MagicMock(spec_set=CommandRunner)
- self.mock_rep = MockLibraryReportProcessor()
- self.mock_env = LibraryEnvironment(
- self.mock_log, self.mock_rep, cib_data="<cib/>"
+ def test_create_upgrade(self):
+ (self.config
+ .runner.cib.load(
+ filename="cib-empty.xml",
+ name="load_cib_old_version"
+ )
+ .runner.cib.upgrade()
+ .runner.cib.load()
+ .env.push_cib(optional_in_conf=self.fixture_final_alerts)
)
+ cmd_alert.create_alert(
+ self.env_assist.get_env(),
+ "my-alert",
+ "/my/path",
+ {
+ "instance": "value",
+ "another": "val"
+ },
+ {"meta1": "val1"},
+ "my description"
+ )
+ self.env_assist.assert_reports([
+ (
+ Severities.INFO,
+ report_codes.CIB_UPGRADE_SUCCESSFUL,
+ {},
+ None
+ ),
+ ])
- def test_update_all(self):
- self.mock_env._push_cib_xml(
- """
-<cib validate-with="pacemaker-2.5">
- <configuration>
+
+class UpdateAlertTest(TestCase):
+ fixture_initial_alerts = """
<alerts>
<alert id="my-alert" path="/my/path" description="my description">
<instance_attributes id="my-alert-instance_attributes">
@@ -148,25 +142,12 @@ class UpdateAlertTest(TestCase):
</meta_attributes>
</alert>
</alerts>
- </configuration>
-</cib>
- """
- )
- cmd_alert.update_alert(
- self.mock_env,
- "my-alert",
- "/another/one",
- {
- "instance": "",
- "my-attr": "its_val"
- },
- {"meta1": "val2"},
- ""
- )
- assert_xml_equal(
- """
-<cib validate-with="pacemaker-2.5">
- <configuration>
+ """
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
+ def test_update_all(self):
+ fixture_final_alerts = """
<alerts>
<alert id="my-alert" path="/another/one">
<instance_attributes id="my-alert-instance_attributes">
@@ -190,231 +171,239 @@ class UpdateAlertTest(TestCase):
</meta_attributes>
</alert>
</alerts>
- </configuration>
-</cib>
- """,
- self.mock_env._get_cib_xml()
+ """
+ (self.config
+ .runner.cib.load(optional_in_conf=self.fixture_initial_alerts)
+ .env.push_cib(
+ replace={"./configuration/alerts": fixture_final_alerts}
+ )
+ )
+ cmd_alert.update_alert(
+ self.env_assist.get_env(),
+ "my-alert",
+ "/another/one",
+ {
+ "instance": "",
+ "my-attr": "its_val"
+ },
+ {"meta1": "val2"},
+ ""
)
def test_update_instance_attribute(self):
- self.mock_env._push_cib_xml(
- """
-<cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="my-alert" path="/my/path" description="my description">
- <instance_attributes id="my-alert-instance_attributes">
- <nvpair
- id="my-alert-instance_attributes-instance"
- name="instance"
- value="value"
- />
- </instance_attributes>
- </alert>
- </alerts>
- </configuration>
-</cib>
- """
+ (self.config
+ .runner.cib.load(optional_in_conf=self.fixture_initial_alerts)
+ .env.push_cib(
+ replace={
+ './configuration/alerts/alert[@id="my-alert"]/'
+ 'instance_attributes/nvpair[@name="instance"]'
+ : """
+ <nvpair
+ id="my-alert-instance_attributes-instance"
+ name="instance"
+ value="new_val"
+ />
+ """
+ }
+ )
)
cmd_alert.update_alert(
- self.mock_env,
+ self.env_assist.get_env(),
"my-alert",
None,
{"instance": "new_val"},
{},
None
)
- assert_xml_equal(
- """
-<cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="my-alert" path="/my/path" description="my description">
- <instance_attributes id="my-alert-instance_attributes">
- <nvpair
- id="my-alert-instance_attributes-instance"
- name="instance"
- value="new_val"
- />
- </instance_attributes>
- </alert>
- </alerts>
- </configuration>
-</cib>
- """,
- self.mock_env._get_cib_xml()
- )
def test_alert_doesnt_exist(self):
- self.mock_env._push_cib_xml(
- """
- <cib validate-with="pacemaker-2.5">
- <configuration>
+ (self.config
+ .runner.cib.load(
+ optional_in_conf="""
<alerts>
<alert id="alert" path="path"/>
</alerts>
- </configuration>
- </cib>
- """
+ """
+ )
)
- assert_raise_library_error(
+ self.env_assist.assert_raise_library_error(
lambda: cmd_alert.update_alert(
- self.mock_env, "unknown", "test", {}, {}, None
+ self.env_assist.get_env(), "unknown", "test", {}, {}, None
),
- (
- Severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {"id": "unknown"}
- )
+ [
+ (
+ Severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "context_type": "alerts",
+ "context_id": "",
+ "id": "unknown",
+ "id_description": "alert",
+ },
+ None
+ ),
+ ],
)
class RemoveAlertTest(TestCase):
def setUp(self):
- self.mock_log = mock.MagicMock(spec_set=logging.Logger)
- self.mock_run = mock.MagicMock(spec_set=CommandRunner)
- self.mock_rep = MockLibraryReportProcessor()
- cib = """
- <cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="alert1" path="path"/>
- <alert id="alert2" path="/path"/>
- <alert id="alert3" path="/path"/>
- <alert id="alert4" path="/path"/>
- </alerts>
- </configuration>
- </cib>
- """
- self.mock_env = LibraryEnvironment(
- self.mock_log, self.mock_rep, cib_data=cib
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(
+ optional_in_conf="""
+ <alerts>
+ <alert id="alert1" path="path"/>
+ <alert id="alert2" path="/path"/>
+ <alert id="alert3" path="/path"/>
+ <alert id="alert4" path="/path"/>
+ </alerts>
+ """
)
def test_one_alert(self):
- cmd_alert.remove_alert(self.mock_env, ["alert2"])
- assert_xml_equal(
- """
- <cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="alert1" path="path"/>
- <alert id="alert3" path="/path"/>
- <alert id="alert4" path="/path"/>
- </alerts>
- </configuration>
- </cib>
- """,
- self.mock_env._get_cib_xml()
- )
- self.assertEqual([], self.mock_rep.report_item_list)
+ self.config.env.push_cib(
+ remove="./configuration/alerts/alert[@id='alert2']"
+ )
+ cmd_alert.remove_alert(
+ self.env_assist.get_env(),
+ ["alert2"]
+ )
def test_multiple_alerts(self):
- cmd_alert.remove_alert(self.mock_env, ["alert1", "alert3", "alert4"])
- assert_xml_equal(
- """
- <cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="alert2" path="/path"/>
- </alerts>
- </configuration>
- </cib>
- """,
- self.mock_env._get_cib_xml()
- )
- self.assertEqual([], self.mock_rep.report_item_list)
+ self.config.env.push_cib(
+ remove=[
+ "./configuration/alerts/alert[@id='alert1']",
+ "./configuration/alerts/alert[@id='alert3']",
+ "./configuration/alerts/alert[@id='alert4']",
+ ]
+ )
+ cmd_alert.remove_alert(
+ self.env_assist.get_env(),
+ ["alert1", "alert3", "alert4"]
+ )
def test_no_alert(self):
- cmd_alert.remove_alert(self.mock_env, [])
- assert_xml_equal(
- """
- <cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="alert1" path="path"/>
- <alert id="alert2" path="/path"/>
- <alert id="alert3" path="/path"/>
- <alert id="alert4" path="/path"/>
- </alerts>
- </configuration>
- </cib>
- """,
- self.mock_env._get_cib_xml()
- )
- self.assertEqual([], self.mock_rep.report_item_list)
-
- def test_failure(self):
- report_list = [
- (
- Severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {"id": "unknown"}
- ),
- (
- Severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {"id": "unknown2"}
- )
- ]
- assert_raise_library_error(
+ self.config.env.push_cib()
+ cmd_alert.remove_alert(
+ self.env_assist.get_env(),
+ []
+ )
+
+ def test_alerts_dont_exist(self):
+ self.env_assist.assert_raise_library_error(
lambda: cmd_alert.remove_alert(
- self.mock_env, ["unknown", "alert1", "unknown2", "alert2"]
+ self.env_assist.get_env(),
+ ["unknown1", "alert1", "unknown2", "alert2"]
),
- *report_list
- )
- assert_report_item_list_equal(
- self.mock_rep.report_item_list, report_list
+ [
+ (
+ Severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "context_type": "alerts",
+ "context_id": "",
+ "id": "unknown1",
+ "id_description": "alert",
+ },
+ None
+ ),
+ (
+ Severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "context_type": "alerts",
+ "context_id": "",
+ "id": "unknown2",
+ "id_description": "alert",
+ },
+ None
+ ),
+ ],
+ expected_in_processor=True
)
class AddRecipientTest(TestCase):
def setUp(self):
- self.mock_log = mock.MagicMock(spec_set=logging.Logger)
- self.mock_run = mock.MagicMock(spec_set=CommandRunner)
- self.mock_rep = MockLibraryReportProcessor()
- cib = """
- <cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="alert" path="path">
- <recipient id="alert-recipient" value="value1"/>
- </alert>
- </alerts>
- </configuration>
- </cib>
- """
- self.mock_env = LibraryEnvironment(
- self.mock_log, self.mock_rep, cib_data=cib
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(
+ optional_in_conf="""
+ <alerts>
+ <alert id="alert" path="path">
+ <recipient id="alert-recipient" value="value1"/>
+ </alert>
+ </alerts>
+ """
)
def test_value_not_defined(self):
- assert_raise_library_error(
+ self.config.remove("runner.cib.load")
+ self.env_assist.assert_raise_library_error(
lambda: cmd_alert.add_recipient(
- self.mock_env, "unknown", "", {}, {}
+ self.env_assist.get_env(), "unknown", "", {}, {}
),
- (
- Severities.ERROR,
- report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_names": ["value"]}
- )
+ [
+ (
+ Severities.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {"option_names": ["value"]}
+ )
+ ],
)
def test_recipient_already_exists(self):
- assert_raise_library_error(
+ self.env_assist.assert_raise_library_error(
lambda: cmd_alert.add_recipient(
- self.mock_env, "alert", "value1", {}, {},
+ self.env_assist.get_env(), "alert", "value1", {}, {},
recipient_id="alert-recipient"
),
- (
- Severities.ERROR,
- report_codes.ID_ALREADY_EXISTS,
- {"id": "alert-recipient"}
- )
+ [
+ (
+ Severities.ERROR,
+ report_codes.ID_ALREADY_EXISTS,
+ {"id": "alert-recipient"}
+ )
+ ],
)
def test_without_id(self):
+ self.config.env.push_cib(
+ replace={
+ './/alert[@id="alert"]' :
+ """
+ <alert id="alert" path="path">
+ <recipient id="alert-recipient" value="value1"/>
+ <recipient id="alert-recipient-1" value="value">
+ <meta_attributes
+ id="alert-recipient-1-meta_attributes"
+ >
+ <nvpair
+ id="alert-recipient-1-meta_attributes-attr1"
+ name="attr1"
+ value="val1"
+ />
+ <nvpair
+ id="alert-recipient-1-meta_attributes-attr2"
+ name="attr2"
+ value="val2"
+ />
+ </meta_attributes>
+ <instance_attributes
+ id="alert-recipient-1-instance_attributes"
+ >
+ <nvpair
+ id="alert-recipient-1-instance_attributes-attr1"
+ name="attr1"
+ value="val1"
+ />
+ </instance_attributes>
+ </recipient>
+ </alert>
+ """
+ }
+ )
cmd_alert.add_recipient(
- self.mock_env,
+ self.env_assist.get_env(),
"alert",
"value",
{"attr1": "val1"},
@@ -423,49 +412,45 @@ class AddRecipientTest(TestCase):
"attr1": "val1"
}
)
- assert_xml_equal(
- """
-<cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="alert" path="path">
- <recipient id="alert-recipient" value="value1"/>
- <recipient id="alert-recipient-1" value="value">
- <meta_attributes
- id="alert-recipient-1-meta_attributes"
- >
- <nvpair
- id="alert-recipient-1-meta_attributes-attr1"
- name="attr1"
- value="val1"
- />
- <nvpair
- id="alert-recipient-1-meta_attributes-attr2"
- name="attr2"
- value="val2"
- />
- </meta_attributes>
- <instance_attributes
- id="alert-recipient-1-instance_attributes"
- >
- <nvpair
- id="alert-recipient-1-instance_attributes-attr1"
- name="attr1"
- value="val1"
- />
- </instance_attributes>
- </recipient>
- </alert>
- </alerts>
- </configuration>
-</cib>
- """,
- self.mock_env._get_cib_xml()
- )
def test_with_id(self):
+ self.config.env.push_cib(
+ replace={
+ './/alert[@id="alert"]':
+ """
+ <alert id="alert" path="path">
+ <recipient id="alert-recipient" value="value1"/>
+ <recipient id="my-recipient" value="value">
+ <meta_attributes
+ id="my-recipient-meta_attributes"
+ >
+ <nvpair
+ id="my-recipient-meta_attributes-attr1"
+ name="attr1"
+ value="val1"
+ />
+ <nvpair
+ id="my-recipient-meta_attributes-attr2"
+ name="attr2"
+ value="val2"
+ />
+ </meta_attributes>
+ <instance_attributes
+ id="my-recipient-instance_attributes"
+ >
+ <nvpair
+ id="my-recipient-instance_attributes-attr1"
+ name="attr1"
+ value="val1"
+ />
+ </instance_attributes>
+ </recipient>
+ </alert>
+ """
+ }
+ )
cmd_alert.add_recipient(
- self.mock_env,
+ self.env_assist.get_env(),
"alert",
"value",
{"attr1": "val1"},
@@ -475,122 +460,122 @@ class AddRecipientTest(TestCase):
},
recipient_id="my-recipient"
)
- assert_xml_equal(
- """
-<cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="alert" path="path">
- <recipient id="alert-recipient" value="value1"/>
- <recipient id="my-recipient" value="value">
- <meta_attributes
- id="my-recipient-meta_attributes"
- >
- <nvpair
- id="my-recipient-meta_attributes-attr1"
- name="attr1"
- value="val1"
- />
- <nvpair
- id="my-recipient-meta_attributes-attr2"
- name="attr2"
- value="val2"
- />
- </meta_attributes>
- <instance_attributes
- id="my-recipient-instance_attributes"
- >
- <nvpair
- id="my-recipient-instance_attributes-attr1"
- name="attr1"
- value="val1"
- />
- </instance_attributes>
- </recipient>
- </alert>
- </alerts>
- </configuration>
-</cib>
- """,
- self.mock_env._get_cib_xml()
- )
-
class UpdateRecipientTest(TestCase):
def setUp(self):
- self.mock_log = mock.MagicMock(spec_set=logging.Logger)
- self.mock_run = mock.MagicMock(spec_set=CommandRunner)
- self.mock_rep = MockLibraryReportProcessor()
- cib = """
-<cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="alert" path="path">
- <recipient id="alert-recipient" value="value1"/>
- <recipient id="alert-recipient-1" value="value" description="d">
- <meta_attributes
- id="alert-recipient-1-meta_attributes"
- >
- <nvpair
- id="alert-recipient-1-meta_attributes-attr1"
- name="attr1"
- value="val1"
- />
- <nvpair
- id="alert-recipient-1-meta_attributes-attr2"
- name="attr2"
- value="val2"
- />
- </meta_attributes>
- <instance_attributes
- id="alert-recipient-1-instance_attributes"
- >
- <nvpair
- id="alert-recipient-1-instance_attributes-attr1"
- name="attr1"
- value="val1"
- />
- </instance_attributes>
- </recipient>
- </alert>
- </alerts>
- </configuration>
-</cib>
- """
- self.mock_env = LibraryEnvironment(
- self.mock_log, self.mock_rep, cib_data=cib
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(
+ optional_in_conf="""
+ <alerts>
+ <alert id="alert" path="path">
+ <recipient id="alert-recipient" value="value1"/>
+ <recipient id="alert-recipient-1" value="value"
+ description="d"
+ >
+ <meta_attributes
+ id="alert-recipient-1-meta_attributes"
+ >
+ <nvpair
+ id="alert-recipient-1-meta_attributes-attr1"
+ name="attr1"
+ value="val1"
+ />
+ <nvpair
+ id="alert-recipient-1-meta_attributes-attr2"
+ name="attr2"
+ value="val2"
+ />
+ </meta_attributes>
+ <instance_attributes
+ id="alert-recipient-1-instance_attributes"
+ >
+ <nvpair
+ id="alert-recipient-1-instance_attributes-attr1"
+ name="attr1"
+ value="val1"
+ />
+ </instance_attributes>
+ </recipient>
+ </alert>
+ </alerts>
+ """
)
def test_empty_value(self):
- assert_raise_library_error(
+ self.config.remove("runner.cib.load")
+ self.env_assist.assert_raise_library_error(
lambda: cmd_alert.update_recipient(
- self.mock_env, "alert-recipient-1", {}, {}, recipient_value=""
+ self.env_assist.get_env(),
+ "alert-recipient-1", {}, {}, recipient_value=""
),
- (
- Severities.ERROR,
- report_codes.CIB_ALERT_RECIPIENT_VALUE_INVALID,
- {"recipient": ""}
- )
+ [
+ (
+ Severities.ERROR,
+ report_codes.CIB_ALERT_RECIPIENT_VALUE_INVALID,
+ {"recipient": ""}
+ )
+ ],
)
def test_recipient_not_found(self):
- assert_raise_library_error(
+ self.env_assist.assert_raise_library_error(
lambda: cmd_alert.update_recipient(
- self.mock_env, "recipient", {}, {}
+ self.env_assist.get_env(), "recipient", {}, {}
),
- (
- Severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "recipient",
- "id_description": "recipient"
- }
- )
+ [
+ (
+ Severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "id": "recipient",
+ "id_description": "recipient"
+ }
+ )
+ ],
)
def test_update_all(self):
+ self.config.env.push_cib(
+ replace={
+ './/alert[@id="alert"]':
+ """
+ <alert id="alert" path="path">
+ <recipient id="alert-recipient" value="value1"/>
+ <recipient
+ id="alert-recipient-1"
+ value="new_val"
+ description="desc"
+ >
+ <meta_attributes
+ id="alert-recipient-1-meta_attributes"
+ >
+ <nvpair
+ id="alert-recipient-1-meta_attributes-attr2"
+ name="attr2"
+ value="val2"
+ />
+ <nvpair
+ id="alert-recipient-1-meta_attributes-attr3"
+ name="attr3"
+ value="new_val"
+ />
+ </meta_attributes>
+ <instance_attributes
+ id="alert-recipient-1-instance_attributes"
+ >
+ <nvpair
+ id="alert-recipient-1-instance_attributes-attr1"
+ name="attr1"
+ value="value"
+ />
+ </instance_attributes>
+ </recipient>
+ </alert>
+ """,
+ }
+ )
cmd_alert.update_recipient(
- self.mock_env,
+ self.env_assist.get_env(),
"alert-recipient-1",
{"attr1": "value"},
{
@@ -600,166 +585,79 @@ class UpdateRecipientTest(TestCase):
recipient_value="new_val",
description="desc"
)
- assert_xml_equal(
- """
-<cib validate-with="pacemaker-2.5">
- <configuration>
+
+
+class RemoveRecipientTest(TestCase):
+ fixture_initial_alerts = """
<alerts>
<alert id="alert" path="path">
- <recipient id="alert-recipient" value="value1"/>
- <recipient
- id="alert-recipient-1"
- value="new_val"
- description="desc"
- >
- <meta_attributes
- id="alert-recipient-1-meta_attributes"
- >
- <nvpair
- id="alert-recipient-1-meta_attributes-attr2"
- name="attr2"
- value="val2"
- />
- <nvpair
- id="alert-recipient-1-meta_attributes-attr3"
- name="attr3"
- value="new_val"
- />
- </meta_attributes>
- <instance_attributes
- id="alert-recipient-1-instance_attributes"
- >
- <nvpair
- id="alert-recipient-1-instance_attributes-attr1"
- name="attr1"
- value="value"
- />
- </instance_attributes>
- </recipient>
+ <recipient id="alert-recip1" value="value1"/>
+ <recipient id="alert-recip2" value="value2"/>
+ </alert>
+ <alert id="alert2" path="path">
+ <recipient id="alert2-recip3" value="value3"/>
+ <recipient id="alert2-recip4" value="value4"/>
</alert>
</alerts>
- </configuration>
-</cib>
- """,
- self.mock_env._get_cib_xml()
- )
+ """
-
-class RemoveRecipientTest(TestCase):
def setUp(self):
- self.mock_log = mock.MagicMock(spec_set=logging.Logger)
- self.mock_run = mock.MagicMock(spec_set=CommandRunner)
- self.mock_rep = MockLibraryReportProcessor()
- cib = """
- <cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="alert" path="path">
- <recipient id="alert-recipient1" value="value1"/>
- <recipient id="alert-recipient2" value="value2"/>
- </alert>
- <alert id="alert2" path="path">
- <recipient id="alert2-recipient3" value="value3"/>
- <recipient id="alert2-recipient4" value="value4"/>
- </alert>
- </alerts>
- </configuration>
- </cib>
- """
- self.mock_env = LibraryEnvironment(
- self.mock_log, self.mock_rep, cib_data=cib
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load(
+ optional_in_conf=self.fixture_initial_alerts
)
def test_recipient_not_found(self):
- report_list = [
- (
- Severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {"id": "recipient"}
- ),
- (
- Severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {"id": "alert2-recipient1"}
- )
- ]
- assert_raise_library_error(
+ self.env_assist.assert_raise_library_error(
lambda: cmd_alert.remove_recipient(
- self.mock_env,
- ["recipient", "alert-recipient1", "alert2-recipient1"]
+ self.env_assist.get_env(),
+ ["recipient", "alert-recip1", "alert2-recip1"]
),
- *report_list
- )
- assert_report_item_list_equal(
- self.mock_rep.report_item_list, report_list
+ [
+ (
+ Severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {"id": "recipient"},
+ None
+ ),
+ (
+ Severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {"id": "alert2-recip1"},
+ None
+ )
+ ],
+ expected_in_processor=True
)
def test_one_recipient(self):
- cmd_alert.remove_recipient(self.mock_env, ["alert-recipient1"])
- assert_xml_equal(
- """
- <cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="alert" path="path">
- <recipient id="alert-recipient2" value="value2"/>
- </alert>
- <alert id="alert2" path="path">
- <recipient id="alert2-recipient3" value="value3"/>
- <recipient id="alert2-recipient4" value="value4"/>
- </alert>
- </alerts>
- </configuration>
- </cib>
- """,
- self.mock_env._get_cib_xml()
+ self.config.env.push_cib(
+ remove="./configuration/alerts/alert/recipient[@id='alert-recip1']"
+ )
+ cmd_alert.remove_recipient(
+ self.env_assist.get_env(),
+ ["alert-recip1"]
)
- self.assertEqual([], self.mock_rep.report_item_list)
def test_multiple_recipients(self):
- cmd_alert.remove_recipient(
- self.mock_env,
- ["alert-recipient1", "alert-recipient2", "alert2-recipient4"]
+ self.config.env.push_cib(
+ remove=[
+ "./configuration/alerts/alert/recipient[@id='alert-recip1']",
+ "./configuration/alerts/alert/recipient[@id='alert-recip2']",
+ "./configuration/alerts/alert/recipient[@id='alert2-recip4']",
+ ]
)
- assert_xml_equal(
- """
- <cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="alert" path="path"/>
- <alert id="alert2" path="path">
- <recipient id="alert2-recipient3" value="value3"/>
- </alert>
- </alerts>
- </configuration>
- </cib>
- """,
- self.mock_env._get_cib_xml()
+ cmd_alert.remove_recipient(
+ self.env_assist.get_env(),
+ ["alert-recip1", "alert-recip2", "alert2-recip4"]
)
- self.assertEqual([], self.mock_rep.report_item_list)
def test_no_recipient(self):
- cmd_alert.remove_recipient(self.mock_env, [])
- assert_xml_equal(
- """
- <cib validate-with="pacemaker-2.5">
- <configuration>
- <alerts>
- <alert id="alert" path="path">
- <recipient id="alert-recipient1" value="value1"/>
- <recipient id="alert-recipient2" value="value2"/>
- </alert>
- <alert id="alert2" path="path">
- <recipient id="alert2-recipient3" value="value3"/>
- <recipient id="alert2-recipient4" value="value4"/>
- </alert>
- </alerts>
- </configuration>
- </cib>
- """,
- self.mock_env._get_cib_xml()
+ self.config.env.push_cib()
+ cmd_alert.remove_recipient(
+ self.env_assist.get_env(),
+ []
)
- self.assertEqual([], self.mock_rep.report_item_list)
@mock.patch("pcs.lib.cib.alert.get_all_alerts")
diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py
index 4e091a2..cd43c2c 100644
--- a/pcs/lib/commands/test/test_booth.py
+++ b/pcs/lib/commands/test/test_booth.py
@@ -2,13 +2,12 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os
import base64
-from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase, skip
from pcs.test.tools.pcs_unittest import mock
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
@@ -139,6 +138,7 @@ class ConfigDestroyTest(TestCase):
)
])
+ at skip("TODO: rewrite using new testing fremework")
@mock.patch("pcs.lib.commands.booth.config_structure.get_authfile")
@mock.patch("pcs.lib.commands.booth.parse")
@mock.patch("pcs.lib.booth.config_files.read_authfile")
@@ -381,6 +381,7 @@ class StopBoothTest(TestCase):
mock_is_systemctl.assert_called_once_with()
+ at skip("TODO: rewrite using new testing fremework")
@mock.patch("pcs.lib.booth.sync.pull_config_from_node")
class PullConfigTest(TestCase):
def setUp(self):
diff --git a/pcs/lib/commands/test/test_constraint_common.py b/pcs/lib/commands/test/test_constraint_common.py
index cb5e177..bf35938 100644
--- a/pcs/lib/commands/test/test_constraint_common.py
+++ b/pcs/lib/commands/test/test_constraint_common.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
@@ -60,7 +59,7 @@ class CreateWithSetTest(TestCase):
def test_put_new_constraint_to_constraint_section(self):
self.create()
- self.env.push_cib.assert_called_once_with(self.cib)
+ self.env.push_cib.assert_called_once_with()
self.independent_cib.find(".//constraints").append(etree.XML("""
<rsc_some id="some_id" symmetrical="true">
<resource_set id="pcs_rsc_set_A_B" role="Master">
@@ -80,7 +79,7 @@ class CreateWithSetTest(TestCase):
def test_refuse_duplicate(self):
self.create()
- self.env.push_cib.assert_called_once_with(self.cib)
+ self.env.push_cib.assert_called_once_with()
assert_raise_library_error(self.create, (
severities.ERROR,
report_codes.DUPLICATE_CONSTRAINTS_EXIST,
@@ -107,8 +106,8 @@ class CreateWithSetTest(TestCase):
self.create()
self.create(duplication_alowed=True)
expected_calls = [
- mock.call(self.cib),
- mock.call(self.cib),
+ mock.call(),
+ mock.call(),
]
self.assertEqual(self.env.push_cib.call_count, len(expected_calls))
self.env.push_cib.assert_has_calls(expected_calls)
diff --git a/pcs/lib/commands/test/test_fencing_topology.py b/pcs/lib/commands/test/test_fencing_topology.py
index a6139c4..42e8c29 100644
--- a/pcs/lib/commands/test/test_fencing_topology.py
+++ b/pcs/lib/commands/test/test_fencing_topology.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
@@ -58,7 +57,7 @@ class AddLevel(TestCase):
mock_status.assert_called_once_with("mock get_cluster_status_xml")
mock_get_topology.assert_called_once_with("mocked cib")
mock_get_resources.assert_called_once_with("mocked cib")
- mock_push_cib.assert_called_once_with("mocked cib")
+ mock_push_cib.assert_called_once_with()
def test_success(
self, mock_get_cib, mock_status_xml, mock_status, mock_push_cib,
@@ -192,7 +191,7 @@ class RemoveAllLevels(TestCase):
mock_remove.assert_called_once_with("topology el")
mock_get_topology.assert_called_once_with("mocked cib")
- mock_push_cib.assert_called_once_with("mocked cib")
+ mock_push_cib.assert_called_once_with()
@patch_command("cib_fencing_topology.remove_levels_by_params")
@@ -218,7 +217,7 @@ class RemoveLevelsByParams(TestCase):
"ignore"
)
mock_get_topology.assert_called_once_with("mocked cib")
- mock_push_cib.assert_called_once_with("mocked cib")
+ mock_push_cib.assert_called_once_with()
@patch_command("cib_fencing_topology.verify")
diff --git a/pcs/lib/commands/test/test_node.py b/pcs/lib/commands/test/test_node.py
index 13f25dc..1dadd06 100644
--- a/pcs/lib/commands/test/test_node.py
+++ b/pcs/lib/commands/test/test_node.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
@@ -283,7 +282,7 @@ class CibRunnerNodes(TestCase):
get_cluster_status_xml.assert_called_once_with("mocked cmd_runner")
ClusterState.assert_called_once_with("mock get_cluster_status_xml")
- push_cib.assert_called_once_with("mocked cib", wait)
+ push_cib.assert_called_once_with(wait=wait)
@patch_env("ensure_wait_satisfiable", mock.Mock(side_effect=LibraryError))
def test_raises_when_wait_is_not_satisfiable(self, push_cib):
diff --git a/pcs/lib/commands/test/test_resource_agent.py b/pcs/lib/commands/test/test_resource_agent.py
index fd1c2bb..6e02941 100644
--- a/pcs/lib/commands/test/test_resource_agent.py
+++ b/pcs/lib/commands/test/test_resource_agent.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import logging
diff --git a/pcs/lib/commands/test/test_stonith_agent.py b/pcs/lib/commands/test/test_stonith_agent.py
index f3a4fe9..d021795 100644
--- a/pcs/lib/commands/test/test_stonith_agent.py
+++ b/pcs/lib/commands/test/test_stonith_agent.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import logging
@@ -323,3 +322,40 @@ class ValidateParameters(TestCase):
[
],
)
+
+
+@mock.patch.object(lib_ra.StonithAgent, "get_actions")
+class StonithAgentMetadataGetCibDefaultActions(TestCase):
+ fixture_actions = [
+ {"name": "custom1", "timeout": "40s"},
+ {"name": "custom2", "interval": "25s", "timeout": "60s"},
+ {"name": "meta-data"},
+ {"name": "monitor", "interval": "10s", "timeout": "30s"},
+ {"name": "start", "interval": "40s"},
+ {"name": "status", "interval": "15s", "timeout": "20s"},
+ {"name": "validate-all"},
+ ]
+
+ def setUp(self):
+ self.agent = lib_ra.StonithAgent(
+ mock.MagicMock(spec_set=CommandRunner),
+ "fence_dummy"
+ )
+
+ def test_select_only_actions_for_cib(self, get_actions):
+ get_actions.return_value = self.fixture_actions
+ self.assertEqual(
+ [
+ {"name": "monitor", "interval": "10s", "timeout": "30s"}
+ ],
+ self.agent.get_cib_default_actions()
+ )
+
+ def test_select_only_necessary_actions_for_cib(self, get_actions):
+ get_actions.return_value = self.fixture_actions
+ self.assertEqual(
+ [
+ {"name": "monitor", "interval": "10s", "timeout": "30s"}
+ ],
+ self.agent.get_cib_default_actions(necessary_only=True)
+ )
diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py
index 7287bc6..956985e 100644
--- a/pcs/lib/commands/test/test_ticket.py
+++ b/pcs/lib/commands/test/test_ticket.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
@@ -13,10 +12,8 @@ from pcs.common import report_codes
from pcs.lib.commands.constraint import ticket as ticket_command
from pcs.lib.errors import ReportItemSeverity as severities
from pcs.lib.test.misc import get_mocked_env
-from pcs.test.tools.assertions import (
- assert_xml_equal,
- assert_raise_library_error
-)
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.command_env import get_env_tools
from pcs.test.tools.misc import get_test_resource as rc
from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
@@ -29,31 +26,38 @@ class CreateTest(TestCase):
)
def test_sucess_create(self):
- resource_xml = '<primitive id="resourceA" class="service" type="exim"/>'
- cib = (
- self.create_cib()
- .append_to_first_tag_name('resources', resource_xml)
- )
-
- env = get_mocked_env(cib_data=str(cib))
- ticket_command.create(env, "ticketA", "resourceA", {
- "loss-policy": "fence",
- "rsc-role": "master"
- })
-
- assert_xml_equal(
- env._get_cib_xml(),
- str(cib.append_to_first_tag_name(
- 'constraints', """
- <rsc_ticket
- id="ticket-ticketA-resourceA-Master"
- rsc="resourceA"
- rsc-role="Master"
- ticket="ticketA"
- loss-policy="fence"
- />
+ env_assist, config = get_env_tools(test_case=self)
+ (config
+ .runner.cib.load(
+ resources="""
+ <resources>
+ <primitive id="resourceA" class="service" type="exim"/>
+ </resources>
+ """
+ )
+ .env.push_cib(
+ optional_in_conf="""
+ <constraints>
+ <rsc_ticket
+ id="ticket-ticketA-resourceA-Master"
+ rsc="resourceA"
+ rsc-role="Master"
+ ticket="ticketA"
+ loss-policy="fence"
+ />
+ </constraints>
"""
- ))
+ )
+ )
+
+ ticket_command.create(
+ env_assist.get_env(),
+ "ticketA",
+ "resourceA",
+ {
+ "loss-policy": "fence",
+ "rsc-role": "master"
+ }
)
def test_refuse_for_nonexisting_resource(self):
diff --git a/pcs/lib/communication/__init__.py b/pcs/lib/communication/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/communication/booth.py b/pcs/lib/communication/booth.py
new file mode 100644
index 0000000..240738a
--- /dev/null
+++ b/pcs/lib/communication/booth.py
@@ -0,0 +1,113 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import base64
+import json
+import os
+
+from pcs.common.node_communicator import RequestData
+from pcs.lib import reports
+from pcs.lib.communication.tools import (
+ AllAtOnceStrategyMixin,
+ AllSameDataMixin,
+ RunRemotelyBase,
+ SkipOfflineMixin,
+ SimpleResponseProcessingMixin,
+)
+
+
+class BoothSendConfig(
+ SimpleResponseProcessingMixin, SkipOfflineMixin, AllSameDataMixin,
+ AllAtOnceStrategyMixin, RunRemotelyBase,
+):
+ def __init__(
+ self, report_processor, booth_name, config_data,
+ authfile=None, authfile_data=None, skip_offline_targets=False
+ ):
+ super(BoothSendConfig, self).__init__(report_processor)
+ self._set_skip_offline(skip_offline_targets)
+ self._booth_name = booth_name
+ self._config_data = config_data
+ self._authfile = authfile
+ self._authfile_data = authfile_data
+
+ def _get_request_data(self):
+ data = {
+ "config": {
+ "name": "{0}.conf".format(self._booth_name),
+ "data": self._config_data
+ }
+ }
+ if self._authfile is not None and self._authfile_data is not None:
+ data["authfile"] = {
+ "name": os.path.basename(self._authfile),
+ "data": base64.b64encode(self._authfile_data).decode("utf-8")
+ }
+ return RequestData(
+ "remote/booth_set_config", [("data_json", json.dumps(data))]
+ )
+
+ def _get_success_report(self, node_label):
+ return reports.booth_config_accepted_by_node(
+ node_label, [self._booth_name]
+ )
+
+ def before(self):
+ self._report(reports.booth_config_distribution_started())
+
+
+class ProcessJsonDataMixin(object):
+ __data = None
+
+ @property
+ def _data(self):
+ if self.__data is None:
+ self.__data = []
+ return self.__data
+
+ def _process_response(self, response):
+ report = self._get_response_report(response)
+ if report is not None:
+ self._report(report)
+ return
+ target = response.request.target
+ try:
+ self._data.append((target, json.loads(response.data)))
+ except ValueError:
+ self._report(reports.invalid_response_format(target.label))
+
+ def on_complete(self):
+ return self._data
+
+
+class BoothGetConfig(
+ ProcessJsonDataMixin, AllSameDataMixin, AllAtOnceStrategyMixin,
+ RunRemotelyBase,
+):
+ def __init__(self, report_processor, booth_name):
+ super(BoothGetConfig, self).__init__(report_processor)
+ self._booth_name = booth_name
+
+ def _get_request_data(self):
+ return RequestData(
+ "remote/booth_get_config", [("name", self._booth_name)]
+ )
+
+
+class BoothSaveFiles(
+ ProcessJsonDataMixin, AllSameDataMixin, AllAtOnceStrategyMixin,
+ RunRemotelyBase,
+):
+ def __init__(self, report_processor, file_list, rewrite_existing=True):
+ super(BoothSaveFiles, self).__init__(report_processor)
+ self._file_list = file_list
+ self._rewrite_existing = rewrite_existing
+
+ def _get_request_data(self):
+ data = [("data_json", json.dumps(self._file_list))]
+ if self._rewrite_existing:
+ data.append(("rewrite_existing", "1"))
+ return RequestData("remote/booth_save_files", data)
diff --git a/pcs/lib/communication/corosync.py b/pcs/lib/communication/corosync.py
new file mode 100644
index 0000000..dca74a9
--- /dev/null
+++ b/pcs/lib/communication/corosync.py
@@ -0,0 +1,89 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import json
+
+from pcs.common.node_communicator import RequestData
+from pcs.lib import reports
+from pcs.lib.communication.tools import (
+ AllAtOnceStrategyMixin,
+ AllSameDataMixin,
+ RunRemotelyBase,
+ SkipOfflineMixin,
+)
+
+
+class CheckCorosyncOffline(
+ SkipOfflineMixin, AllSameDataMixin, AllAtOnceStrategyMixin, RunRemotelyBase
+):
+ def __init__(self, report_processor, skip_offline_targets=False):
+ super(CheckCorosyncOffline, self).__init__(report_processor)
+ self._set_skip_offline(skip_offline_targets)
+
+ def _get_request_data(self):
+ return RequestData("remote/status")
+
+ def _process_response(self, response):
+ report = self._get_response_report(response)
+ node_label = response.request.target.label
+ if report is not None:
+ self._report_list([
+ report,
+ reports.corosync_not_running_check_node_error(
+ node_label,
+ self._failure_severity,
+ self._failure_forceable,
+ )
+ ])
+ return
+ try:
+ status = response.data
+ if not json.loads(status)["corosync"]:
+ report = reports.corosync_not_running_on_node_ok(node_label)
+ else:
+ report = reports.corosync_running_on_node_fail(node_label)
+ except (ValueError, LookupError):
+ report = reports.corosync_not_running_check_node_error(
+ node_label, self._failure_severity, self._failure_forceable
+ )
+ self._report(report)
+
+ def before(self):
+ self._report(reports.corosync_not_running_check_started())
+
+
+class DistributeCorosyncConf(
+ SkipOfflineMixin, AllSameDataMixin, AllAtOnceStrategyMixin, RunRemotelyBase
+):
+ def __init__(
+ self, report_processor, config_text, skip_offline_targets=False
+ ):
+ super(DistributeCorosyncConf, self).__init__(report_processor)
+ self._config_text = config_text
+ self._set_skip_offline(skip_offline_targets)
+
+ def _get_request_data(self):
+ return RequestData(
+ "remote/set_corosync_conf", [("corosync_conf", self._config_text)]
+ )
+
+ def _process_response(self, response):
+ report = self._get_response_report(response)
+ node_label = response.request.target.label
+ if report is None:
+ self._report(reports.corosync_config_accepted_by_node(node_label))
+ else:
+ self._report_list([
+ report,
+ reports.corosync_config_distribution_node_error(
+ node_label,
+ self._failure_severity,
+ self._failure_forceable,
+ )
+ ])
+
+ def before(self):
+ self._report(reports.corosync_config_distribution_started())
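CheckCorosyncOffline above inspects only the "corosync" key of each remote/status response; a sketch of that decision on an assumed response body:

    import json

    # assumed minimal shape of a remote/status response; only the
    # "corosync" key is read by CheckCorosyncOffline
    response_data = '{"corosync": false}'
    if not json.loads(response_data)["corosync"]:
        print("corosync not running on node: ok")
    else:
        print("corosync still running on node: fail")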
diff --git a/pcs/lib/communication/nodes.py b/pcs/lib/communication/nodes.py
new file mode 100644
index 0000000..64d36cb
--- /dev/null
+++ b/pcs/lib/communication/nodes.py
@@ -0,0 +1,261 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import json
+
+from pcs.common import report_codes
+from pcs.common.node_communicator import RequestData
+from pcs.lib import reports, node_communication_format
+from pcs.lib.communication.tools import (
+ AllAtOnceStrategyMixin,
+ AllSameDataMixin,
+ RunRemotelyBase,
+ SkipOfflineMixin,
+)
+from pcs.lib.errors import ReportItemSeverity
+from pcs.lib.node_communication import response_to_report_item
+
+
+class GetOnlineTargets(
+ AllSameDataMixin, AllAtOnceStrategyMixin, RunRemotelyBase
+):
+ def __init__(self, report_processor, ignore_offline_targets=False):
+ super(GetOnlineTargets, self).__init__(report_processor)
+ self._ignore_offline_targets = ignore_offline_targets
+ self._online_target_list = []
+
+ def _get_request_data(self):
+ return RequestData("remote/check_auth", [("check_auth_only", 1)])
+
+ def _process_response(self, response):
+ report = response_to_report_item(response)
+ if report is None:
+ self._online_target_list.append(response.request.target)
+ return
+ if not response.was_connected:
+ report = (
+ reports.omitting_node(response.request.target.label)
+ if self._ignore_offline_targets
+ else response_to_report_item(
+ response, forceable=report_codes.SKIP_OFFLINE_NODES
+ )
+ )
+ self._report(report)
+
+ def on_complete(self):
+ return self._online_target_list
+
+
+class PrecheckNewNode(
+ SkipOfflineMixin, AllSameDataMixin, AllAtOnceStrategyMixin, RunRemotelyBase
+):
+ def __init__(
+ self, report_items, check_response, skip_offline_targets=False
+ ):
+ super(PrecheckNewNode, self).__init__(None)
+ self._set_skip_offline(skip_offline_targets)
+ self._report_items = report_items
+ self._check_response = check_response
+
+ def _get_request_data(self):
+ return RequestData("remote/node_available")
+
+ def _process_response(self, response):
+ # do not send any reports out; just append them to the specified list
+ report = self._get_response_report(response)
+ if report:
+ self._report_items.append(report)
+ return
+ target = response.request.target
+ data = None
+ try:
+ data = json.loads(response.data)
+ except ValueError:
+ self._report_items.append(
+ reports.invalid_response_format(target.label)
+ )
+ return
+ is_in_expected_format = (
+ # node_available is a mandatory field
+ isinstance(data, dict) and "node_available" in data
+ )
+ if not is_in_expected_format:
+ self._report_items.append(
+ reports.invalid_response_format(target.label)
+ )
+ return
+ self._check_response(data, self._report_items, target.label)
+
+
+class RunActionBase(
+ SkipOfflineMixin, AllSameDataMixin, AllAtOnceStrategyMixin, RunRemotelyBase
+):
+ def __init__(
+ self, report_processor, action_definition,
+ skip_offline_targets=False, allow_fails=False, description="",
+ ):
+ super(RunActionBase, self).__init__(report_processor)
+ self._init_properties()
+ self._set_skip_offline(skip_offline_targets)
+ self._action_error_force = _force(self._force_code, allow_fails)
+ self._action_definition = action_definition
+ self._description = description
+
+ def _init_properties(self):
+ raise NotImplementedError()
+
+ def _is_success(self, action_response):
+ raise NotImplementedError()
+
+ def _get_request_data(self):
+ return RequestData(
+ self._request_url,
+ [("data_json", json.dumps(self._action_definition))],
+ )
+
+ def _process_response(self, response):
+ report = self._get_response_report(response)
+ if report:
+ self._report(report)
+ return
+ results = None
+ target = response.request.target
+ try:
+ results = json.loads(response.data)
+ except ValueError:
+ self._report(reports.invalid_response_format(target.label))
+ return
+ results = node_communication_format.response_to_result(
+ results,
+ self._response_key,
+ self._action_definition.keys(),
+ target.label
+ )
+ for key, item_response in sorted(results.items()):
+ if self._is_success(item_response):
+ # only successful results are processed individually
+ report = self._success_report(target.label, key)
+ else:
+ report = self._failure_report(
+ target.label,
+ key,
+ node_communication_format.get_format_result(
+ self._code_message_map
+ )(item_response),
+ **self._action_error_force
+ )
+ self._report(report)
+
+ def before(self):
+ self._report(self._start_report(
+ self._action_definition.keys(),
+ [target.label for target in self._target_list],
+ self._description
+ ))
+
+
+class ServiceAction(RunActionBase):
+ def _init_properties(self):
+ self._request_url = "remote/manage_services"
+ self._response_key = "actions"
+ self._force_code = report_codes.SKIP_ACTION_ON_NODES_ERRORS
+ self._start_report = reports.service_commands_on_nodes_started
+ self._success_report = reports.service_command_on_node_success
+ self._failure_report = reports.service_command_on_node_error
+ self._code_message_map = {"fail": "Operation failed."}
+
+ def _is_success(self, action_response):
+ return action_response.code == "success"
+
+
+class FileActionBase(RunActionBase):
+ #pylint: disable=abstract-method
+ def _init_properties(self):
+ self._response_key = "files"
+ self._force_code = report_codes.SKIP_FILE_DISTRIBUTION_ERRORS
+
+
+class DistributeFiles(FileActionBase):
+ def _init_properties(self):
+ super(DistributeFiles, self)._init_properties()
+ self._request_url = "remote/put_file"
+ self._start_report = reports.files_distribution_started
+ self._success_report = reports.file_distribution_success
+ self._failure_report = reports.file_distribution_error
+ self._code_message_map = {"conflict": "File already exists"}
+
+ def _is_success(self, action_response):
+ return action_response.code in ["written", "rewritten", "same_content"]
+
+
+class RemoveFiles(FileActionBase):
+ def _init_properties(self):
+ super(RemoveFiles, self)._init_properties()
+ self._request_url = "remote/remove_file"
+ self._start_report = reports.files_remove_from_node_started
+ self._success_report = reports.file_remove_from_node_success
+ self._failure_report = reports.file_remove_from_node_error
+ self._code_message_map = {}
+
+ def _is_success(self, action_response):
+ return action_response.code in ["deleted", "not_found"]
+
+
+def _force(force_code, is_forced):
+ if is_forced:
+ return dict(
+ severity=ReportItemSeverity.WARNING,
+ forceable=None,
+ )
+ return dict(
+ severity=ReportItemSeverity.ERROR,
+ forceable=force_code,
+ )
+
+
+def availability_checker_node(availability_info, report_items, node_label):
+ """
+ Check if availability_info means that the node is suitable as a cluster
+ (corosync) node.
+ """
+ if availability_info["node_available"]:
+ return
+
+ if availability_info.get("pacemaker_running", False):
+ report_items.append(reports.cannot_add_node_is_running_service(
+ node_label,
+ "pacemaker"
+ ))
+ return
+
+ if availability_info.get("pacemaker_remote", False):
+ report_items.append(reports.cannot_add_node_is_running_service(
+ node_label,
+ "pacemaker_remote"
+ ))
+ return
+
+ report_items.append(reports.cannot_add_node_is_in_cluster(node_label))
+
+def availability_checker_remote_node(
+ availability_info, report_items, node_label
+):
+ """
+ Check if availability_info means that the node is suitable as a remote node.
+ """
+ if availability_info["node_available"]:
+ return
+
+ if availability_info.get("pacemaker_running", False):
+ report_items.append(reports.cannot_add_node_is_running_service(
+ node_label,
+ "pacemaker"
+ ))
+ return
+
+ if not availability_info.get("pacemaker_remote", False):
+ report_items.append(reports.cannot_add_node_is_in_cluster(node_label))
+ return
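The _force() helper above decides whether a per-item failure becomes a hard error advertising a force code, or a plain warning once the user has forced the operation; a small self-contained illustration (the force code string is arbitrary):

    from pcs.lib.communication.nodes import _force
    from pcs.lib.errors import ReportItemSeverity

    # not forced: a hard error carrying the force code
    assert _force("SOME_FORCE_CODE", False) == dict(
        severity=ReportItemSeverity.ERROR, forceable="SOME_FORCE_CODE"
    )
    # forced: downgraded to a warning with nothing left to force
    assert _force("SOME_FORCE_CODE", True) == dict(
        severity=ReportItemSeverity.WARNING, forceable=None
    )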
diff --git a/pcs/lib/communication/qdevice.py b/pcs/lib/communication/qdevice.py
new file mode 100644
index 0000000..7d4e435
--- /dev/null
+++ b/pcs/lib/communication/qdevice.py
@@ -0,0 +1,82 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.common.node_communicator import RequestData
+from pcs.lib import reports
+from pcs.lib.communication.tools import (
+ AllAtOnceStrategyMixin,
+ AllSameDataMixin,
+ RunRemotelyBase,
+ SkipOfflineMixin,
+ SimpleResponseProcessingMixin,
+)
+
+
+class QdeviceBase(
+ SkipOfflineMixin, AllSameDataMixin, AllAtOnceStrategyMixin, RunRemotelyBase
+):
+ #pylint: disable=abstract-method
+ def __init__(self, report_processor, skip_offline_targets=False):
+ super(QdeviceBase, self).__init__(report_processor)
+ self._set_skip_offline(skip_offline_targets)
+
+
+class Stop(SimpleResponseProcessingMixin, QdeviceBase):
+ def _get_request_data(self):
+ return RequestData("remote/qdevice_client_stop")
+
+ def _get_success_report(self, node_label):
+ return reports.service_stop_success("corosync-qdevice", node_label)
+
+
+class Start(QdeviceBase):
+ def _get_request_data(self):
+ return RequestData("remote/qdevice_client_start")
+
+ def _process_response(self, response):
+ report = self._get_response_report(response)
+ node_label = response.request.target.label
+ if report is None:
+ if response.data == "corosync is not running, skipping":
+ report = reports.service_start_skipped(
+ "corosync-qdevice",
+ "corosync is not running",
+ node_label
+ )
+ else:
+ report = reports.service_start_success(
+ "corosync-qdevice", node_label
+ )
+ self._report(report)
+
+
+class Enable(QdeviceBase):
+ def _get_request_data(self):
+ return RequestData("remote/qdevice_client_enable")
+
+ def _process_response(self, response):
+ report = self._get_response_report(response)
+ node_label = response.request.target.label
+ if report is None:
+ if response.data == "corosync is not enabled, skipping":
+ report = reports.service_enable_skipped(
+ "corosync-qdevice",
+ "corosync is not enabled",
+ node_label
+ )
+ else:
+ report = reports.service_enable_success(
+ "corosync-qdevice", node_label
+ )
+ self._report(report)
+
+
+class Disable(SimpleResponseProcessingMixin, QdeviceBase):
+ def _get_request_data(self):
+ return RequestData("remote/qdevice_client_disable")
+
+ def _get_success_report(self, node_label):
+ return reports.service_disable_success("corosync-qdevice", node_label)
diff --git a/pcs/lib/communication/qdevice_net.py b/pcs/lib/communication/qdevice_net.py
new file mode 100644
index 0000000..0786ae1
--- /dev/null
+++ b/pcs/lib/communication/qdevice_net.py
@@ -0,0 +1,128 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import base64
+import binascii
+
+from pcs.common.node_communicator import (
+ Request,
+ RequestData,
+)
+from pcs.lib import reports
+from pcs.lib.communication.tools import (
+ AllAtOnceStrategyMixin,
+ AllSameDataMixin,
+ RunRemotelyBase,
+ SkipOfflineMixin,
+ SimpleResponseProcessingMixin,
+ SimpleResponseProcessingNoResponseOnSuccessMixin,
+)
+from pcs.lib.communication.qdevice import QdeviceBase
+
+
+class GetCaCert(
+ AllSameDataMixin, AllAtOnceStrategyMixin, RunRemotelyBase
+):
+ def __init__(self, report_processor):
+ super(GetCaCert, self).__init__(report_processor)
+ self._data = []
+
+ def _get_request_data(self):
+ return RequestData("remote/qdevice_net_get_ca_certificate")
+
+ def _process_response(self, response):
+ report = self._get_response_report(response)
+ if report is not None:
+ self._report(report)
+ return
+ target = response.request.target
+ try:
+ self._data.append((target, base64.b64decode(response.data)))
+ except (TypeError, binascii.Error):
+ self._report(reports.invalid_response_format(target.label))
+
+ def on_complete(self):
+ return self._data
+
+
+class ClientSetup(
+ SimpleResponseProcessingNoResponseOnSuccessMixin, SkipOfflineMixin,
+ AllSameDataMixin, AllAtOnceStrategyMixin, RunRemotelyBase,
+):
+ def __init__(self, report_processor, ca_cert, skip_offline_targets=False):
+ super(ClientSetup, self).__init__(report_processor)
+ self._set_skip_offline(skip_offline_targets)
+ self._ca_cert = ca_cert
+
+ def _get_request_data(self):
+ return RequestData(
+ "remote/qdevice_net_client_init_certificate_storage",
+ [("ca_certificate", base64.b64encode(self._ca_cert))]
+ )
+
+class SignCertificate(AllAtOnceStrategyMixin, RunRemotelyBase):
+ def __init__(self, report_processor):
+ super(SignCertificate, self).__init__(report_processor)
+ self._output_data = []
+ self._input_data = []
+
+ def add_request(self, target, cert, cluster_name):
+ self._input_data.append((target, cert, cluster_name))
+
+ def _prepare_initial_requests(self):
+ return [
+ Request(
+ target,
+ RequestData(
+ "remote/qdevice_net_sign_node_certificate",
+ [
+ ("certificate_request", base64.b64encode(cert)),
+ ("cluster_name", cluster_name),
+ ]
+ )
+ ) for target, cert, cluster_name in self._input_data
+ ]
+
+ def _process_response(self, response):
+ report = self._get_response_report(response)
+ if report is not None:
+ self._report(report)
+ return
+ target = response.request.target
+ try:
+ self._output_data.append((target, base64.b64decode(response.data)))
+ except (TypeError, binascii.Error):
+ self._report(reports.invalid_response_format(target.label))
+
+ def on_complete(self):
+ return self._output_data
+
+
+class ClientImportCertificateAndKey(
+ SimpleResponseProcessingMixin, SkipOfflineMixin, AllSameDataMixin,
+ AllAtOnceStrategyMixin, RunRemotelyBase
+):
+ def __init__(self, report_processor, pk12, skip_offline_targets=False):
+ super(ClientImportCertificateAndKey, self).__init__(report_processor)
+ self._set_skip_offline(skip_offline_targets)
+ self._pk12 = pk12
+
+ def _get_request_data(self):
+ return RequestData(
+ "remote/qdevice_net_client_import_certificate",
+ [("certificate", base64.b64encode(self._pk12))]
+ )
+
+ def _get_success_report(self, node_label):
+ return reports.qdevice_certificate_accepted_by_node(node_label)
+
+
+class ClientDestroy(SimpleResponseProcessingMixin, QdeviceBase):
+ def _get_request_data(self):
+ return RequestData("remote/qdevice_net_client_destroy")
+
+ def _get_success_report(self, node_label):
+ return reports.qdevice_certificate_removed_from_node(node_label)
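Unlike the AllSameDataMixin-based commands, SignCertificate above builds one request per target through add_request(). A usage sketch, assuming a communicator, a report processor and (target, certificate request) pairs are supplied by the caller:

    from pcs.lib.communication.qdevice_net import SignCertificate
    from pcs.lib.communication.tools import run

    def sign_node_certificates(
        communicator, report_processor, requests, cluster_name
    ):
        # requests: iterable of (target, certificate_request_bytes) pairs
        cmd = SignCertificate(report_processor)
        for target, cert in requests:
            cmd.add_request(target, cert, cluster_name)
        # run() returns cmd.on_complete(), here a list of
        # (target, signed_certificate_bytes) pairs
        return run(communicator, cmd)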
diff --git a/pcs/lib/communication/sbd.py b/pcs/lib/communication/sbd.py
new file mode 100644
index 0000000..1a7f1b1
--- /dev/null
+++ b/pcs/lib/communication/sbd.py
@@ -0,0 +1,268 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import json
+
+from pcs.common.node_communicator import (
+ Request,
+ RequestData,
+)
+from pcs.lib import reports
+from pcs.lib.communication.tools import (
+ AllAtOnceStrategyMixin,
+ AllSameDataMixin,
+ OneByOneStrategyMixin,
+ RunRemotelyBase,
+ SimpleResponseProcessingMixin,
+)
+from pcs.lib.errors import ReportItemSeverity
+from pcs.lib.node_communication import response_to_report_item
+from pcs.lib.tools import environment_file_to_dict
+
+
+class ServiceAction(
+ SimpleResponseProcessingMixin, AllSameDataMixin, AllAtOnceStrategyMixin,
+ RunRemotelyBase
+):
+ def _get_request_action(self):
+ raise NotImplementedError()
+
+ def _get_before_report(self):
+ raise NotImplementedError()
+
+ def _get_success_report(self, node_label):
+ raise NotImplementedError()
+
+ def _get_request_data(self):
+ return RequestData(self._get_request_action())
+
+ def before(self):
+ self._report(self._get_before_report())
+
+
+class EnableSbdService(ServiceAction):
+ def _get_request_action(self):
+ return "remote/sbd_enable"
+
+ def _get_before_report(self):
+ return reports.sbd_enabling_started()
+
+ def _get_success_report(self, node_label):
+ return reports.service_enable_success("sbd", node_label)
+
+
+class DisableSbdService(ServiceAction):
+ def _get_request_action(self):
+ return "remote/sbd_disable"
+
+ def _get_before_report(self):
+ return reports.sbd_disabling_started()
+
+ def _get_success_report(self, node_label):
+ return reports.service_disable_success("sbd", node_label)
+
+
+class StonithWatchdogTimeoutAction(
+ AllSameDataMixin, OneByOneStrategyMixin, RunRemotelyBase
+):
+ def _get_request_action(self):
+ raise NotImplementedError()
+
+ def _get_request_data(self):
+ return RequestData(self._get_request_action())
+
+ def _process_response(self, response):
+ report = response_to_report_item(response)
+ if report is None:
+ return
+ self._report(report)
+ return self._get_next_list()
+
+
+class RemoveStonithWatchdogTimeout(StonithWatchdogTimeoutAction):
+ def _get_request_action(self):
+ return "remote/remove_stonith_watchdog_timeout"
+
+
+class SetStonithWatchdogTimeoutToZero(StonithWatchdogTimeoutAction):
+ def _get_request_action(self):
+ return "remote/set_stonith_watchdog_timeout_to_zero"
+
+
+class SetSbdConfig(
+ SimpleResponseProcessingMixin, AllAtOnceStrategyMixin, RunRemotelyBase
+):
+ def __init__(self, report_processor):
+ super(SetSbdConfig, self).__init__(report_processor)
+ self._request_data_list = []
+
+ def _prepare_initial_requests(self):
+ return [
+ Request(
+ target,
+ RequestData("remote/set_sbd_config", [("config", config)])
+ ) for target, config in self._request_data_list
+ ]
+
+ def _get_success_report(self, node_label):
+ return reports.sbd_config_accepted_by_node(node_label)
+
+ def add_request(self, target, config):
+ self._request_data_list.append((target, config))
+
+ def before(self):
+ self._report(reports.sbd_config_distribution_started())
+
+
+class GetSbdConfig(AllSameDataMixin, AllAtOnceStrategyMixin, RunRemotelyBase):
+ def __init__(self, report_processor):
+ super(GetSbdConfig, self).__init__(report_processor)
+ self._config_list = []
+ self._successful_target_list = []
+
+ def _get_request_data(self):
+ return RequestData("remote/get_sbd_config")
+
+ def _process_response(self, response):
+ report = response_to_report_item(
+ response, severity=ReportItemSeverity.WARNING
+ )
+ node_label = response.request.target.label
+ if report is not None:
+ if not response.was_connected:
+ self._report(report)
+ self._report(
+ reports.unable_to_get_sbd_config(
+ node_label, "", ReportItemSeverity.WARNING
+ )
+ )
+ return
+ self._config_list.append({
+ "node": node_label,
+ "config": environment_file_to_dict(response.data)
+ })
+ self._successful_target_list.append(node_label)
+
+ def on_complete(self):
+ for node in self._target_list:
+ if node.label not in self._successful_target_list:
+ self._config_list.append({
+ "node": node.label,
+ "config": None
+ })
+ return self._config_list
+
+
+class GetSbdStatus(AllSameDataMixin, AllAtOnceStrategyMixin, RunRemotelyBase):
+ def __init__(self, report_processor):
+ super(GetSbdStatus, self).__init__(report_processor)
+ self._status_list = []
+ self._successful_target_list = []
+
+ def _get_request_data(self):
+ return RequestData("remote/check_sbd",
+ # here we only need info about the sbd service, therefore the watchdog
+ # and device list are left empty
+ [
+ ("watchdog", ""),
+ ("device_list", "[]"),
+ ]
+ )
+
+ def _process_response(self, response):
+ report = response_to_report_item(
+ response, severity=ReportItemSeverity.WARNING
+ )
+ node_label = response.request.target.label
+ if report is not None:
+ self._report_list([
+ report,
+ # the reason is in the previous report item; the warning is implied there
+ reports.unable_to_get_sbd_status(node_label, "")
+ ])
+ return
+ try:
+ self._status_list.append({
+ "node": node_label,
+ "status": json.loads(response.data)["sbd"]
+ })
+ self._successful_target_list.append(node_label)
+ except (ValueError, KeyError) as e:
+ self._report(reports.unable_to_get_sbd_status(node_label, str(e)))
+
+ def on_complete(self):
+ for node in self._target_list:
+ if node.label not in self._successful_target_list:
+ self._status_list.append({
+ "node": node.label,
+ "status": {
+ "installed": None,
+ "enabled": None,
+ "running": None
+ }
+ })
+ return self._status_list
+
+
+class CheckSbd(AllAtOnceStrategyMixin, RunRemotelyBase):
+ def __init__(self, report_processor):
+ super(CheckSbd, self).__init__(report_processor)
+ self._request_data_list = []
+
+ def _prepare_initial_requests(self):
+ return [
+ Request(
+ target,
+ RequestData(
+ "remote/check_sbd",
+ [
+ ("watchdog", watchdog),
+ ("device_list", json.dumps(device_list))
+ ]
+ )
+ ) for target, watchdog, device_list in self._request_data_list
+ ]
+
+ def _process_response(self, response):
+ report = response_to_report_item(response)
+ if report:
+ self._report(report)
+ return
+ report_list = []
+ node_label = response.request.target.label
+ try:
+ data = json.loads(response.data)
+ if not data["sbd"]["installed"]:
+ report_list.append(reports.sbd_not_installed(node_label))
+ if not data["watchdog"]["exist"]:
+ report_list.append(reports.watchdog_not_found(
+ node_label, data["watchdog"]["path"]
+ ))
+ for device in data.get("device_list", []):
+ if not device["exist"]:
+ report_list.append(reports.sbd_device_does_not_exist(
+ device["path"], node_label
+ ))
+ elif not device["block_device"]:
+ report_list.append(reports.sbd_device_is_not_block_device(
+ device["path"], node_label
+ ))
+ # TODO maybe we can check whether the device is initialized by sbd (by
+ # running 'sbd -d <dev> dump;')
+ except (ValueError, KeyError, TypeError):
+ report_list.append(reports.invalid_response_format(node_label))
+ if report_list:
+ self._report_list(report_list)
+ else:
+ self._report(
+ reports.sbd_check_success(response.request.target.label)
+ )
+
+ def add_request(self, target, watchdog, device_list):
+ self._request_data_list.append((target, watchdog, device_list))
+
+ def before(self):
+ self._report(reports.sbd_check_started())
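The result shape of GetSbdStatus.on_complete() can be read off the code above; an illustration for one responsive and one unresponsive node (the values are hypothetical):

    status_list = [
        {
            "node": "node1",
            "status": {"installed": True, "enabled": False, "running": False},
        },
        # targets that never answered successfully are appended by
        # on_complete() with every field set to None
        {
            "node": "node2",
            "status": {"installed": None, "enabled": None, "running": None},
        },
    ]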
diff --git a/pcs/lib/communication/test/__init__.py b/pcs/lib/communication/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/communication/test/test_nodes.py b/pcs/lib/communication/test/test_nodes.py
new file mode 100644
index 0000000..75ecb6f
--- /dev/null
+++ b/pcs/lib/communication/test/test_nodes.py
@@ -0,0 +1,125 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs.test.tools.assertions import assert_report_item_list_equal
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severity
+
+from pcs.lib.communication import nodes
+
+
+class AvailabilityCheckerNode(TestCase):
+ def setUp(self):
+ self.node = "node1"
+
+ def assert_result_causes_reports(
+ self, availability_info, expected_report_items
+ ):
+ report_items = []
+ nodes.availability_checker_node(
+ availability_info,
+ report_items,
+ self.node
+ )
+ assert_report_item_list_equal(report_items, expected_report_items)
+
+ def test_no_reports_when_available(self):
+ self.assert_result_causes_reports({"node_available": True}, [])
+
+ def test_report_node_is_in_cluster(self):
+ self.assert_result_causes_reports({"node_available": False}, [
+ (
+ severity.ERROR,
+ report_codes.CANNOT_ADD_NODE_IS_IN_CLUSTER,
+ {
+ "node": self.node
+ }
+ ),
+ ])
+
+ def test_report_node_is_running_pacemaker_remote(self):
+ self.assert_result_causes_reports(
+ {"node_available": False, "pacemaker_remote": True},
+ [
+ (
+ severity.ERROR,
+ report_codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE,
+ {
+ "node": self.node,
+ "service": "pacemaker_remote",
+ }
+ ),
+ ]
+ )
+
+ def test_report_node_is_running_pacemaker(self):
+ self.assert_result_causes_reports(
+ {"node_available": False, "pacemaker_running": True},
+ [
+ (
+ severity.ERROR,
+ report_codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE,
+ {
+ "node": self.node,
+ "service": "pacemaker",
+ }
+ ),
+ ]
+ )
+
+
+class AvailabilityCheckerRemoteNode(TestCase):
+ def setUp(self):
+ self.node = "node1"
+
+ def assert_result_causes_reports(
+ self, availability_info, expected_report_items
+ ):
+ report_items = []
+ nodes.availability_checker_remote_node(
+ availability_info,
+ report_items,
+ self.node
+ )
+ assert_report_item_list_equal(report_items, expected_report_items)
+
+ def test_no_reports_when_available(self):
+ self.assert_result_causes_reports({"node_available": True}, [])
+
+ def test_report_node_is_running_pacemaker(self):
+ self.assert_result_causes_reports(
+ {"node_available": False, "pacemaker_running": True},
+ [
+ (
+ severity.ERROR,
+ report_codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE,
+ {
+ "node": self.node,
+ "service": "pacemaker",
+ }
+ ),
+ ]
+ )
+
+ def test_report_node_is_in_cluster(self):
+ self.assert_result_causes_reports({"node_available": False}, [
+ (
+ severity.ERROR,
+ report_codes.CANNOT_ADD_NODE_IS_IN_CLUSTER,
+ {
+ "node": self.node
+ }
+ ),
+ ])
+
+ def test_no_reports_when_pacemaker_remote_there(self):
+ self.assert_result_causes_reports(
+ {"node_available": False, "pacemaker_remote": True},
+ []
+ )
diff --git a/pcs/lib/communication/tools.py b/pcs/lib/communication/tools.py
new file mode 100644
index 0000000..115836b
--- /dev/null
+++ b/pcs/lib/communication/tools.py
@@ -0,0 +1,314 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.common import report_codes
+from pcs.common.node_communicator import Request
+from pcs.lib.node_communication import response_to_report_item
+from pcs.lib.errors import (
+ LibraryError,
+ ReportItemSeverity,
+)
+
+
+def run(communicator, cmd):
+ """
+ Run a communication command. Returns the return value of the command's
+ on_complete() method once the run has finished.
+
+ NodeCommunicator communicator -- object used for communication
+ CommunicationCommandInterface cmd
+ """
+ cmd.before()
+ communicator.add_requests(cmd.get_initial_request_list())
+ for response in communicator.start_loop():
+ extra_requests = cmd.on_response(response)
+ if extra_requests:
+ communicator.add_requests(extra_requests)
+ return cmd.on_complete()
+
+
+def run_and_raise(communicator, cmd):
+ """
+ Run a communication command. Returns the return value of the command's
+ on_complete() method once the run has finished.
+ Raises LibraryError (with no report item) when any errors occurred while
+ running the communication command.
+
+ NodeCommunicator communicator -- object used for communication
+ CommunicationCommandInterface cmd
+ """
+ to_return = run(communicator, cmd)
+ if cmd.error_list:
+ raise LibraryError()
+ return to_return
+
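The pattern these two helpers establish recurs throughout the patch (see pcs/lib/env.py below): build a command, set its targets, hand it to a communicator. A minimal sketch, reusing names that appear later in this commit:

    # CheckCorosyncOffline is one of the commands added by this patch;
    # report_processor, skip_offline and target_list come from the caller.
    com_cmd = CheckCorosyncOffline(report_processor, skip_offline)
    com_cmd.set_targets(target_list)
    run_and_raise(env.get_node_communicator(), com_cmd)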
+
+class CommunicationCommandInterface(object):
+ """
+ Interface for all communication commands.
+ """
+ def get_initial_request_list(self):
+ """
+ Returns the initial list of Request objects.
+ """
+ raise NotImplementedError()
+
+ def on_response(self, response):
+ """
+ Process a received response. Returns a list of new Request objects that
+ should be added to the execution queue.
+
+ Response response -- a response to be processed
+ """
+ raise NotImplementedError()
+
+ def on_complete(self):
+ """
+ Runs after all requests have finished.
+ """
+ raise NotImplementedError()
+
+ def before(self):
+ """
+ Runs before executing the requests.
+ """
+ raise NotImplementedError()
+
+ @property
+ def error_list(self):
+ """
+ List of errors which occurred while running the requests.
+ """
+ raise NotImplementedError()
+
+
+class RunRemotelyBase(CommunicationCommandInterface):
+ """
+ Abstract class for communication commands. This class provides methods for
+ reporting.
+ """
+ #pylint: disable=abstract-method
+ def __init__(self, report_processor):
+ self._report_processor = report_processor
+ self._error_list = []
+
+ def _get_response_report(self, response):
+ """
+ Convert specified response to report item. Returns None if the response
+ has no failures.
+
+ Response response -- a response to be converted
+ """
+ return response_to_report_item(response)
+
+ def _report_list(self, report_list):
+ """
+ Send reports from report_list to the report processor.
+
+ list report_list -- list of ReportItem objects
+ """
+ self._error_list.extend(self._report_processor.report_list(report_list))
+
+ def _report(self, report):
+ """
+ Send specified report to the report processor.
+
+ ReportItem report -- report which will be reported
+ """
+ self._report_list([report])
+
+ def _process_response(self, response):
+ """
+ Process a received response. Returns a list of new Request objects that
+ should be added to the execution queue. When there are no new requests to
+ add, it is fine to return None instead of an empty list.
+
+ Response response -- a response to be processed
+ """
+ raise NotImplementedError()
+
+ def on_response(self, response):
+ returned = self._process_response(response)
+ return returned if returned else []
+
+ def on_complete(self):
+ return None
+
+ def before(self):
+ pass
+
+ @property
+ def error_list(self):
+ return self._error_list
+
+
+class StrategyBase(object):
+ """
+ Abstract base class of the communication strategies. Always use at most one
+ strategy mixin in a communication command class.
+ """
+ def _prepare_initial_requests(self):
+ """
+ Returns a list of all Request objects which should be run. A full
+ strategy implementation uses this list to create the initial request
+ list and any follow-up requests.
+ """
+ raise NotImplementedError()
+
+ def get_initial_request_list(self):
+ """
+ This method has to be implemented by the descendants.
+ """
+ raise NotImplementedError()
+
+
+class OneByOneStrategyMixin(StrategyBase):
+ """
+ Communication strategy in which requests are executed one by one: only the
+ first request from _prepare_initial_requests is used as the initial request
+ list. The remaining requests are available by calling the method
+ _get_next_list.
+ """
+ #pylint: disable=abstract-method
+ __iter = None
+
+ def get_initial_request_list(self):
+ """
+ Returns only the first request from _prepare_initial_requests.
+ """
+ self.__iter = iter(self._prepare_initial_requests())
+ return self._get_next_list()
+
+ def _get_next_list(self):
+ """
+ Returns a list containing the next Request object from
+ _prepare_initial_requests, or an empty list when there are no requests
+ left.
+ """
+ try:
+ return [next(self.__iter)]
+ except StopIteration:
+ return []
+
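A one-by-one command typically feeds in the next target only after the current one fails. A hedged sketch of that pattern inside a command's _process_response (the success handling here is illustrative; _get_response_report, _report and _get_next_list come from this module):

    def _process_response(self, response):
        report = self._get_response_report(response)
        if report is None:
            # request succeeded, no need to try further targets
            return []
        self._report(report)
        # failure: retry with the next prepared request, if any
        return self._get_next_list()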
+
+class AllAtOnceStrategyMixin(StrategyBase):
+ """
+ Communication strategy in which all requests are executed at once in
+ parallel.
+ """
+ #pylint: disable=abstract-method
+ def get_initial_request_list(self):
+ return self._prepare_initial_requests()
+
+
+class AllSameDataMixin(object):
+ """
+ Communication command mixin which adds common methods for commands where
+ requests to all targets have the same data.
+ """
+ __targets = None
+
+ def _get_request_data(self):
+ """
+ Returns the RequestData object to use as data for requests to all targets.
+ """
+ raise NotImplementedError()
+
+ def _prepare_initial_requests(self):
+ return [
+ Request(target, self._get_request_data())
+ for target in self.__target_list
+ ]
+
+ def add_request(self, target):
+ """
+ Add a target to which a request will be sent.
+
+ RequestTarget target -- target that will be added.
+ """
+ self.set_targets([target])
+
+ def set_targets(self, target_list):
+ """
+ Add targets to which requests will be sent.
+
+ list target_list -- RequestTarget list
+ """
+ self.__target_list.extend(target_list)
+
+ @property
+ def __target_list(self):
+ if self.__targets is None:
+ self.__targets = []
+ return self.__targets
+
+ @property
+ def _target_list(self):
+ """
+ List of RequestTarget objects to which requests will be sent.
+ """
+ return list(self.__target_list)
+
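A command using this mixin only has to say what data every request carries. A sketch mirroring the removed nodes_task.node_check_auth call further below; it assumes RequestData(action, structured_data) from pcs.common.node_communicator:

    from pcs.common.node_communicator import RequestData

    class CheckAuth(AllSameDataMixin, AllAtOnceStrategyMixin, RunRemotelyBase):
        # hypothetical command: identical request data for every target
        def _get_request_data(self):
            return RequestData("remote/check_auth", [("check_auth_only", 1)])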
+
+class SimpleResponseProcessingMixin(object):
+ """
+ Communication command mixin which adds common response processing. When a
+ request fails, an error/warning is reported. Otherwise the report returned
+ by _get_success_report is reported.
+ """
+ def _get_success_report(self, node_label):
+ """
+ Returns the ReportItem which should be reported when the request was
+ successful.
+
+ string node_label -- node identifier on which request was successful
+ """
+ raise NotImplementedError()
+
+ def _process_response(self, response):
+ report = self._get_response_report(response)
+ if report is None:
+ report = self._get_success_report(response.request.target.label)
+ self._report(report)
+
+
+class SimpleResponseProcessingNoResponseOnSuccessMixin(object):
+ """
+ Communication command mixin which adds common response processing. When a
+ request fails, an error/warning is reported.
+ """
+ def _process_response(self, response):
+ report = self._get_response_report(response)
+ if report is not None:
+ self._report(report)
+
+
+class SkipOfflineMixin(object):
+ """
+ Communication command mixin which simplifies handling of the forceable
+ "skip offline nodes" behavior. This mixin provides the method
+ _set_skip_offline which should be called from __init__ of the descendants.
+ The severity of the report item returned from _get_response_report is then
+ set according to the value of skip_offline_targets.
+ """
+ _failure_severity = ReportItemSeverity.ERROR
+ _failure_forceable = report_codes.SKIP_OFFLINE_NODES
+
+ def _set_skip_offline(self, skip_offline_targets):
+ """
+ Set value of skip_offline_targets flag.
+
+ boolean skip_offline_targets
+ """
+ if skip_offline_targets:
+ self._failure_severity = ReportItemSeverity.WARNING
+ self._failure_forceable = None
+
+ def _get_response_report(self, response):
+ return response_to_report_item(
+ response,
+ severity=self._failure_severity,
+ forceable=self._failure_forceable,
+ )
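Putting the pieces together, a complete command is a stack of these mixins. A sketch modeled on the qdevice.Start command which pcs/lib/env.py below drives; the base ordering and action string are assumptions, the constructor signature matches the env.py call sites:

    from pcs.common.node_communicator import RequestData
    from pcs.lib import reports

    class Start(
        SimpleResponseProcessingMixin, SkipOfflineMixin, AllSameDataMixin,
        AllAtOnceStrategyMixin, RunRemotelyBase,
    ):
        def __init__(self, report_processor, skip_offline_targets):
            super(Start, self).__init__(report_processor)
            self._set_skip_offline(skip_offline_targets)

        def _get_request_data(self):
            # hypothetical endpoint name
            return RequestData("remote/qdevice_client_start")

        def _get_success_report(self, node_label):
            return reports.service_start_success("corosync-qdevice", node_label)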
diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py
index 23b8e4e..2145eb0 100644
--- a/pcs/lib/corosync/config_facade.py
+++ b/pcs/lib/corosync/config_facade.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.common import report_codes
diff --git a/pcs/lib/corosync/config_parser.py b/pcs/lib/corosync/config_parser.py
index 7604ba8..151dfd5 100644
--- a/pcs/lib/corosync/config_parser.py
+++ b/pcs/lib/corosync/config_parser.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py
index 6030375..9a16975 100644
--- a/pcs/lib/corosync/live.py
+++ b/pcs/lib/corosync/live.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os.path
@@ -11,7 +10,6 @@ from pcs import settings
from pcs.common.tools import join_multilines
from pcs.lib import reports
from pcs.lib.errors import LibraryError
-from pcs.lib.external import NodeCommunicator
def get_local_corosync_conf():
"""
@@ -38,18 +36,6 @@ def get_local_cluster_conf():
def exists_local_corosync_conf():
return os.path.exists(settings.corosync_conf_file)
-def set_remote_corosync_conf(node_communicator, node_addr, config_text):
- """
- Send corosync.conf to a node
- node_addr instance of NodeAddresses
- config_text corosync.conf text
- """
- dummy_response = node_communicator.call_node(
- node_addr,
- "remote/set_corosync_conf",
- NodeCommunicator.format_data_dict({'corosync_conf': config_text})
- )
-
def reload_config(runner):
"""
Ask corosync to reload its configuration
diff --git a/pcs/lib/corosync/qdevice_client.py b/pcs/lib/corosync/qdevice_client.py
index c9d0095..7289703 100644
--- a/pcs/lib/corosync/qdevice_client.py
+++ b/pcs/lib/corosync/qdevice_client.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os.path
@@ -33,64 +32,3 @@ def get_status_text(runner, verbose=False):
)
return stdout
-def remote_client_enable(reporter, node_communicator, node):
- """
- enable qdevice client service (corosync-qdevice) on a remote node
- """
- response = node_communicator.call_node(
- node,
- "remote/qdevice_client_enable",
- None
- )
- if response == "corosync is not enabled, skipping":
- reporter.process(
- reports.service_enable_skipped(
- "corosync-qdevice",
- "corosync is not enabled",
- node.label
- )
- )
- else:
- reporter.process(
- reports.service_enable_success("corosync-qdevice", node.label)
- )
-
-def remote_client_disable(reporter, node_communicator, node):
- """
- disable qdevice client service (corosync-qdevice) on a remote node
- """
- node_communicator.call_node(node, "remote/qdevice_client_disable", None)
- reporter.process(
- reports.service_disable_success("corosync-qdevice", node.label)
- )
-
-def remote_client_start(reporter, node_communicator, node):
- """
- start qdevice client service (corosync-qdevice) on a remote node
- """
- response = node_communicator.call_node(
- node,
- "remote/qdevice_client_start",
- None
- )
- if response == "corosync is not running, skipping":
- reporter.process(
- reports.service_start_skipped(
- "corosync-qdevice",
- "corosync is not running",
- node.label
- )
- )
- else:
- reporter.process(
- reports.service_start_success("corosync-qdevice", node.label)
- )
-
-def remote_client_stop(reporter, node_communicator, node):
- """
- stop qdevice client service (corosync-qdevice) on a remote node
- """
- node_communicator.call_node(node, "remote/qdevice_client_stop", None)
- reporter.process(
- reports.service_stop_success("corosync-qdevice", node.label)
- )
diff --git a/pcs/lib/corosync/qdevice_net.py b/pcs/lib/corosync/qdevice_net.py
index fa44923..ab2379d 100644
--- a/pcs/lib/corosync/qdevice_net.py
+++ b/pcs/lib/corosync/qdevice_net.py
@@ -2,11 +2,8 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
-import base64
-import binascii
import functools
import os
import os.path
@@ -330,84 +327,6 @@ def client_import_certificate_and_key(runner, pk12_certificate):
)
)
-def remote_qdevice_get_ca_certificate(node_communicator, host):
- """
- connect to a qnetd host and get qnetd CA certificate
- string host address of the qnetd host
- """
- try:
- return base64.b64decode(
- node_communicator.call_host(
- host,
- "remote/qdevice_net_get_ca_certificate",
- None
- )
- )
- except (TypeError, binascii.Error):
- raise LibraryError(reports.invalid_response_format(host))
-
-def remote_client_setup(node_communicator, node, qnetd_ca_certificate):
- """
- connect to a remote node and initialize qdevice there
- NodeAddresses node target node
- qnetd_ca_certificate qnetd CA certificate
- """
- return node_communicator.call_node(
- node,
- "remote/qdevice_net_client_init_certificate_storage",
- external.NodeCommunicator.format_data_dict([
- ("ca_certificate", base64.b64encode(qnetd_ca_certificate)),
- ])
- )
-
-def remote_sign_certificate_request(
- node_communicator, host, cert_request, cluster_name
-):
- """
- connect to a qdevice host and sign node certificate there
- string host address of the qnetd host
- cert_request certificate request to be signed
- string cluster_name name of the cluster to which qdevice is being added
- """
- try:
- return base64.b64decode(
- node_communicator.call_host(
- host,
- "remote/qdevice_net_sign_node_certificate",
- external.NodeCommunicator.format_data_dict([
- ("certificate_request", base64.b64encode(cert_request)),
- ("cluster_name", cluster_name),
- ])
- )
- )
- except (TypeError, binascii.Error):
- raise LibraryError(reports.invalid_response_format(host))
-
-def remote_client_import_certificate_and_key(node_communicator, node, pk12):
- """
- import pk12 certificate on a remote node
- NodeAddresses node target node
- pk12 certificate
- """
- return node_communicator.call_node(
- node,
- "remote/qdevice_net_client_import_certificate",
- external.NodeCommunicator.format_data_dict([
- ("certificate", base64.b64encode(pk12)),
- ])
- )
-
-def remote_client_destroy(node_communicator, node):
- """
- delete qdevice client config files on a remote node
- NodeAddresses node target node
- """
- return node_communicator.call_node(
- node,
- "remote/qdevice_net_client_destroy",
- None
- )
-
def _store_to_tmpfile(data, report_func):
try:
tmpfile = tempfile.NamedTemporaryFile(mode="wb", suffix=".pcs")
diff --git a/pcs/lib/env.py b/pcs/lib/env.py
index 97ec50c..3d31211 100644
--- a/pcs/lib/env.py
+++ b/pcs/lib/env.py
@@ -2,18 +2,28 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
-from lxml import etree
import os.path
-import tempfile
from pcs import settings
+from pcs.common.node_communicator import (
+ NodeCommunicatorFactory,
+ NodeTargetFactory
+)
from pcs.lib import reports
from pcs.lib.booth.env import BoothEnv
from pcs.lib.pacemaker.env import PacemakerEnv
from pcs.lib.cluster_conf_facade import ClusterConfFacade
+from pcs.lib.communication import qdevice
+from pcs.lib.communication.corosync import (
+ CheckCorosyncOffline,
+ DistributeCorosyncConf,
+)
+from pcs.lib.communication.tools import (
+ run,
+ run_and_raise,
+)
from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
from pcs.lib.corosync.live import (
exists_local_corosync_conf,
@@ -28,22 +38,22 @@ from pcs.lib.external import (
NodeCommunicator,
)
from pcs.lib.errors import LibraryError
-from pcs.lib.nodes_task import (
- distribute_corosync_conf,
- check_corosync_offline_on_nodes,
- qdevice_reload_on_nodes,
-)
+from pcs.lib.node_communication import LibCommunicatorLogger
from pcs.lib.pacemaker.live import (
- ensure_wait_for_idle_support,
+ diff_cibs_xml,
ensure_cib_version,
+ ensure_wait_for_idle_support,
get_cib,
get_cib_xml,
- replace_cib_configuration_xml,
- wait_for_idle,
get_cluster_status_xml,
+ push_cib_diff_xml,
+ replace_cib_configuration,
+ wait_for_idle,
)
from pcs.lib.pacemaker.state import get_cluster_state_dom
from pcs.lib.pacemaker.values import get_valid_timeout_seconds
+from pcs.lib.tools import write_tmpfile
+from pcs.lib.xml_tools import etree_to_str
class LibraryEnvironment(object):
# pylint: disable=too-many-instance-attributes
@@ -58,7 +68,7 @@ class LibraryEnvironment(object):
corosync_conf_data=None,
booth=None,
pacemaker=None,
- auth_tokens_getter=None,
+ token_file_data_getter=None,
cluster_conf_data=None,
request_timeout=None,
):
@@ -80,10 +90,18 @@ class LibraryEnvironment(object):
# TODO tokens probably should not be inserted from outside, but we're
# postponing dealing with them, because it's not that easy to move
# related code currently - it's in pcsd
- self._auth_tokens_getter = auth_tokens_getter
- self._auth_tokens = None
- self._cib_upgraded = False
+ self._token_file_data_getter = token_file_data_getter
+ self._token_file = None
+ self._cib_upgrade_reported = False
self._cib_data_tmp_file = None
+ self.__loaded_cib_diff_source = None
+ self.__loaded_cib_to_modify = None
+ self._communicator_factory = NodeCommunicatorFactory(
+ LibCommunicatorLogger(self.logger, self.report_processor),
+ self.user_login,
+ self.user_groups,
+ self._request_timeout
+ )
self.__timeout_cache = {}
@@ -109,43 +127,36 @@ class LibraryEnvironment(object):
self._is_cman_cluster = is_cman_cluster(self.cmd_runner())
return self._is_cman_cluster
- @property
- def cib_upgraded(self):
- return self._cib_upgraded
-
- def _get_cib_xml(self):
- if self.is_cib_live:
- return get_cib_xml(self.cmd_runner())
- else:
- return self._cib_data
-
def get_cib(self, minimal_version=None):
- cib = get_cib(self._get_cib_xml())
+ if self.__loaded_cib_diff_source is not None:
+ raise AssertionError("CIB has already been loaded")
+ self.__loaded_cib_diff_source = get_cib_xml(self.cmd_runner())
+ self.__loaded_cib_to_modify = get_cib(self.__loaded_cib_diff_source)
if minimal_version is not None:
upgraded_cib = ensure_cib_version(
self.cmd_runner(),
- cib,
+ self.__loaded_cib_to_modify,
minimal_version
)
if upgraded_cib is not None:
- cib = upgraded_cib
- if self.is_cib_live and not self._cib_upgraded:
+ self.__loaded_cib_to_modify = upgraded_cib
+ self.__loaded_cib_diff_source = etree_to_str(upgraded_cib)
+ if not self._cib_upgrade_reported:
self.report_processor.process(
reports.cib_upgrade_successful()
)
- self._cib_upgraded = True
- return cib
+ self._cib_upgrade_reported = True
+ return self.__loaded_cib_to_modify
+
+ @property
+ def cib(self):
+ if self.__loaded_cib_diff_source is None:
+ raise AssertionError("CIB has not been loaded")
+ return self.__loaded_cib_to_modify
def get_cluster_state(self):
return get_cluster_state_dom(get_cluster_status_xml(self.cmd_runner()))
- def _push_cib_xml(self, cib_data):
- if self.is_cib_live:
- replace_cib_configuration_xml(self.cmd_runner(), cib_data)
- self._cib_upgraded = False
- else:
- self._cib_data = cib_data
-
def _get_wait_timeout(self, wait):
if wait is False:
return False
@@ -166,26 +177,80 @@ class LibraryEnvironment(object):
"""
self._get_wait_timeout(wait)
- def push_cib(self, cib, wait=False):
- timeout = self._get_wait_timeout(wait)
- #etree returns bytes: b'xml'
- #python 3 removed .encode() from bytes
- #run(...) calls subprocess.Popen.communicate which calls encode...
- #so here is bytes to str conversion
- self._push_cib_xml(etree.tostring(cib).decode())
+ def push_cib(self, custom_cib=None, wait=False):
+ if custom_cib is not None:
+ return self.push_cib_full(custom_cib, wait)
+ return self.push_cib_diff(wait)
- if timeout is not False:
- wait_for_idle(self.cmd_runner(), timeout)
+ def push_cib_full(self, custom_cib=None, wait=False):
+ if custom_cib is None and self.__loaded_cib_diff_source is None:
+ raise AssertionError("CIB has not been loaded")
+ if custom_cib is not None and self.__loaded_cib_diff_source is not None:
+ raise AssertionError("CIB has been loaded, cannot push custom CIB")
+
+ cmd_runner = self.cmd_runner()
+ cib_to_push = (
+ self.__loaded_cib_to_modify if custom_cib is None else custom_cib
+ )
+ self.__do_push_cib(
+ cmd_runner,
+ lambda: replace_cib_configuration(cmd_runner, cib_to_push),
+ wait
+ )
+
+ def push_cib_diff(self, wait=False):
+ if self.__loaded_cib_diff_source is None:
+ raise AssertionError("CIB has not been loaded")
+
+ cmd_runner = self.cmd_runner()
+ self.__do_push_cib(
+ cmd_runner,
+ lambda: self.__main_push_cib_diff(cmd_runner),
+ wait
+ )
+
+ def __main_push_cib_diff(self, cmd_runner):
+ cib_diff_xml = diff_cibs_xml(
+ cmd_runner,
+ self.report_processor,
+ self.__loaded_cib_diff_source,
+ etree_to_str(self.__loaded_cib_to_modify)
+ )
+
+ if cib_diff_xml:
+ push_cib_diff_xml(cmd_runner, cib_diff_xml)
+
+ def __do_push_cib(self, cmd_runner, push_strategy, wait):
+ timeout = self._get_wait_timeout(wait)
+ push_strategy()
+ self._cib_upgrade_reported = False
+ self.__loaded_cib_diff_source = None
+ self.__loaded_cib_to_modify = None
+ if self.is_cib_live and timeout is not False:
+ wait_for_idle(cmd_runner, timeout)
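The resulting CIB workflow for library commands is: load once, modify the returned tree in place, push once. Pushing clears the loaded state, so a second push without a fresh get_cib() trips the assertions above. A sketch:

    cib = env.get_cib()    # loads the CIB and records the diff source
    # ... modify the returned etree element in place ...
    env.push_cib()         # computes a crm_diff patch and pushes only that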
@property
def is_cib_live(self):
return self._cib_data is None
+ @property
+ def final_mocked_cib_content(self):
+ if self.is_cib_live:
+ raise AssertionError(
+ "Final mocked cib content does not make sense in live env."
+ )
+
+ if self._cib_data_tmp_file:
+ self._cib_data_tmp_file.seek(0)
+ return self._cib_data_tmp_file.read()
+
+ return self._cib_data
+
+
def get_corosync_conf_data(self):
if self._corosync_conf_data is None:
return get_local_corosync_conf()
- else:
- return self._corosync_conf_data
+ return self._corosync_conf_data
def get_corosync_conf(self):
return CorosyncConfigFacade.from_string(self.get_corosync_conf_data())
@@ -195,42 +260,57 @@ class LibraryEnvironment(object):
):
corosync_conf_data = corosync_conf_facade.config.export()
if self.is_corosync_conf_live:
- node_list = corosync_conf_facade.get_nodes()
- if corosync_conf_facade.need_stopped_cluster:
- check_corosync_offline_on_nodes(
- self.node_communicator(),
- self.report_processor,
- node_list,
- skip_offline_nodes
- )
- distribute_corosync_conf(
- self.node_communicator(),
- self.report_processor,
- node_list,
+ self._push_corosync_conf_live(
+ self.get_node_target_factory().get_target_list(
+ corosync_conf_facade.get_nodes()
+ ),
corosync_conf_data,
- skip_offline_nodes
+ corosync_conf_facade.need_stopped_cluster,
+ corosync_conf_facade.need_qdevice_reload,
+ skip_offline_nodes,
)
- if is_service_running(self.cmd_runner(), "corosync"):
- reload_corosync_config(self.cmd_runner())
- self.report_processor.process(
- reports.corosync_config_reloaded()
- )
- if corosync_conf_facade.need_qdevice_reload:
- qdevice_reload_on_nodes(
- self.node_communicator(),
- self.report_processor,
- node_list,
- skip_offline_nodes
- )
else:
self._corosync_conf_data = corosync_conf_data
+ def _push_corosync_conf_live(
+ self, target_list, corosync_conf_data, need_stopped_cluster,
+ need_qdevice_reload, skip_offline_nodes
+ ):
+ if need_stopped_cluster:
+ com_cmd = CheckCorosyncOffline(
+ self.report_processor, skip_offline_nodes
+ )
+ com_cmd.set_targets(target_list)
+ run_and_raise(self.get_node_communicator(), com_cmd)
+ com_cmd = DistributeCorosyncConf(
+ self.report_processor, corosync_conf_data, skip_offline_nodes
+ )
+ com_cmd.set_targets(target_list)
+ run_and_raise(self.get_node_communicator(), com_cmd)
+ if is_service_running(self.cmd_runner(), "corosync"):
+ reload_corosync_config(self.cmd_runner())
+ self.report_processor.process(
+ reports.corosync_config_reloaded()
+ )
+ if need_qdevice_reload:
+ self.report_processor.process(
+ reports.qdevice_client_reload_started()
+ )
+ com_cmd = qdevice.Stop(self.report_processor, skip_offline_nodes)
+ com_cmd.set_targets(target_list)
+ run(self.get_node_communicator(), com_cmd)
+ report_list = com_cmd.error_list
+ com_cmd = qdevice.Start(self.report_processor, skip_offline_nodes)
+ com_cmd.set_targets(target_list)
+ run(self.get_node_communicator(), com_cmd)
+ report_list += com_cmd.error_list
+ if report_list:
+ raise LibraryError()
def get_cluster_conf_data(self):
if self.is_cluster_conf_live:
return get_local_cluster_conf()
- else:
- return self._cluster_conf_data
+ return self._cluster_conf_data
def get_cluster_conf(self):
@@ -279,18 +359,32 @@ class LibraryEnvironment(object):
# don't need to take care of it every time the runner is called.
if not self._cib_data_tmp_file:
try:
- self._cib_data_tmp_file = tempfile.NamedTemporaryFile(
- "w+",
- suffix=".pcs"
+ cib_data = self._cib_data
+ self._cib_data_tmp_file = write_tmpfile(cib_data)
+ self.report_processor.process(
+ reports.tmp_file_write(
+ self._cib_data_tmp_file.name,
+ cib_data
+ )
)
- self._cib_data_tmp_file.write(self._get_cib_xml())
- self._cib_data_tmp_file.flush()
except EnvironmentError as e:
raise LibraryError(reports.cib_save_tmp_error(str(e)))
runner_env["CIB_file"] = self._cib_data_tmp_file.name
return CommandRunner(self.logger, self.report_processor, runner_env)
+ @property
+ def communicator_factory(self):
+ return self._communicator_factory
+
+ def get_node_communicator(self):
+ return self.communicator_factory.get_communicator()
+
+ def get_node_target_factory(self):
+ token_file = self.__get_token_file()
+ return NodeTargetFactory(token_file["tokens"], token_file["ports"])
+
+ # deprecated, use communicator_factory or get_node_communicator()
def node_communicator(self):
return NodeCommunicator(
self.logger,
@@ -301,13 +395,16 @@ class LibraryEnvironment(object):
self._request_timeout
)
- def __get_auth_tokens(self):
- if self._auth_tokens is None:
- if self._auth_tokens_getter:
- self._auth_tokens = self._auth_tokens_getter()
+ def __get_token_file(self):
+ if self._token_file is None:
+ if self._token_file_data_getter:
+ self._token_file = self._token_file_data_getter()
else:
- self._auth_tokens = {}
- return self._auth_tokens
+ self._token_file = {
+ "tokens": {},
+ "ports": {},
+ }
+ return self._token_file
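A token_file_data_getter is expected to return both maps; the default above shows the shape. A sketch with hypothetical values (2224 being the usual pcsd port):

    def token_file_data_getter():
        # shape inferred from __get_token_file above
        return {
            "tokens": {"node1": "0123-example-token"},
            "ports": {"node1": 2224},
        }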
@property
def booth(self):
diff --git a/pcs/lib/env_file.py b/pcs/lib/env_file.py
index 92b4124..d3b3d88 100644
--- a/pcs/lib/env_file.py
+++ b/pcs/lib/env_file.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os.path
diff --git a/pcs/lib/env_tools.py b/pcs/lib/env_tools.py
index bc2c7a4..9791960 100644
--- a/pcs/lib/env_tools.py
+++ b/pcs/lib/env_tools.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.lib.cib.resource import remote_node, guest_node
diff --git a/pcs/lib/errors.py b/pcs/lib/errors.py
index 110e896..b0fd110 100644
--- a/pcs/lib/errors.py
+++ b/pcs/lib/errors.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
class LibraryError(Exception):
@@ -54,10 +53,11 @@ class ReportItem(object):
self.info = info if info else dict()
def __repr__(self):
- return "{severity} {code}: {info}".format(
+ return "{severity} {code}: {info} forceable: {forceable}".format(
severity=self.severity,
code=self.code,
- info=self.info
+ info=self.info,
+ forceable=self.forceable,
)
class ReportListAnalyzer(object):
diff --git a/pcs/lib/external.py b/pcs/lib/external.py
index 5e3133d..4ef5d6b 100644
--- a/pcs/lib/external.py
+++ b/pcs/lib/external.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import base64
@@ -28,14 +27,10 @@ except ImportError:
from urllib.parse import urlencode as urllib_urlencode
from pcs import settings
-from pcs.common import (
- pcs_pycurl as pycurl,
- report_codes,
-)
+from pcs.common import pcs_pycurl as pycurl
from pcs.common.tools import (
join_multilines,
simple_cache,
- run_parallel as tools_run_parallel,
)
from pcs.lib import reports
from pcs.lib.errors import LibraryError, ReportItemSeverity
@@ -99,14 +94,12 @@ def is_systemctl():
Check whenever is local system running on systemd.
Returns True if current system is systemctl compatible, False otherwise.
"""
- systemctl_paths = [
- '/usr/bin/systemctl',
- '/bin/systemctl',
- '/var/run/systemd/system',
+ systemd_paths = [
'/run/systemd/system',
+ '/var/run/systemd/system',
]
- for path in systemctl_paths:
- if os.path.exists(path):
+ for path in systemd_paths:
+ if os.path.isdir(path):
return True
return False
@@ -270,11 +263,10 @@ def is_service_installed(runner, service, instance=None):
service -- name of service
instance -- systemd service instance
"""
- if is_systemctl():
- service_name = "{0}{1}".format(service, "" if instance is None else "@")
- return service_name in get_systemd_services(runner)
- else:
+ if not is_systemctl():
return service in get_non_systemd_services(runner)
+ service_name = "{0}{1}".format(service, "" if instance is None else "@")
+ return service_name in get_systemd_services(runner)
def get_non_systemd_services(runner):
@@ -367,6 +359,10 @@ class CommandRunner(object):
self._env_vars = env_vars if env_vars else dict()
self._python2 = sys.version[0] == "2"
+ @property
+ def env_vars(self):
+ return self._env_vars.copy()
+
def run(
self, args, stdin_string=None, env_extend=None, binary_output=False
):
@@ -442,6 +438,7 @@ class CommandRunner(object):
return out_std, out_err, retval
+# deprecated
class NodeCommunicationException(Exception):
# pylint: disable=super-init-not-called
def __init__(self, node, command, reason):
@@ -450,28 +447,35 @@ class NodeCommunicationException(Exception):
self.reason = reason
+# deprecated
class NodeConnectionException(NodeCommunicationException):
pass
+# deprecated
class NodeAuthenticationException(NodeCommunicationException):
pass
+# deprecated
class NodePermissionDeniedException(NodeCommunicationException):
pass
+# deprecated
class NodeCommandUnsuccessfulException(NodeCommunicationException):
pass
+# deprecated
class NodeUnsupportedCommandException(NodeCommunicationException):
pass
+# deprecated
class NodeConnectionTimedOutException(NodeCommunicationException):
pass
+# deprecated
def node_communicator_exception_to_report_item(
e, severity=ReportItemSeverity.ERROR, forceable=None
):
@@ -508,6 +512,7 @@ def node_communicator_exception_to_report_item(
)
raise e
+# deprecated, use pcs.common.node_communicator.Communicator
class NodeCommunicator(object):
"""
Sends requests to nodes
@@ -712,39 +717,3 @@ class NodeCommunicator(object):
).decode("utf-8")
))
return cookies
-
-
-def parallel_nodes_communication_helper(
- func, func_args_kwargs, reporter, skip_offline_nodes=False
-):
- """
- Help running node calls in parallel and handle communication exceptions.
- Raise LibraryError on any failure.
-
- function func function to be run, should be a function calling a node
- iterable func_args_kwargs list of tuples: (*args, **kwargs)
- bool skip_offline_nodes do not raise LibraryError if a node is unreachable
- """
- failure_severity = ReportItemSeverity.ERROR
- failure_forceable = report_codes.SKIP_OFFLINE_NODES
- if skip_offline_nodes:
- failure_severity = ReportItemSeverity.WARNING
- failure_forceable = None
- report_items = []
-
- def _parallel(*args, **kwargs):
- try:
- func(*args, **kwargs)
- except NodeCommunicationException as e:
- report_items.append(
- node_communicator_exception_to_report_item(
- e,
- failure_severity,
- failure_forceable
- )
- )
- except LibraryError as e:
- report_items.extend(e.args)
-
- tools_run_parallel(_parallel, func_args_kwargs)
- reporter.process_list(report_items)
diff --git a/pcs/lib/node.py b/pcs/lib/node.py
index 3de4843..9b862ab 100644
--- a/pcs/lib/node.py
+++ b/pcs/lib/node.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
diff --git a/pcs/lib/node_communication.py b/pcs/lib/node_communication.py
new file mode 100644
index 0000000..b57cf97
--- /dev/null
+++ b/pcs/lib/node_communication.py
@@ -0,0 +1,177 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import os
+
+from pcs.common import pcs_pycurl as pycurl
+from pcs.common.node_communicator import CommunicatorLoggerInterface
+from pcs.lib.errors import ReportItemSeverity
+from pcs.lib import reports
+
+
+class LibCommunicatorLogger(CommunicatorLoggerInterface):
+ def __init__(self, logger, reporter):
+ self._logger = logger
+ self._reporter = reporter
+
+ def log_request_start(self, request):
+ msg = "Sending HTTP Request to: {url}"
+ if request.data:
+ msg += "\n--Debug Input Start--\n{data}\n--Debug Input End--"
+ self._logger.debug(
+ msg.format(url=request.url, data=request.data)
+ )
+ self._reporter.process(
+ reports.node_communication_started(request.url, request.data)
+ )
+
+ def log_response(self, response):
+ if response.was_connected:
+ self._log_response_successful(response)
+ else:
+ self._log_response_failure(response)
+ self._log_debug(response)
+
+ def _log_response_successful(self, response):
+ url = response.request.url
+ msg = (
+ "Finished calling: {url}\nResponse Code: {code}"
+ + "\n--Debug Response Start--\n{response}\n--Debug Response End--"
+ )
+ self._logger.debug(msg.format(
+ url=url,
+ code=response.response_code,
+ response=response.data
+ ))
+ self._reporter.process(reports.node_communication_finished(
+ url, response.response_code, response.data
+ ))
+
+ def _log_response_failure(self, response):
+ msg = "Unable to connect to {node} ({reason})"
+ self._logger.debug(msg.format(
+ node=response.request.host, reason=response.error_msg
+ ))
+ self._reporter.process(
+ reports.node_communication_not_connected(
+ response.request.host, response.error_msg
+ )
+ )
+ if is_proxy_set(os.environ):
+ self._logger.warning("Proxy is set")
+ self._reporter.process(reports.node_communication_proxy_is_set(
+ response.request.host_label, response.request.host
+ ))
+
+ def _log_debug(self, response):
+ url = response.request.url
+ debug_data = response.debug
+ self._logger.debug(
+ (
+ "Communication debug info for calling: {url}\n"
+ "--Debug Communication Info Start--\n"
+ "{data}\n"
+ "--Debug Communication Info End--"
+ ).format(url=url, data=debug_data)
+ )
+ self._reporter.process(
+ reports.node_communication_debug_info(url, debug_data)
+ )
+
+ def log_retry(self, response, previous_host):
+ msg = (
+ "Unable to connect to '{label}' via address '{old_addr}'. Retrying "
+ "request '{req}' via address '{new_addr}'"
+ ).format(
+ label=response.request.host_label,
+ old_addr=previous_host,
+ new_addr=response.request.host,
+ req=response.request.url,
+ )
+ self._logger.warning(msg)
+ self._reporter.process(reports.node_communication_retrying(
+ response.request.host_label,
+ previous_host,
+ response.request.host,
+ response.request.url,
+ ))
+
+ def log_no_more_addresses(self, response):
+ msg = "No more addresses for node {label} to run '{req}'".format(
+ label=response.request.host_label,
+ req=response.request.url,
+ )
+ self._logger.warning(msg)
+ self._reporter.process(reports.node_communication_no_more_addresses(
+ response.request.host_label, response.request.url
+ ))
+
+
+def response_to_report_item(
+ response, severity=ReportItemSeverity.ERROR, forceable=None
+):
+ """
+ Returns a report item corresponding to the response if the response was not
+ successful. Otherwise returns None.
+
+ Response response -- response from which the report item should be created
+ ReportItemSeverity severity -- severity of the report item
+ string forceable -- force code
+ """
+ response_code = response.response_code
+ report = None
+ reason = None
+ if response.was_connected:
+ if response_code == 400:
+ # old pcsd protocol: error messages are commonly passed in plain
+ # text in response body with HTTP code 400
+ # we need to be backward compatible with that
+ report = reports.node_communication_command_unsuccessful
+ reason = response.data.rstrip()
+ elif response_code == 401:
+ report = reports.node_communication_error_not_authorized
+ reason = "HTTP error: {0}".format(response_code)
+ elif response_code == 403:
+ report = reports.node_communication_error_permission_denied
+ reason = "HTTP error: {0}".format(response_code)
+ elif response_code == 404:
+ report = reports.node_communication_error_unsupported_command
+ reason = "HTTP error: {0}".format(response_code)
+ elif response_code >= 400:
+ report = reports.node_communication_error_other_error
+ reason = "HTTP error: {0}".format(response_code)
+ else:
+ if response.errno in [
+ pycurl.E_OPERATION_TIMEDOUT, pycurl.E_OPERATION_TIMEOUTED
+ ]:
+ report = reports.node_communication_error_timed_out
+ reason = response.error_msg
+ else:
+ report = reports.node_communication_error_unable_to_connect
+ reason = response.error_msg
+ if not report:
+ return None
+ return report(
+ response.request.host,
+ response.request.action,
+ reason,
+ severity,
+ forceable,
+ )
+
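This function is the single place where transport and HTTP failures become report items; SkipOfflineMixin in pcs/lib/communication/tools.py shows the downgrade-to-warning path. A brief sketch of that call:

    # turn a failed response into a non-forceable warning, as
    # SkipOfflineMixin does when skip_offline_targets is set
    report = response_to_report_item(
        response,
        severity=ReportItemSeverity.WARNING,
        forceable=None,
    )
    if report is not None:
        report_processor.report_list([report])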
+
+def is_proxy_set(env_dict):
+ """
+ Returns True if any of the proxy environment variables (https_proxy,
+ HTTPS_PROXY, all_proxy, ALL_PROXY) is set in env_dict. False otherwise.
+
+ dict env_dict -- environment variables in dict
+ """
+ proxy_list = ["https_proxy", "all_proxy"]
+ for var in proxy_list + [v.upper() for v in proxy_list]:
+ if env_dict.get(var, "") != "":
+ return True
+ return False
diff --git a/pcs/lib/node_communication_format.py b/pcs/lib/node_communication_format.py
index f05d9b2..876441e 100644
--- a/pcs/lib/node_communication_format.py
+++ b/pcs/lib/node_communication_format.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from collections import namedtuple
from pcs.lib import reports
diff --git a/pcs/lib/nodes_task.py b/pcs/lib/nodes_task.py
deleted file mode 100644
index ce26923..0000000
--- a/pcs/lib/nodes_task.py
+++ /dev/null
@@ -1,521 +0,0 @@
-from __future__ import (
- absolute_import,
- division,
- print_function,
- unicode_literals,
-)
-
-from collections import defaultdict
-import json
-
-from pcs.common import report_codes
-from pcs.common.tools import run_parallel as tools_run_parallel
-from pcs.lib import reports, node_communication_format
-from pcs.lib.errors import LibraryError, ReportItemSeverity, ReportListAnalyzer
-from pcs.lib.external import (
- NodeCommunicator,
- NodeCommunicationException,
- NodeCommandUnsuccessfulException,
- node_communicator_exception_to_report_item,
- parallel_nodes_communication_helper,
-)
-from pcs.lib.corosync import (
- live as corosync_live,
- qdevice_client,
-)
-
-
-def _call_for_json(
- node_communicator, node, request_path, report_items,
- data=None, request_timeout=None, warn_on_communication_exception=False
-):
- """
- Return python object parsed from a json call response.
- """
- try:
- return json.loads(node_communicator.call_node(
- node,
- request_path,
- data=None if data is None
- else NodeCommunicator.format_data_dict(data)
- ,
- request_timeout=request_timeout
- ))
- except NodeCommandUnsuccessfulException as e:
- report_items.append(
- reports.node_communication_command_unsuccessful(
- e.node,
- e.command,
- e.reason,
- severity=(
- ReportItemSeverity.WARNING
- if warn_on_communication_exception else
- ReportItemSeverity.ERROR
- ),
- forceable=(
- None if warn_on_communication_exception
- else report_codes.SKIP_OFFLINE_NODES
- ),
- )
- )
-
- except NodeCommunicationException as e:
- report_items.append(
- node_communicator_exception_to_report_item(
- e,
- ReportItemSeverity.WARNING if warn_on_communication_exception
- else ReportItemSeverity.ERROR
- ,
- forceable=None if warn_on_communication_exception
- else report_codes.SKIP_OFFLINE_NODES
- )
- )
- except ValueError:
- #e.g. response is not in json format
- report_items.append(reports.invalid_response_format(node.label))
-
-
-def distribute_corosync_conf(
- node_communicator, reporter, node_addr_list, config_text,
- skip_offline_nodes=False
-):
- """
- Send corosync.conf to several cluster nodes
- node_addr_list nodes to send config to (NodeAddressesList instance)
- config_text text of corosync.conf
- skip_offline_nodes don't raise an error if a node communication error occurs
- """
- failure_severity = ReportItemSeverity.ERROR
- failure_forceable = report_codes.SKIP_OFFLINE_NODES
- if skip_offline_nodes:
- failure_severity = ReportItemSeverity.WARNING
- failure_forceable = None
- report_items = []
-
- def _parallel(node):
- try:
- corosync_live.set_remote_corosync_conf(
- node_communicator,
- node,
- config_text
- )
- reporter.process(
- reports.corosync_config_accepted_by_node(node.label)
- )
- except NodeCommunicationException as e:
- report_items.append(
- node_communicator_exception_to_report_item(
- e,
- failure_severity,
- failure_forceable
- )
- )
- report_items.append(
- reports.corosync_config_distribution_node_error(
- node.label,
- failure_severity,
- failure_forceable
- )
- )
-
- reporter.process(reports.corosync_config_distribution_started())
- tools_run_parallel(
- _parallel,
- [((node, ), {}) for node in node_addr_list]
- )
- reporter.process_list(report_items)
-
-def check_corosync_offline_on_nodes(
- node_communicator, reporter, node_addr_list, skip_offline_nodes=False
-):
- """
- Check corosync is not running on cluster nodes
- node_addr_list nodes to send config to (NodeAddressesList instance)
- skip_offline_nodes don't raise an error if a node communication error occurs
- """
- failure_severity = ReportItemSeverity.ERROR
- failure_forceable = report_codes.SKIP_OFFLINE_NODES
- if skip_offline_nodes:
- failure_severity = ReportItemSeverity.WARNING
- failure_forceable = None
- report_items = []
-
- def _parallel(node):
- try:
- status = node_communicator.call_node(node, "remote/status", None)
- if not json.loads(status)["corosync"]:
- reporter.process(
- reports.corosync_not_running_on_node_ok(node.label)
- )
- else:
- report_items.append(
- reports.corosync_running_on_node_fail(node.label)
- )
- except NodeCommunicationException as e:
- report_items.append(
- node_communicator_exception_to_report_item(
- e,
- failure_severity,
- failure_forceable
- )
- )
- report_items.append(
- reports.corosync_not_running_check_node_error(
- node.label,
- failure_severity,
- failure_forceable
- )
- )
- except (ValueError, LookupError):
- report_items.append(
- reports.corosync_not_running_check_node_error(
- node.label,
- failure_severity,
- failure_forceable
- )
- )
-
- reporter.process(reports.corosync_not_running_check_started())
- tools_run_parallel(
- _parallel,
- [((node, ), {}) for node in node_addr_list]
- )
- reporter.process_list(report_items)
-
-def qdevice_reload_on_nodes(
- node_communicator, reporter, node_addr_list, skip_offline_nodes=False
-):
- """
- Reload corosync-qdevice configuration on cluster nodes
- NodeAddressesList node_addr_list nodes to reload config on
- bool skip_offline_nodes don't raise an error on node communication errors
- """
- reporter.process(reports.qdevice_client_reload_started())
- parallel_params = [
- [(reporter, node_communicator, node), {}]
- for node in node_addr_list
- ]
- # catch an exception so we try to start qdevice on nodes where we stopped it
- report_items = []
- try:
- parallel_nodes_communication_helper(
- qdevice_client.remote_client_stop,
- parallel_params,
- reporter,
- skip_offline_nodes
- )
- except LibraryError as e:
- report_items.extend(e.args)
- try:
- parallel_nodes_communication_helper(
- qdevice_client.remote_client_start,
- parallel_params,
- reporter,
- skip_offline_nodes
- )
- except LibraryError as e:
- report_items.extend(e.args)
- reporter.process_list(report_items)
-
-def node_check_auth(communicator, node):
- """
- Check authentication and online status of 'node'.
-
- communicator -- NodeCommunicator
- node -- NodeAddresses
- """
- communicator.call_node(
- node,
- "remote/check_auth",
- NodeCommunicator.format_data_dict({"check_auth_only": 1})
- )
-
-def availability_checker_node(availability_info, report_items, node_label):
- """
- Check if availability_info means that the node is suitable as cluster
- (corosync) node.
- """
- if availability_info["node_available"]:
- return
-
- if availability_info.get("pacemaker_running", False):
- report_items.append(reports.cannot_add_node_is_running_service(
- node_label,
- "pacemaker"
- ))
- return
-
- if availability_info.get("pacemaker_remote", False):
- report_items.append(reports.cannot_add_node_is_running_service(
- node_label,
- "pacemaker_remote"
- ))
- return
-
- report_items.append(reports.cannot_add_node_is_in_cluster(node_label))
-
-def availability_checker_remote_node(
- availability_info, report_items, node_label
-):
- """
- Check if availability_info means that the node is suitable as remote node.
- """
- if availability_info["node_available"]:
- return
-
- if availability_info.get("pacemaker_running", False):
- report_items.append(reports.cannot_add_node_is_running_service(
- node_label,
- "pacemaker"
- ))
- return
-
- if not availability_info.get("pacemaker_remote", False):
- report_items.append(reports.cannot_add_node_is_in_cluster(node_label))
- return
-
-
-def check_can_add_node_to_cluster(
- node_communicator, node, report_items,
- check_response=availability_checker_node,
- warn_on_communication_exception=False,
-):
- """
- Analyze result of node_available check if it is possible use the node as
- cluster node.
-
- NodeCommunicator node_communicator is an object for making the http request
- NodeAddresses node specifies the destination url
- list report_items is place where report items should be collected
- callable check_response -- make decision about availability based on
- response info
- """
- safe_report_items = []
- availability_info = _call_for_json(
- node_communicator,
- node,
- "remote/node_available",
- safe_report_items,
- warn_on_communication_exception=warn_on_communication_exception
- )
- report_items.extend(safe_report_items)
-
- if ReportListAnalyzer(safe_report_items).error_list:
- return
-
- #If there was a communication error and --skip-offline is in effect, no
- #exception was raised. If there is no result cannot process it.
- #Note: the error may be caused by older pcsd daemon not supporting commands
- #sent by newer client.
- if not availability_info:
- return
-
- is_in_expected_format = (
- isinstance(availability_info, dict)
- and
- #node_available is a mandatory field
- "node_available" in availability_info
- )
-
- if not is_in_expected_format:
- report_items.append(reports.invalid_response_format(node.label))
- return
-
- check_response(availability_info, report_items, node.label)
-
-def run_actions_on_node(
- node_communicator, path, response_key, report_processor, node, actions,
- warn_on_communication_exception=False
-):
- """
- NodeCommunicator node_communicator is an object for making the http request
- NodeAddresses node specifies the destination url
- dict actions has key that identifies the action and value is a dict
- with a data that are specific per action type. Mandatory keys there are:
- * type - is type of file like "booth_autfile" or "pcmk_remote_authkey"
- For type == 'service_command' are mandatory
- * service - specify the service (eg. pacemaker_remote)
- * command - specify the command should be applied on service
- (eg. enable or start)
- """
- report_items = []
- action_results = _call_for_json(
- node_communicator,
- node,
- path,
- report_items,
- [("data_json", json.dumps(actions))],
- warn_on_communication_exception=warn_on_communication_exception
- )
-
- #can raise
- report_processor.process_list(report_items)
- #If there was a communication error and --skip-offline is in effect, no
- #exception was raised. If there is no result cannot process it.
- #Note: the error may be caused by older pcsd daemon not supporting commands
- #sent by newer client.
- if not action_results:
- return
-
-
- return node_communication_format.response_to_result(
- action_results,
- response_key,
- actions.keys(),
- node.label,
- )
-
-def _run_actions_on_multiple_nodes(
- node_communicator, url, response_key, report_processor, create_start_report,
- actions, node_addresses_list, is_success,
- create_success_report, create_error_report, force_code, format_result,
- skip_offline_nodes=False,
- allow_incomplete_distribution=False, description=""
-):
- error_map = defaultdict(dict)
- def worker(node_addresses):
- result = run_actions_on_node(
- node_communicator,
- url,
- response_key,
- report_processor,
- node_addresses,
- actions,
- warn_on_communication_exception=skip_offline_nodes,
- )
- #If there was a communication error and --skip-offline is in effect, no
- #exception was raised. If there is no result cannot process it.
- #Note: the error may be caused by older pcsd daemon not supporting
- #commands sent by newer client.
- if not result:
- return
-
- for key, item_response in sorted(result.items()):
- if is_success(key, item_response):
- #only success process individually
- report_processor.process(
- create_success_report(node_addresses.label, key)
- )
- else:
- error_map[node_addresses.label][key] = format_result(
- item_response
- )
-
- report_processor.process(create_start_report(
- actions.keys(),
- [node.label for node in node_addresses_list],
- description
- ))
-
- parallel_nodes_communication_helper(
- worker,
- [([node_addresses], {}) for node_addresses in node_addresses_list],
- report_processor,
- allow_incomplete_distribution,
- )
-
- #now we process errors
- if error_map:
- make_report = reports.get_problem_creator(
- force_code,
- allow_incomplete_distribution
- )
- report_processor.process_list([
- make_report(create_error_report, node_name, action_key, message)
- for node_name, errors in error_map.items()
- for action_key, message in errors.items()
- ])
-
-def distribute_files(
- node_communicator, report_processor, file_definitions, node_addresses_list,
- skip_offline_nodes=False,
- allow_incomplete_distribution=False, description=""
-):
- """
- Put files specified in file_definitions to nodes specified in
- node_addresses_list.
-
- NodeCommunicator node_communicator is an object for making the http request
- NodeAddresses node specifies the destination url
- dict file_definitions has key that identifies the file and value is a dict
- with a data that are specific per file type. Mandatory keys there are:
- * type - is type of file like "booth_autfile" or "pcmk_remote_authkey"
- * data - it contains content of file in file specific format (e.g.
- binary is encoded by base64)
- Common optional key is "rewrite_existing" (True/False) that specifies
- the behaviour when file already exists.
- bool allow_incomplete_distribution keep success even if some node(s) are
- unavailable
- """
- _run_actions_on_multiple_nodes(
- node_communicator,
- "remote/put_file",
- "files",
- report_processor,
- reports.files_distribution_started,
- file_definitions,
- node_addresses_list,
- lambda key, response: response.code in [
- "written",
- "rewritten",
- "same_content",
- ],
- reports.file_distribution_success,
- reports.file_distribution_error,
- report_codes.SKIP_FILE_DISTRIBUTION_ERRORS,
- node_communication_format.get_format_result({
- "conflict": "File already exists",
- }),
- skip_offline_nodes,
- allow_incomplete_distribution,
- description,
- )
-
-def remove_files(
- node_communicator, report_processor, file_definitions, node_addresses_list,
- skip_offline_nodes=False,
- allow_incomplete_distribution=False, description=""
-):
- _run_actions_on_multiple_nodes(
- node_communicator,
- "remote/remove_file",
- "files",
- report_processor,
- reports.files_remove_from_node_started,
- file_definitions,
- node_addresses_list,
- lambda key, response: response.code in ["deleted", "not_found"],
- reports.file_remove_from_node_success,
- reports.file_remove_from_node_error,
- report_codes.SKIP_FILE_DISTRIBUTION_ERRORS,
- node_communication_format.get_format_result({}),
- skip_offline_nodes,
- allow_incomplete_distribution,
- description,
- )
-
-def run_actions_on_multiple_nodes(
- node_communicator, report_processor, action_definitions, is_success,
- node_addresses_list,
- skip_offline_nodes=False,
- allow_fails=False, description=""
-):
- _run_actions_on_multiple_nodes(
- node_communicator,
- "remote/manage_services",
- "actions",
- report_processor,
- reports.service_commands_on_nodes_started,
- action_definitions,
- node_addresses_list,
- is_success,
- reports.service_command_on_node_success,
- reports.service_command_on_node_error,
- report_codes.SKIP_ACTION_ON_NODES_ERRORS,
- node_communication_format.get_format_result({
- "fail": "Operation failed.",
- }),
- skip_offline_nodes,
- allow_fails,
- description,
- )
diff --git a/pcs/lib/pacemaker/env.py b/pcs/lib/pacemaker/env.py
index d852ba4..43f3b07 100644
--- a/pcs/lib/pacemaker/env.py
+++ b/pcs/lib/pacemaker/env.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.common import env_file_role_codes
diff --git a/pcs/lib/pacemaker/live.py b/pcs/lib/pacemaker/live.py
index e58a264..6ef5b29 100644
--- a/pcs/lib/pacemaker/live.py
+++ b/pcs/lib/pacemaker/live.py
@@ -2,11 +2,10 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
-import os.path
from lxml import etree
+import os.path
from pcs import settings
from pcs.common.tools import (
@@ -17,6 +16,8 @@ from pcs.lib import reports
from pcs.lib.cib.tools import get_pacemaker_version_by_which_cib_was_validated
from pcs.lib.errors import LibraryError
from pcs.lib.pacemaker.state import ClusterState
+from pcs.lib.tools import write_tmpfile
+from pcs.lib.xml_tools import etree_to_str
__EXITCODE_WAIT_TIMEOUT = 62
@@ -41,12 +42,15 @@ def get_cluster_status_xml(runner):
return stdout
### cib
-
-def get_cib_xml(runner, scope=None):
+def get_cib_xml_cmd_results(runner, scope=None):
command = [__exec("cibadmin"), "--local", "--query"]
if scope:
command.append("--scope={0}".format(scope))
- stdout, stderr, retval = runner.run(command)
+ stdout, stderr, returncode = runner.run(command)
+ return stdout, stderr, returncode
+
+def get_cib_xml(runner, scope=None):
+ stdout, stderr, retval = get_cib_xml_cmd_results(runner, scope)
if retval != 0:
if retval == __EXITCODE_CIB_SCOPE_VALID_BUT_NOT_PRESENT and scope:
raise LibraryError(
@@ -70,6 +74,24 @@ def get_cib(xml):
except (etree.XMLSyntaxError, etree.DocumentInvalid):
raise LibraryError(reports.cib_load_error_invalid_format())
+def verify(runner, verbose=False):
+ crm_verify_cmd = [__exec("crm_verify")]
+ if verbose:
+ crm_verify_cmd.append("-V")
+
+ # With the `crm_verify` command it is not possible to simply use the
+ # environment variable CIB_file, because `crm_verify` tries to connect to
+ # the CIB file via a tool which can fail with: "Update does not conform to
+ # the configured schema". So we use the explicit flag `--xml-file` instead.
+ cib_tmp_file = runner.env_vars.get("CIB_file", None)
+ if cib_tmp_file is None:
+ crm_verify_cmd.append("--live-check")
+ else:
+ crm_verify_cmd.extend(["--xml-file", cib_tmp_file])
+ # the tuple (stdout, stderr, returncode) is returned here
+ return runner.run(crm_verify_cmd)
+
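verify() deliberately returns the raw (stdout, stderr, returncode) tuple and leaves interpretation to the caller. A sketch:

    stdout, stderr, returncode = verify(runner, verbose=True)
    if returncode != 0:
        # caller decides how to report crm_verify's complaints
        handle_verify_errors(stdout, stderr)  # hypothetical handler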
def replace_cib_configuration_xml(runner, xml):
cmd = [
__exec("cibadmin"),
@@ -83,12 +105,55 @@ def replace_cib_configuration_xml(runner, xml):
raise LibraryError(reports.cib_push_error(stderr, stdout))
def replace_cib_configuration(runner, tree):
- #etree returns bytes: b'xml'
- #python 3 removed .encode() from bytes
- #run(...) calls subprocess.Popen.communicate which calls encode...
- #so here is bytes to str conversion
- xml = etree.tostring(tree).decode()
- return replace_cib_configuration_xml(runner, xml)
+ return replace_cib_configuration_xml(runner, etree_to_str(tree))
+
+def push_cib_diff_xml(runner, cib_diff_xml):
+ cmd = [
+ __exec("cibadmin"),
+ "--patch",
+ "--verbose",
+ "--xml-pipe",
+ ]
+ stdout, stderr, retval = runner.run(cmd, stdin_string=cib_diff_xml)
+ if retval != 0:
+ raise LibraryError(reports.cib_push_error(stderr, stdout))
+
+def diff_cibs_xml(runner, reporter, cib_old_xml, cib_new_xml):
+ """
+ Return an XML diff of two CIBs
+ CommandRunner runner
+ string cib_old_xml -- original CIB
+ string cib_new_xml -- modified CIB
+ """
+ try:
+ cib_old_tmp_file = write_tmpfile(cib_old_xml)
+ reporter.process(
+ reports.tmp_file_write(cib_old_tmp_file.name, cib_old_xml)
+ )
+ cib_new_tmp_file = write_tmpfile(cib_new_xml)
+ reporter.process(
+ reports.tmp_file_write(cib_new_tmp_file.name, cib_new_xml)
+ )
+ except EnvironmentError as e:
+ raise LibraryError(reports.cib_save_tmp_error(str(e)))
+ command = [
+ __exec("crm_diff"),
+ "--original",
+ cib_old_tmp_file.name,
+ "--new",
+ cib_new_tmp_file.name,
+ "--no-version",
+ ]
+ # dummy_retval == 1 means one of two things:
+ # a) an error has occurred
+ # b) --original and --new differ
+ # therefore it's of no use to see if an error occurred
+ stdout, stderr, dummy_retval = runner.run(command)
+ if stderr.strip():
+ raise LibraryError(
+ reports.cib_diff_error(stderr.strip(), cib_old_xml, cib_new_xml)
+ )
+ return stdout.strip()
def ensure_cib_version(runner, cib, version):
"""
diff --git a/pcs/lib/pacemaker/state.py b/pcs/lib/pacemaker/state.py
index be3e7ad..6b87d8b 100644
--- a/pcs/lib/pacemaker/state.py
+++ b/pcs/lib/pacemaker/state.py
@@ -7,7 +7,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os.path
diff --git a/pcs/lib/pacemaker/test/test_live.py b/pcs/lib/pacemaker/test/test_live.py
index 7a53389..bb731c0 100644
--- a/pcs/lib/pacemaker/test/test_live.py
+++ b/pcs/lib/pacemaker/test/test_live.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
@@ -23,6 +22,12 @@ import pcs.lib.pacemaker.live as lib
from pcs.lib.errors import ReportItemSeverity as Severity
from pcs.lib.external import CommandRunner
+def get_runner(stdout="", stderr="", returncode=0, env_vars=None):
+ runner = mock.MagicMock(spec_set=CommandRunner)
+ runner.run.return_value = (stdout, stderr, returncode)
+ runner.env_vars = env_vars if env_vars else {}
+ return runner
+
class LibraryPacemakerTest(TestCase):
def path(self, name):
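
The get_runner() fixture above collapses the repeated MagicMock setup into
one call. A condensed sketch of the pattern the rewritten tests below
follow (command line shortened for illustration):

    runner = get_runner("<xml />", "", 0)
    stdout, stderr, retval = runner.run(["cibadmin", "--local", "--query"])
    # the mock records the call, so a test can assert the exact command
    runner.run.assert_called_once_with(["cibadmin", "--local", "--query"])
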
@@ -68,8 +73,7 @@ class GetClusterStatusXmlTest(LibraryPacemakerTest):
expected_stdout = "<xml />"
expected_stderr = ""
expected_retval = 0
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -84,8 +88,7 @@ class GetClusterStatusXmlTest(LibraryPacemakerTest):
expected_stdout = "some info"
expected_stderr = "some error"
expected_retval = 1
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -109,8 +112,7 @@ class GetCibXmlTest(LibraryPacemakerTest):
expected_stdout = "<xml />"
expected_stderr = ""
expected_retval = 0
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -127,8 +129,7 @@ class GetCibXmlTest(LibraryPacemakerTest):
expected_stdout = "some info"
expected_stderr = "some error"
expected_retval = 1
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -154,8 +155,7 @@ class GetCibXmlTest(LibraryPacemakerTest):
expected_stderr = ""
expected_retval = 0
scope = "test_scope"
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -176,8 +176,7 @@ class GetCibXmlTest(LibraryPacemakerTest):
expected_stderr = "some error"
expected_retval = 6
scope = "test_scope"
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -219,14 +218,44 @@ class GetCibTest(LibraryPacemakerTest):
)
)
+class Verify(LibraryPacemakerTest):
+ def test_run_on_live_cib(self):
+ runner = get_runner()
+ self.assertEqual(
+ lib.verify(runner),
+ runner.run.return_value
+ )
+ runner.run.assert_called_once_with(
+ [self.path("crm_verify"), "--live-check"],
+ )
+
+ def test_run_on_mocked_cib(self):
+ fake_tmp_file = "/fake/tmp/file"
+ runner = get_runner(env_vars={"CIB_file": fake_tmp_file})
+
+ self.assertEqual(lib.verify(runner), runner.run.return_value)
+ runner.run.assert_called_once_with(
+ [self.path("crm_verify"), "--xml-file", fake_tmp_file],
+ )
+
+ def test_run_verbose(self):
+ runner = get_runner()
+ self.assertEqual(
+ lib.verify(runner, verbose=True),
+ runner.run.return_value
+ )
+ runner.run.assert_called_once_with(
+ [self.path("crm_verify"), "-V", "--live-check"],
+ )
+
+
class ReplaceCibConfigurationTest(LibraryPacemakerTest):
def test_success(self):
xml = "<xml/>"
expected_stdout = "expected output"
expected_stderr = ""
expected_retval = 0
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -250,8 +279,7 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
expected_stdout = "expected output"
expected_stderr = "expected stderr"
expected_retval = 1
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -282,21 +310,18 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
)
class UpgradeCibTest(TestCase):
- def setUp(self):
- self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
-
def test_success(self):
- self.mock_runner.run.return_value = "", "", 0
- lib._upgrade_cib(self.mock_runner)
- self.mock_runner.run.assert_called_once_with(
+ mock_runner = get_runner("", "", 0)
+ lib._upgrade_cib(mock_runner)
+ mock_runner.run.assert_called_once_with(
["/usr/sbin/cibadmin", "--upgrade", "--force"]
)
def test_error(self):
error = "Call cib_upgrade failed (-62): Timer expired"
- self.mock_runner.run.return_value = "", error, 62
+ mock_runner = get_runner("", error, 62)
assert_raise_library_error(
- lambda: lib._upgrade_cib(self.mock_runner),
+ lambda: lib._upgrade_cib(mock_runner),
(
Severity.ERROR,
report_codes.CIB_UPGRADE_FAILED,
@@ -305,16 +330,16 @@ class UpgradeCibTest(TestCase):
}
)
)
- self.mock_runner.run.assert_called_once_with(
+ mock_runner.run.assert_called_once_with(
["/usr/sbin/cibadmin", "--upgrade", "--force"]
)
def test_already_at_latest_schema(self):
error = ("Call cib_upgrade failed (-211): Schema is already "
"the latest available")
- self.mock_runner.run.return_value = "", error, 211
- lib._upgrade_cib(self.mock_runner)
- self.mock_runner.run.assert_called_once_with(
+ mock_runner = get_runner("", error, 211)
+ lib._upgrade_cib(mock_runner)
+ mock_runner.run.assert_called_once_with(
["/usr/sbin/cibadmin", "--upgrade", "--force"]
)
@@ -412,8 +437,7 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
expected_stdout = "some info"
expected_stderr = "some error"
expected_retval = 1
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -429,8 +453,7 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
expected_stdout = "invalid xml"
expected_stderr = ""
expected_retval = 0
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -616,8 +639,7 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
class RemoveNode(LibraryPacemakerTest):
def test_success(self):
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = ("", "", 0)
+ mock_runner = get_runner("", "", 0)
lib.remove_node(
mock_runner,
"NODE_NAME"
@@ -630,9 +652,8 @@ class RemoveNode(LibraryPacemakerTest):
])
def test_error(self):
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
expected_stderr = "expected stderr"
- mock_runner.run.return_value = ("", expected_stderr, 1)
+ mock_runner = get_runner("", expected_stderr, 1)
assert_raise_library_error(
lambda: lib.remove_node(mock_runner, "NODE_NAME") ,
(
@@ -678,8 +699,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
)
def test_threshold_exceeded(self):
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
self.fixture_status_xml(1000, 1000),
"",
0
@@ -700,8 +720,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
def test_forced(self):
expected_stdout = "expected output"
expected_stderr = "expected stderr"
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (expected_stdout, expected_stderr, 0)
+ mock_runner = get_runner(expected_stdout, expected_stderr, 0)
real_output = lib.resource_cleanup(mock_runner, force=True)
@@ -717,8 +736,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
resource = "test_resource"
expected_stdout = "expected output"
expected_stderr = "expected stderr"
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (expected_stdout, expected_stderr, 0)
+ mock_runner = get_runner(expected_stdout, expected_stderr, 0)
real_output = lib.resource_cleanup(mock_runner, resource=resource)
@@ -734,8 +752,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
node = "test_node"
expected_stdout = "expected output"
expected_stderr = "expected stderr"
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (expected_stdout, expected_stderr, 0)
+ mock_runner = get_runner(expected_stdout, expected_stderr, 0)
real_output = lib.resource_cleanup(mock_runner, node=node)
@@ -752,8 +769,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
resource = "test_resource"
expected_stdout = "expected output"
expected_stderr = "expected stderr"
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (expected_stdout, expected_stderr, 0)
+ mock_runner = get_runner(expected_stdout, expected_stderr, 0)
real_output = lib.resource_cleanup(
mock_runner, resource=resource, node=node
@@ -774,8 +790,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
expected_stdout = "some info"
expected_stderr = "some error"
expected_retval = 1
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -829,8 +844,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
expected_stdout = ""
expected_stderr = "something --wait something else"
expected_retval = 1
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -847,8 +861,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
expected_stdout = "something --wait something else"
expected_stderr = ""
expected_retval = 1
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -865,8 +878,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
expected_stdout = "something something else"
expected_stderr = "something something else"
expected_retval = 1
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -906,8 +918,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
expected_stdout = "expected output"
expected_stderr = "expected stderr"
expected_retval = 0
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -924,8 +935,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
expected_stderr = "expected stderr"
expected_retval = 0
timeout = 10
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -944,8 +954,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
expected_stdout = "some info"
expected_stderr = "some error"
expected_retval = 1
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
@@ -970,8 +979,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
expected_stdout = "some info"
expected_stderr = "some error"
expected_retval = 62
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
+ mock_runner = get_runner(
expected_stdout,
expected_stderr,
expected_retval
diff --git a/pcs/lib/pacemaker/test/test_state.py b/pcs/lib/pacemaker/test/test_state.py
index 5de9426..13628f4 100644
--- a/pcs/lib/pacemaker/test/test_state.py
+++ b/pcs/lib/pacemaker/test/test_state.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase, mock
diff --git a/pcs/lib/pacemaker/test/test_values.py b/pcs/lib/pacemaker/test/test_values.py
index ce3522a..4bbadf9 100644
--- a/pcs/lib/pacemaker/test/test_values.py
+++ b/pcs/lib/pacemaker/test/test_values.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/lib/pacemaker/values.py b/pcs/lib/pacemaker/values.py
index 180d7f8..ed5da12 100644
--- a/pcs/lib/pacemaker/values.py
+++ b/pcs/lib/pacemaker/values.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import re
diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
index bda17ca..0085b44 100644
--- a/pcs/lib/reports.py
+++ b/pcs/lib/reports.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from functools import partial
@@ -318,6 +317,20 @@ def mutually_exclusive_options(option_names, option_type):
},
)
+def invalid_cib_content(report):
+ """
+ The given CIB content is not valid.
+ string report -- human-readable explanation of why the CIB is invalid,
+ for example the stderr of `crm_verify`.
+ """
+ return ReportItem.error(
+ report_codes.INVALID_CIB_CONTENT,
+ info={
+ "report": report,
+ }
+ )
+
+
def invalid_id_is_empty(id, id_description):
"""
@@ -487,6 +500,20 @@ def node_communication_not_connected(node, reason):
}
)
+
+def node_communication_no_more_addresses(node, request):
+ """
+ The request failed and there are no more addresses to retry it with.
+ """
+ return ReportItem.warning(
+ report_codes.NODE_COMMUNICATION_NO_MORE_ADDRESSES,
+ info={
+ "node": node,
+ "request": request,
+ }
+ )
+
+
def node_communication_error_not_authorized(
node, command, reason,
severity=ReportItemSeverity.ERROR, forceable=None
@@ -626,12 +653,35 @@ def node_communication_error_timed_out(
forceable=forceable
)
-def node_communication_proxy_is_set():
+def node_communication_proxy_is_set(node=None, address=None):
"""
Warning when connection failed and there is proxy set in environment
variables
"""
- return ReportItem.warning(report_codes.NODE_COMMUNICATION_PROXY_IS_SET)
+ return ReportItem.warning(
+ report_codes.NODE_COMMUNICATION_PROXY_IS_SET,
+ info={
+ "node": node,
+ "address": address,
+ }
+ )
+
+
+def node_communication_retrying(node, failed_address, next_address, request):
+ """
+ The request failed due to a communication error when connecting via the
+ specified address, so another address will be tried if one is available.
+ """
+ return ReportItem.warning(
+ report_codes.NODE_COMMUNICATION_RETRYING,
+ info={
+ "node": node,
+ "failed_address": failed_address,
+ "next_address": next_address,
+ "request": request,
+ }
+ )
+
def cannot_add_node_is_in_cluster(node):
"""
@@ -656,6 +706,12 @@ def cannot_add_node_is_running_service(node, service):
}
)
+def defaults_can_be_overriden():
+ """
+ Warning when setting defaults (op_defaults, rsc_defaults, ...)
+ """
+ return ReportItem.warning(report_codes.DEFAULTS_CAN_BE_OVERRIDEN)
+
def corosync_config_distribution_started():
"""
corosync configuration is about to be sent to nodes
@@ -1289,6 +1345,22 @@ def cib_save_tmp_error(reason):
}
)
+def cib_diff_error(reason, cib_old, cib_new):
+ """
+ cannot obtain a diff of CIBs
+ string reason -- error description
+ string cib_old -- the original CIB to diff against
+ string cib_new -- the new CIB diffed against the old one
+ """
+ return ReportItem.error(
+ report_codes.CIB_DIFF_ERROR,
+ info={
+ "reason": reason,
+ "cib_old": cib_old,
+ "cib_new": cib_new,
+ }
+ )
+
def cluster_state_cannot_load(reason):
"""
cannot load cluster status from crm_mon, crm_mon exited with non-zero code
@@ -2691,3 +2763,17 @@ def use_command_node_remove_guest(
info={},
forceable=forceable
)
+
+def tmp_file_write(file_path, content):
+ """
+ Content has been written into a temporary file.
+ string file_path -- the path of the temporary file
+ string content -- the content that has been written
+ """
+ return ReportItem.debug(
+ report_codes.TMP_FILE_WRITE,
+ info={
+ "file_path": file_path,
+ "content": content,
+ }
+ )
diff --git a/pcs/lib/resource_agent.py b/pcs/lib/resource_agent.py
index ddd1058..4639477 100644
--- a/pcs/lib/resource_agent.py
+++ b/pcs/lib/resource_agent.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from collections import namedtuple
@@ -20,15 +19,6 @@ from pcs.lib.pacemaker.values import is_true
_crm_resource = os.path.join(settings.pacemaker_binaries, "crm_resource")
-DEFAULT_RESOURCE_CIB_ACTION_NAMES = [
- "monitor",
- "start",
- "stop",
- "promote",
- "demote",
-]
-DEFAULT_STONITH_CIB_ACTION_NAMES = ["monitor"]
-
# Operation monitor is required always! No matter if --no-default-ops was
# entered or if agent does not specify it. See
# http://clusterlabs.org/doc/en-US/Pacemaker/1.1-pcs/html-single/Pacemaker_Explained/index.html#_resource_operations
@@ -363,8 +353,6 @@ class Agent(object):
"""
Base class for providing convinient access to an agent's metadata
"""
- DEFAULT_CIB_ACTION_NAMES = []
-
def __init__(self, runner):
"""
create an instance which reads metadata by itself on demand
@@ -555,7 +543,7 @@ class Agent(object):
def get_actions(self):
"""
Get list of agent's actions (operations). Each action is represented as
- dict. Example: [{"name": "monitor", "timeout": 20, "interval": 10}]
+ a dict. Example: [{"name": "monitor", "timeout": 20, "interval": 10}]
"""
action_list = []
for raw_action in self._get_raw_actions():
@@ -568,6 +556,9 @@ class Agent(object):
action_list.append(action)
return action_list
+ def _is_cib_default_action(self, action):
+ return False
+
def get_cib_default_actions(self, necessary_only=False):
"""
List actions that should be put to resource on its creation.
@@ -576,10 +567,17 @@ class Agent(object):
action_list = [
action for action in self.get_actions()
- if action.get("name", "") in (
- NECESSARY_CIB_ACTION_NAMES if necessary_only
- else self.DEFAULT_CIB_ACTION_NAMES
- )
+ if (
+ necessary_only
+ and
+ action.get("name") in NECESSARY_CIB_ACTION_NAMES
+ )
+ or
+ (
+ not necessary_only
+ and
+ self._is_cib_default_action(action)
+ )
]
for action_name in NECESSARY_CIB_ACTION_NAMES:
@@ -740,7 +738,6 @@ class CrmAgent(Agent):
class ResourceAgent(CrmAgent):
- DEFAULT_CIB_ACTION_NAMES = DEFAULT_RESOURCE_CIB_ACTION_NAMES
"""
Provides convinient access to a resource agent's metadata
"""
@@ -806,6 +803,20 @@ class ResourceAgent(CrmAgent):
return parameters
+ def _is_cib_default_action(self, action):
+ # Copy all actions to the CIB even those not defined in the OCF standard
+ # or pacemaker. This way even custom actions defined in a resource agent
+ # will be copied to the CIB and run by pacemaker if they specify
+ # an interval. See https://github.com/ClusterLabs/pcs/issues/132
+ return action.get("name") not in [
+ # one-time action, not meant to be processed by pacemaker
+ "meta-data",
+ # deprecated alias of monitor
+ "status",
+ # one-time action, not meant to be processed by pacemaker
+ "validate-all",
+ ]
+
class AbsentAgentMixin(object):
def _load_metadata(self):
@@ -823,8 +834,6 @@ class StonithAgent(CrmAgent):
"""
Provides convinient access to a stonith agent's metadata
"""
- DEFAULT_CIB_ACTION_NAMES = DEFAULT_STONITH_CIB_ACTION_NAMES
-
_stonithd_metadata = None
def _prepare_name_parts(self, name):
@@ -929,6 +938,9 @@ class StonithAgent(CrmAgent):
return True
return False
+ def _is_cib_default_action(self, action):
+ return action.get("name") == "monitor"
+
class AbsentStonithAgent(AbsentAgentMixin, StonithAgent):
def get_parameters(self):
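
The per-class _is_cib_default_action() hooks replace the removed
DEFAULT_*_CIB_ACTION_NAMES lists: ResourceAgent now keeps every agent
action except the one-shot ones, while StonithAgent keeps only monitor. A
minimal sketch of the resulting filtering, simplified to ignore the
NECESSARY_CIB_ACTION_NAMES handling:

    actions = [
        {"name": "monitor", "interval": "10"},
        {"name": "custom-action", "interval": "60"},  # kept, see issue 132
        {"name": "validate-all"},  # one-shot, dropped
    ]
    resource_defaults = [
        action for action in actions
        if action.get("name") not in ("meta-data", "status", "validate-all")
    ]
    stonith_defaults = [
        action for action in actions if action.get("name") == "monitor"
    ]
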
diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py
index ff8c71f..de2fd1f 100644
--- a/pcs/lib/sbd.py
+++ b/pcs/lib/sbd.py
@@ -2,24 +2,16 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
-import json
from os import path
from pcs import settings
-from pcs.common import tools
from pcs.lib import (
external,
reports,
)
from pcs.lib.tools import dict_to_environment_file, environment_file_to_dict
-from pcs.lib.external import (
- NodeCommunicator,
- node_communicator_exception_to_report_item,
- NodeCommunicationException,
-)
from pcs.lib.errors import LibraryError
@@ -31,30 +23,6 @@ DEVICE_INITIALIZATION_OPTIONS_MAPPING = {
}
-def _run_parallel_and_raise_lib_error_on_failure(func, param_list):
- """
- Run function func in parallel for all specified parameters in arg_list.
- Raise LibraryError on any failure.
-
- func -- function to be run
- param_list -- list of tuples: (*args, **kwargs)
- """
- report_list = []
-
- def _parallel(*args, **kwargs):
- try:
- func(*args, **kwargs)
- except NodeCommunicationException as e:
- report_list.append(node_communicator_exception_to_report_item(e))
- except LibraryError as e:
- report_list.extend(e.args)
-
- tools.run_parallel(_parallel, param_list)
-
- if report_list:
- raise LibraryError(*report_list)
-
-
def _even_number_of_nodes_and_no_qdevice(
corosync_conf_facade, node_number_modifier=0
):
@@ -134,318 +102,15 @@ def atb_has_to_be_enabled(runner, corosync_conf_facade, node_number_modifier=0):
)
-def check_sbd(communicator, node, watchdog, device_list):
- """
- Check SBD on specified 'node' and existence of specified watchdog and
- devices.
-
- communicator -- NodeCommunicator
- node -- NodeAddresses
- watchdog -- watchdog path
- device_list -- list of strings
- """
- return communicator.call_node(
- node,
- "remote/check_sbd",
- NodeCommunicator.format_data_dict([
- ("watchdog", watchdog),
- ("device_list", NodeCommunicator.format_data_json(device_list)),
- ])
- )
-
-
-def check_sbd_on_node(
- report_processor, node_communicator, node, watchdog, device_list
-):
- """
- Check if SBD can be enabled on specified 'node'.
- Raises LibraryError if check fails.
- Raises NodeCommunicationException if there is communication issue.
-
- report_processor --
- node_communicator -- NodeCommunicator
- node -- NodeAddresses
- watchdog -- watchdog path
- device_list -- list of strings
- """
- report_list = []
- try:
- data = json.loads(
- check_sbd(node_communicator, node, watchdog, device_list)
- )
- if not data["sbd"]["installed"]:
- report_list.append(reports.sbd_not_installed(node.label))
- if not data["watchdog"]["exist"]:
- report_list.append(reports.watchdog_not_found(node.label, watchdog))
- for device in data.get("device_list", []):
- if not device["exist"]:
- report_list.append(reports.sbd_device_does_not_exist(
- device["path"], node.label
- ))
- elif not device["block_device"]:
- report_list.append(reports.sbd_device_is_not_block_device(
- device["path"], node.label
- ))
- # TODO maybe we can check whenever device is initialized by sbd (by
- # running 'sbd -d <dev> dump;')
- except (ValueError, KeyError, TypeError):
- raise LibraryError(reports.invalid_response_format(node.label))
-
- if report_list:
- raise LibraryError(*report_list)
- report_processor.process(reports.sbd_check_success(node.label))
-
-
-def check_sbd_on_all_nodes(report_processor, node_communicator, nodes_data):
- """
- Checks SBD (if SBD is installed and watchdog exists) on all NodeAddresses
- defined as keys in nodes_data.
- Raises LibraryError with all ReportItems in case of any failure.
-
- report_processor --
- node_communicator -- NodeCommunicator
- nodes_data -- dictionary with NodeAddresses as keys and dict (with keys
- 'watchdog' and 'device_list') as value
- """
- report_processor.process(reports.sbd_check_started())
- data_list = []
- for node, data in sorted(nodes_data.items()):
- data_list.append((
- [
- report_processor, node_communicator, node, data["watchdog"],
- data["device_list"]
- ],
- {}
- ))
-
- _run_parallel_and_raise_lib_error_on_failure(check_sbd_on_node, data_list)
-
-
-def set_sbd_config(communicator, node, config):
- """
- Send SBD configuration to 'node'.
-
- communicator -- NodeCommunicator
- node -- NodeAddresses
- config -- string, SBD configuration file
- """
- communicator.call_node(
- node,
- "remote/set_sbd_config",
- NodeCommunicator.format_data_dict([("config", config)])
- )
-
-
-def set_sbd_config_on_node(
- report_processor, node_communicator, node, config, watchdog,
- device_list=None
-):
- """
- Send SBD configuration to 'node' with specified watchdog set. Also puts
- correct node name into SBD_OPTS option (SBD_OPTS="-n <node_name>").
-
- report_processor --
- node_communicator -- NodeCommunicator
- node -- NodeAddresses
- config -- dictionary in format: <SBD config option>: <value>
- watchdog -- path to watchdog device
- device_list -- list of strings
- """
- config = dict(config)
- config["SBD_OPTS"] = '"-n {node_name}"'.format(node_name=node.label)
+def create_sbd_config(base_config, node_label, watchdog, device_list=None):
+ # TODO: figure out which name/ring has to be in SBD_OPTS
+ config = dict(base_config)
+ config["SBD_OPTS"] = '"-n {node_name}"'.format(node_name=node_label)
if watchdog:
config["SBD_WATCHDOG_DEV"] = watchdog
if device_list:
config["SBD_DEVICE"] = '"{0}"'.format(";".join(device_list))
- set_sbd_config(node_communicator, node, dict_to_environment_file(config))
- report_processor.process(
- reports.sbd_config_accepted_by_node(node.label)
- )
-
-
-def set_sbd_config_on_all_nodes(
- report_processor, node_communicator, node_list, config, watchdog_dict,
- device_dict
-):
- """
- Send SBD configuration 'config' to all nodes in 'node_list'. Option
- SBD_OPTS="-n <node_name>" is added automatically.
- Raises LibraryError with all ReportItems in case of any failure.
-
- report_processor --
- node_communicator -- NodeCommunicator
- node_list -- NodeAddressesList
- config -- dictionary in format: <SBD config option>: <value>
- watchdog_dict -- dictionary of watchdogs where key is NodeAdresses object
- and value is path to watchdog
- device_dict -- distionary with NodeAddresses as keys and lists of devices
- as values
- """
- report_processor.process(reports.sbd_config_distribution_started())
- _run_parallel_and_raise_lib_error_on_failure(
- set_sbd_config_on_node,
- [
- (
- [
- report_processor, node_communicator, node, config,
- watchdog_dict.get(node), device_dict.get(node)
- ],
- {}
- )
- for node in node_list
- ]
- )
-
-
-def enable_sbd_service(communicator, node):
- """
- Enable SBD service on 'node'.
-
- communicator -- NodeCommunicator
- node -- NodeAddresses
- """
- communicator.call_node(node, "remote/sbd_enable", None)
-
-
-def enable_sbd_service_on_node(report_processor, node_communicator, node):
- """
- Enable SBD service on 'node'.
- Returns list of ReportItem if there was any failure. Empty list otherwise.
-
- report_processor --
- node_communicator -- NodeCommunicator
- node -- NodeAddresses
- """
- enable_sbd_service(node_communicator, node)
- report_processor.process(reports.service_enable_success("sbd", node.label))
-
-
-def enable_sbd_service_on_all_nodes(
- report_processor, node_communicator, node_list
-):
- """
- Enable SBD service on all nodes in 'node_list'.
- Raises LibraryError with all ReportItems in case of any failure.
-
- report_processor --
- node_communicator -- NodeCommunicator
- node_list -- NodeAddressesList
- """
- report_processor.process(reports.sbd_enabling_started())
- _run_parallel_and_raise_lib_error_on_failure(
- enable_sbd_service_on_node,
- [
- ([report_processor, node_communicator, node], {})
- for node in node_list
- ]
- )
-
-
-def disable_sbd_service(communicator, node):
- """
- Disable SBD service on 'node'.
-
- communicator -- NodeCommunicator
- node -- NodeAddresses
- """
- communicator.call_node(node, "remote/sbd_disable", None)
-
-
-def disable_sbd_service_on_node(report_processor, node_communicator, node):
- """
- Disable SBD service on 'node'.
-
- report_processor --
- node_communicator -- NodeCommunicator
- node -- NodeAddresses
- """
- disable_sbd_service(node_communicator, node)
- report_processor.process(reports.service_disable_success("sbd", node.label))
-
-
-def disable_sbd_service_on_all_nodes(
- report_processor, node_communicator, node_list
-):
- """
- Disable SBD service on all nodes in 'node_list'.
- Raises LibraryError with all ReportItems in case of any failure.
-
- report_processor --
- node_communicator -- NodeCommunicator
- node_list -- NodeAddressesList
- """
- report_processor.process(reports.sbd_disabling_started())
- _run_parallel_and_raise_lib_error_on_failure(
- disable_sbd_service_on_node,
- [
- ([report_processor, node_communicator, node], {})
- for node in node_list
- ]
- )
-
-
-def set_stonith_watchdog_timeout_to_zero(communicator, node):
- """
- Set cluster property 'stonith-watchdog-timeout' to value '0' on 'node'.
-
- communicator -- NodeCommunicator
- node -- NodeAddresses
- """
- communicator.call_node(
- node, "remote/set_stonith_watchdog_timeout_to_zero", None
- )
-
-
-def set_stonith_watchdog_timeout_to_zero_on_all_nodes(
- node_communicator, node_list
-):
- """
- Sets cluster property 'stonith-watchdog-timeout' to value '0' an all nodes
- in 'node_list', even if cluster is not currently running on them (direct
- editing CIB file).
- Raises LibraryError with all ReportItems in case of any failure.
-
- node_communicator -- NodeCommunicator
- node_list -- NodeAddressesList
- """
- report_list = []
- for node in node_list:
- try:
- set_stonith_watchdog_timeout_to_zero(node_communicator, node)
- except NodeCommunicationException as e:
- report_list.append(node_communicator_exception_to_report_item(e))
- if report_list:
- raise LibraryError(*report_list)
-
-
-def remove_stonith_watchdog_timeout(communicator, node):
- """
- Remove cluster property 'stonith-watchdog-timeout' on 'node'.
-
- communicator -- NodeCommunicator
- node -- NodeAddresses
- """
- communicator.call_node(node, "remote/remove_stonith_watchdog_timeout", None)
-
-
-def remove_stonith_watchdog_timeout_on_all_nodes(node_communicator, node_list):
- """
- Removes cluster property 'stonith-watchdog-timeout' from all nodes
- in 'node_list', even if cluster is not currently running on them (direct
- editing CIB file).
- Raises LibraryError with all ReportItems in case of any failure.
-
- node_communicator -- NodeCommunicator
- node_list -- NodeAddressesList
- """
- report_list = []
- for node in node_list:
- try:
- remove_stonith_watchdog_timeout(node_communicator, node)
- except NodeCommunicationException as e:
- report_list.append(node_communicator_exception_to_report_item(e))
- if report_list:
- raise LibraryError(*report_list)
+ return dict_to_environment_file(config)
def get_default_sbd_config():
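
create_sbd_config() now only builds the configuration text; sending it to
nodes moved to the communication layer. A sketch of its expected output,
assuming dict_to_environment_file() renders KEY=value lines (ordering may
differ):

    config_text = create_sbd_config(
        {"SBD_DELAY_START": "no"},
        "node-1",
        watchdog="/dev/watchdog",
        device_list=["/dev/sdb1"],
    )
    # roughly:
    #   SBD_DELAY_START=no
    #   SBD_DEVICE="/dev/sdb1"
    #   SBD_OPTS="-n node-1"
    #   SBD_WATCHDOG_DEV=/dev/watchdog
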
@@ -476,17 +141,6 @@ def get_local_sbd_config():
))
-def get_sbd_config(communicator, node):
- """
- Get SBD configuration from 'node'.
- Returns SBD configuration string.
-
- communicator -- NodeCommunicator
- node -- NodeAddresses
- """
- return communicator.call_node(node, "remote/get_sbd_config", None)
-
-
def get_sbd_service_name():
return "sbd" if external.is_systemctl() else "sbd_helper"
diff --git a/pcs/lib/test/misc.py b/pcs/lib/test/misc.py
index be99bb2..276150a 100644
--- a/pcs/lib/test/misc.py
+++ b/pcs/lib/test/misc.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import logging
diff --git a/pcs/lib/test/test_cluster_conf_facade.py b/pcs/lib/test/test_cluster_conf_facade.py
index 44c6c58..9577599 100644
--- a/pcs/lib/test/test_cluster_conf_facade.py
+++ b/pcs/lib/test/test_cluster_conf_facade.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/lib/test/test_env.py b/pcs/lib/test/test_env.py
index 027fe48..23915fb 100644
--- a/pcs/lib/test/test_env.py
+++ b/pcs/lib/test/test_env.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
@@ -10,29 +9,28 @@ import logging
from functools import partial
from lxml import etree
+from pcs.test.tools import fixture
from pcs.test.tools.assertions import (
assert_raise_library_error,
assert_xml_equal,
- assert_report_item_list_equal,
)
+from pcs.test.tools.command_env import get_env_tools
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.test.tools.misc import get_test_resource as rc, create_patcher
from pcs.test.tools.pcs_unittest import mock
from pcs.lib.env import LibraryEnvironment
from pcs.common import report_codes
-from pcs.lib import reports
+from pcs.lib.node import NodeAddresses, NodeAddressesList
from pcs.lib.cluster_conf_facade import ClusterConfFacade
from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
-from pcs.lib.errors import (
- LibraryError,
- ReportItemSeverity as severity,
-)
+from pcs.lib.errors import ReportItemSeverity as severity
patch_env = create_patcher("pcs.lib.env")
patch_env_object = partial(mock.patch.object, LibraryEnvironment)
+
class LibraryEnvironmentTest(TestCase):
def setUp(self):
self.mock_logger = mock.MagicMock(logging.Logger)
@@ -80,518 +78,6 @@ class LibraryEnvironmentTest(TestCase):
self.assertTrue(env.is_cman_cluster)
self.assertEqual(1, mock_is_cman.call_count)
- @patch_env("replace_cib_configuration_xml")
- @patch_env("get_cib_xml")
- def test_cib_set(self, mock_get_cib, mock_push_cib):
- cib_data = "test cib data"
- new_cib_data = "new test cib data"
- env = LibraryEnvironment(
- self.mock_logger,
- self.mock_reporter,
- cib_data=cib_data
- )
-
- self.assertFalse(env.is_cib_live)
-
- self.assertEqual(cib_data, env._get_cib_xml())
- self.assertEqual(0, mock_get_cib.call_count)
-
- env._push_cib_xml(new_cib_data)
- self.assertEqual(0, mock_push_cib.call_count)
-
- self.assertEqual(new_cib_data, env._get_cib_xml())
- self.assertEqual(0, mock_get_cib.call_count)
-
- @patch_env("replace_cib_configuration_xml")
- @patch_env("get_cib_xml")
- def test_cib_not_set(self, mock_get_cib, mock_push_cib):
- cib_data = "test cib data"
- new_cib_data = "new test cib data"
- mock_get_cib.return_value = cib_data
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-
- self.assertTrue(env.is_cib_live)
-
- self.assertEqual(cib_data, env._get_cib_xml())
- self.assertEqual(1, mock_get_cib.call_count)
-
- env._push_cib_xml(new_cib_data)
- self.assertEqual(1, mock_push_cib.call_count)
-
- @patch_env("ensure_cib_version")
- @patch_env("get_cib_xml")
- def test_get_cib_no_version_live(
- self, mock_get_cib_xml, mock_ensure_cib_version
- ):
- mock_get_cib_xml.return_value = '<cib/>'
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- assert_xml_equal('<cib/>', etree.tostring(env.get_cib()).decode())
- self.assertEqual(1, mock_get_cib_xml.call_count)
- self.assertEqual(0, mock_ensure_cib_version.call_count)
- self.assertFalse(env.cib_upgraded)
-
- @patch_env("ensure_cib_version")
- @patch_env("get_cib_xml")
- def test_get_cib_upgrade_live(
- self, mock_get_cib_xml, mock_ensure_cib_version
- ):
- mock_get_cib_xml.return_value = '<cib/>'
- mock_ensure_cib_version.return_value = etree.XML('<new_cib/>')
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- assert_xml_equal(
- '<new_cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
- )
- self.assertEqual(1, mock_get_cib_xml.call_count)
- self.assertEqual(1, mock_ensure_cib_version.call_count)
- assert_report_item_list_equal(
- env.report_processor.report_item_list,
- [(
- severity.INFO,
- report_codes.CIB_UPGRADE_SUCCESSFUL,
- {}
- )]
- )
- self.assertTrue(env.cib_upgraded)
-
- @patch_env("ensure_cib_version")
- @patch_env("get_cib_xml")
- def test_get_cib_no_upgrade_live(
- self, mock_get_cib_xml, mock_ensure_cib_version
- ):
- mock_get_cib_xml.return_value = '<cib/>'
- mock_ensure_cib_version.return_value = None
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- assert_xml_equal(
- '<cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
- )
- self.assertEqual(1, mock_get_cib_xml.call_count)
- self.assertEqual(1, mock_ensure_cib_version.call_count)
- self.assertFalse(env.cib_upgraded)
-
- @patch_env("ensure_cib_version")
- @patch_env("get_cib_xml")
- def test_get_cib_no_version_file(
- self, mock_get_cib_xml, mock_ensure_cib_version
- ):
- env = LibraryEnvironment(
- self.mock_logger, self.mock_reporter, cib_data='<cib/>'
- )
- assert_xml_equal('<cib/>', etree.tostring(env.get_cib()).decode())
- self.assertEqual(0, mock_get_cib_xml.call_count)
- self.assertEqual(0, mock_ensure_cib_version.call_count)
- self.assertFalse(env.cib_upgraded)
-
- @patch_env("ensure_cib_version")
- @patch_env("get_cib_xml")
- def test_get_cib_upgrade_file(
- self, mock_get_cib_xml, mock_ensure_cib_version
- ):
- mock_ensure_cib_version.return_value = etree.XML('<new_cib/>')
- env = LibraryEnvironment(
- self.mock_logger, self.mock_reporter, cib_data='<cib/>'
- )
- assert_xml_equal(
- '<new_cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
- )
- self.assertEqual(0, mock_get_cib_xml.call_count)
- self.assertEqual(1, mock_ensure_cib_version.call_count)
- self.assertTrue(env.cib_upgraded)
-
- @patch_env("ensure_cib_version")
- @patch_env("get_cib_xml")
- def test_get_cib_no_upgrade_file(
- self, mock_get_cib_xml, mock_ensure_cib_version
- ):
- mock_ensure_cib_version.return_value = None
- env = LibraryEnvironment(
- self.mock_logger, self.mock_reporter, cib_data='<cib/>'
- )
- assert_xml_equal(
- '<cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
- )
- self.assertEqual(0, mock_get_cib_xml.call_count)
- self.assertEqual(1, mock_ensure_cib_version.call_count)
- self.assertFalse(env.cib_upgraded)
-
- @patch_env("replace_cib_configuration_xml")
- @mock.patch.object(
- LibraryEnvironment,
- "cmd_runner",
- lambda self: "mock cmd runner"
- )
- def test_push_cib_not_upgraded_live(self, mock_replace_cib):
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- env.push_cib(etree.XML('<cib/>'))
- mock_replace_cib.assert_called_once_with(
- "mock cmd runner",
- '<cib/>'
- )
- self.assertEqual([], env.report_processor.report_item_list)
-
- @patch_env("replace_cib_configuration_xml")
- @mock.patch.object(
- LibraryEnvironment,
- "cmd_runner",
- lambda self: "mock cmd runner"
- )
- def test_push_cib_upgraded_live(self, mock_replace_cib):
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- env._cib_upgraded = True
- env.push_cib(etree.XML('<cib/>'))
- mock_replace_cib.assert_called_once_with(
- "mock cmd runner",
- '<cib/>'
- )
- self.assertFalse(env.cib_upgraded)
-
- @patch_env("qdevice_reload_on_nodes")
- @patch_env("check_corosync_offline_on_nodes")
- @patch_env("reload_corosync_config")
- @patch_env("distribute_corosync_conf")
- @patch_env("get_local_corosync_conf")
- @mock.patch.object(
- LibraryEnvironment,
- "node_communicator",
- lambda self: "mock node communicator"
- )
- @mock.patch.object(
- LibraryEnvironment,
- "cmd_runner",
- lambda self: "mock cmd runner"
- )
- def test_corosync_conf_set(
- self, mock_get_corosync, mock_distribute, mock_reload,
- mock_check_offline, mock_qdevice_reload
- ):
- corosync_data = "totem {\n version: 2\n}\n"
- new_corosync_data = "totem {\n version: 3\n}\n"
- env = LibraryEnvironment(
- self.mock_logger,
- self.mock_reporter,
- corosync_conf_data=corosync_data
- )
-
- self.assertFalse(env.is_corosync_conf_live)
-
- self.assertEqual(corosync_data, env.get_corosync_conf_data())
- self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
- self.assertEqual(0, mock_get_corosync.call_count)
-
- env.push_corosync_conf(
- CorosyncConfigFacade.from_string(new_corosync_data)
- )
- self.assertEqual(0, mock_distribute.call_count)
-
- self.assertEqual(new_corosync_data, env.get_corosync_conf_data())
- self.assertEqual(0, mock_get_corosync.call_count)
- mock_check_offline.assert_not_called()
- mock_reload.assert_not_called()
- mock_qdevice_reload.assert_not_called()
-
- @patch_env("qdevice_reload_on_nodes")
- @patch_env("reload_corosync_config")
- @patch_env("is_service_running")
- @patch_env("distribute_corosync_conf")
- @patch_env("get_local_corosync_conf")
- @mock.patch.object(
- CorosyncConfigFacade,
- "get_nodes",
- lambda self: "mock node list"
- )
- @mock.patch.object(
- LibraryEnvironment,
- "node_communicator",
- lambda self: "mock node communicator"
- )
- @mock.patch.object(
- LibraryEnvironment,
- "cmd_runner",
- lambda self: "mock cmd runner"
- )
- def test_corosync_conf_not_set_online(
- self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
- mock_qdevice_reload
- ):
- corosync_data = open(rc("corosync.conf")).read()
- new_corosync_data = corosync_data.replace("version: 2", "version: 3")
- mock_get_corosync.return_value = corosync_data
- mock_is_running.return_value = True
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-
- self.assertTrue(env.is_corosync_conf_live)
-
- self.assertEqual(corosync_data, env.get_corosync_conf_data())
- self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
- self.assertEqual(2, mock_get_corosync.call_count)
-
- env.push_corosync_conf(
- CorosyncConfigFacade.from_string(new_corosync_data)
- )
- mock_distribute.assert_called_once_with(
- "mock node communicator",
- self.mock_reporter,
- "mock node list",
- new_corosync_data,
- False
- )
- mock_is_running.assert_called_once_with("mock cmd runner", "corosync")
- mock_reload.assert_called_once_with("mock cmd runner")
- mock_qdevice_reload.assert_not_called()
-
- @patch_env("qdevice_reload_on_nodes")
- @patch_env("reload_corosync_config")
- @patch_env("is_service_running")
- @patch_env("distribute_corosync_conf")
- @patch_env("get_local_corosync_conf")
- @mock.patch.object(
- CorosyncConfigFacade,
- "get_nodes",
- lambda self: "mock node list"
- )
- @mock.patch.object(
- LibraryEnvironment,
- "node_communicator",
- lambda self: "mock node communicator"
- )
- @mock.patch.object(
- LibraryEnvironment,
- "cmd_runner",
- lambda self: "mock cmd runner"
- )
- def test_corosync_conf_not_set_offline(
- self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
- mock_qdevice_reload
- ):
- corosync_data = open(rc("corosync.conf")).read()
- new_corosync_data = corosync_data.replace("version: 2", "version: 3")
- mock_get_corosync.return_value = corosync_data
- mock_is_running.return_value = False
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-
- self.assertTrue(env.is_corosync_conf_live)
-
- self.assertEqual(corosync_data, env.get_corosync_conf_data())
- self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
- self.assertEqual(2, mock_get_corosync.call_count)
-
- env.push_corosync_conf(
- CorosyncConfigFacade.from_string(new_corosync_data)
- )
- mock_distribute.assert_called_once_with(
- "mock node communicator",
- self.mock_reporter,
- "mock node list",
- new_corosync_data,
- False
- )
- mock_is_running.assert_called_once_with("mock cmd runner", "corosync")
- mock_reload.assert_not_called()
- mock_qdevice_reload.assert_not_called()
-
- @patch_env("qdevice_reload_on_nodes")
- @patch_env("check_corosync_offline_on_nodes")
- @patch_env("reload_corosync_config")
- @patch_env("is_service_running")
- @patch_env("distribute_corosync_conf")
- @patch_env("get_local_corosync_conf")
- @mock.patch.object(
- CorosyncConfigFacade,
- "get_nodes",
- lambda self: "mock node list"
- )
- @mock.patch.object(
- LibraryEnvironment,
- "node_communicator",
- lambda self: "mock node communicator"
- )
- @mock.patch.object(
- LibraryEnvironment,
- "cmd_runner",
- lambda self: "mock cmd runner"
- )
- def test_corosync_conf_not_set_need_qdevice_reload_success(
- self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
- mock_check_offline, mock_qdevice_reload
- ):
- corosync_data = open(rc("corosync.conf")).read()
- new_corosync_data = corosync_data.replace("version: 2", "version: 3")
- mock_get_corosync.return_value = corosync_data
- mock_is_running.return_value = True
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-
- self.assertTrue(env.is_corosync_conf_live)
-
- self.assertEqual(corosync_data, env.get_corosync_conf_data())
- self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
- self.assertEqual(2, mock_get_corosync.call_count)
-
- conf_facade = CorosyncConfigFacade.from_string(new_corosync_data)
- conf_facade._need_qdevice_reload = True
- env.push_corosync_conf(conf_facade)
- mock_check_offline.assert_not_called()
- mock_distribute.assert_called_once_with(
- "mock node communicator",
- self.mock_reporter,
- "mock node list",
- new_corosync_data,
- False
- )
- mock_reload.assert_called_once_with("mock cmd runner")
- mock_qdevice_reload.assert_called_once_with(
- "mock node communicator",
- self.mock_reporter,
- "mock node list",
- False
- )
-
- @patch_env("qdevice_reload_on_nodes")
- @patch_env("check_corosync_offline_on_nodes")
- @patch_env("reload_corosync_config")
- @patch_env("is_service_running")
- @patch_env("distribute_corosync_conf")
- @patch_env("get_local_corosync_conf")
- @mock.patch.object(
- CorosyncConfigFacade,
- "get_nodes",
- lambda self: "mock node list"
- )
- @mock.patch.object(
- LibraryEnvironment,
- "node_communicator",
- lambda self: "mock node communicator"
- )
- def test_corosync_conf_not_set_need_offline_success(
- self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
- mock_check_offline, mock_qdevice_reload
- ):
- corosync_data = open(rc("corosync.conf")).read()
- new_corosync_data = corosync_data.replace("version: 2", "version: 3")
- mock_get_corosync.return_value = corosync_data
- mock_is_running.return_value = False
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-
- self.assertTrue(env.is_corosync_conf_live)
-
- self.assertEqual(corosync_data, env.get_corosync_conf_data())
- self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
- self.assertEqual(2, mock_get_corosync.call_count)
-
- conf_facade = CorosyncConfigFacade.from_string(new_corosync_data)
- conf_facade._need_stopped_cluster = True
- env.push_corosync_conf(conf_facade)
- mock_check_offline.assert_called_once_with(
- "mock node communicator",
- self.mock_reporter,
- "mock node list",
- False
- )
- mock_distribute.assert_called_once_with(
- "mock node communicator",
- self.mock_reporter,
- "mock node list",
- new_corosync_data,
- False
- )
- mock_reload.assert_not_called()
- mock_qdevice_reload.assert_not_called()
-
- @patch_env("qdevice_reload_on_nodes")
- @patch_env("check_corosync_offline_on_nodes")
- @patch_env("reload_corosync_config")
- @patch_env("distribute_corosync_conf")
- @patch_env("get_local_corosync_conf")
- @mock.patch.object(
- CorosyncConfigFacade,
- "get_nodes",
- lambda self: "mock node list"
- )
- @mock.patch.object(
- LibraryEnvironment,
- "node_communicator",
- lambda self: "mock node communicator"
- )
- def test_corosync_conf_not_set_need_offline_fail(
- self, mock_get_corosync, mock_distribute, mock_reload,
- mock_check_offline, mock_qdevice_reload
- ):
- corosync_data = open(rc("corosync.conf")).read()
- new_corosync_data = corosync_data.replace("version: 2", "version: 3")
- mock_get_corosync.return_value = corosync_data
- def raiser(dummy_communicator, dummy_reporter, dummy_nodes, dummy_force):
- raise LibraryError(
- reports.corosync_not_running_check_node_error("test node")
- )
- mock_check_offline.side_effect = raiser
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-
- self.assertTrue(env.is_corosync_conf_live)
-
- self.assertEqual(corosync_data, env.get_corosync_conf_data())
- self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
- self.assertEqual(2, mock_get_corosync.call_count)
-
- conf_facade = CorosyncConfigFacade.from_string(new_corosync_data)
- conf_facade._need_stopped_cluster = True
- assert_raise_library_error(
- lambda: env.push_corosync_conf(conf_facade),
- (
- severity.ERROR,
- report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
- {"node": "test node"}
- )
- )
- mock_check_offline.assert_called_once_with(
- "mock node communicator",
- self.mock_reporter,
- "mock node list",
- False
- )
- mock_distribute.assert_not_called()
- mock_reload.assert_not_called()
- mock_qdevice_reload.assert_not_called()
-
- @patch_env("NodeCommunicator")
- def test_node_communicator_no_options(self, mock_comm):
- expected_comm = mock.MagicMock()
- mock_comm.return_value = expected_comm
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- comm = env.node_communicator()
- self.assertEqual(expected_comm, comm)
- mock_comm.assert_called_once_with(
- self.mock_logger,
- self.mock_reporter,
- {},
- None,
- [],
- None
- )
-
- @patch_env("NodeCommunicator")
- def test_node_communicator_all_options(self, mock_comm):
- expected_comm = mock.MagicMock()
- mock_comm.return_value = expected_comm
- user = "testuser"
- groups = ["some", "group"]
- tokens = {"node": "token"}
- timeout = 10
- env = LibraryEnvironment(
- self.mock_logger,
- self.mock_reporter,
- user_login=user,
- user_groups=groups,
- auth_tokens_getter=lambda:tokens,
- request_timeout=timeout
- )
- comm = env.node_communicator()
- self.assertEqual(expected_comm, comm)
- mock_comm.assert_called_once_with(
- self.mock_logger,
- self.mock_reporter,
- tokens,
- user,
- groups,
- timeout
- )
@patch_env("get_local_cluster_conf")
def test_get_cluster_conf_live(self, mock_get_local_cluster_conf):
@@ -673,7 +159,7 @@ class CmdRunner(TestCase):
}
)
- @patch_env("tempfile.NamedTemporaryFile")
+ @patch_env("write_tmpfile")
def test_dump_cib_file(self, mock_tmpfile, mock_runner):
expected_runner = mock.MagicMock()
mock_runner.return_value = expected_runner
@@ -695,7 +181,8 @@ class CmdRunner(TestCase):
"CIB_file": rc("file.tmp"),
}
)
- mock_instance.write.assert_called_once_with("<cib />")
+ mock_tmpfile.assert_called_once_with("<cib />")
+
@patch_env_object("cmd_runner", lambda self: "runner")
class EnsureValidWait(TestCase):
@@ -714,7 +201,6 @@ class EnsureValidWait(TestCase):
def env_fake(self):
return self.create_env(cib_data="<cib/>")
-
def test_not_raises_if_waiting_false_no_matter_if_env_is_live(self):
self.env_live.ensure_wait_satisfiable(False)
self.env_fake.ensure_wait_satisfiable(False)
@@ -726,7 +212,8 @@ class EnsureValidWait(TestCase):
(
severity.ERROR,
report_codes.WAIT_FOR_IDLE_NOT_LIVE_CLUSTER,
- {}
+ {},
+ None
)
)
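
The rewritten tests below drop the hand-rolled mock.patch stacks in favour
of the new pcs.test.tools.command_env tooling: get_env_tools() returns an
(env_assistant, config) pair, the config is primed with the expected HTTP
and runner calls, and the assistant verifies the emitted reports. A
condensed sketch of the pattern (names and arguments illustrative only):

    env_assistant, config = get_env_tools(self)
    (config
        .http.add_communication(  # expected node requests
            "set_conf", [dict(label="node-1")],
            action="remote/set_corosync_conf",
            response_code=200, output="Succeeded",
        )
        .runner.corosync.reload()  # expected local command
    )
    env_assistant.get_env().push_corosync_conf(corosync_conf_facade)
    env_assistant.assert_reports([...])  # every emitted report is checked
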
@@ -739,23 +226,665 @@ class EnsureValidWait(TestCase):
get_valid_timeout.assert_called_once_with(10)
- at patch_env_object("cmd_runner", lambda self: "runner")
- at patch_env_object("_get_wait_timeout", lambda self, wait: wait)
- at patch_env_object("_push_cib_xml")
- at patch_env("wait_for_idle")
-class PushCib(TestCase):
+class PushCorosyncConfLiveBase(TestCase):
def setUp(self):
- self.env = LibraryEnvironment(
- mock.MagicMock(logging.Logger),
- MockLibraryReportProcessor()
+ self.env_assistant, self.config = get_env_tools(self)
+ self.corosync_conf_facade = mock.MagicMock(CorosyncConfigFacade)
+ self.corosync_conf_text = "corosync conf"
+ self.corosync_conf_facade.config.export.return_value = (
+ self.corosync_conf_text
+ )
+ self.corosync_conf_facade.get_nodes.return_value = NodeAddressesList([
+ NodeAddresses("node-1"),
+ NodeAddresses("node-2"),
+ ])
+ self.corosync_conf_facade.need_stopped_cluster = False
+ self.corosync_conf_facade.need_qdevice_reload = False
+ self.node_label_list = [
+ dict(label="node-1"),
+ dict(label="node-2"),
+ ]
+
+
+@mock.patch("pcs.lib.external.is_systemctl")
+class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
+ def test_dont_need_stopped_cluster(self, mock_is_systemctl):
+ mock_is_systemctl.return_value = True
+ (self.config
+ .http.add_communication(
+ "distribute_corosync_conf",
+ self.node_label_list,
+ action="remote/set_corosync_conf",
+ param_list=[("corosync_conf", self.corosync_conf_text)],
+ response_code=200,
+ output="Succeeded",
+ )
+ .runner.systemctl.is_active("corosync")
+ .runner.corosync.reload()
+ )
+ self.env_assistant.get_env().push_corosync_conf(
+ self.corosync_conf_facade
+ )
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-1",
+ ),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-2",
+ ),
+ fixture.info(report_codes.COROSYNC_CONFIG_RELOADED)
+ ])
+
+ def test_need_stopped_cluster(self, mock_is_systemctl):
+ mock_is_systemctl.return_value = True
+ self.corosync_conf_facade.need_stopped_cluster = True
+ (self.config
+ .http.add_communication(
+ "status",
+ self.node_label_list,
+ action="remote/status",
+ response_code=200,
+ output="""
+{"uptime":"0 days, 05:07:39","corosync":false,"pacemaker":false,"cman":false,\
+"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
+"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":[],\
+"corosync_offline":["node-1","node-2"],"pacemaker_online":[],\
+"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":"cluster_name",\
+"resources":[],"groups":[],"constraints":{},"cluster_settings":{"error":\
+"Unable to get configuration settings"},"node_id":"","node_attr":{},\
+"fence_levels":{},"need_ring1_address":false,"is_cman_with_udpu_transport":\
+false,"acls":{},"username":"hacluster"}
+ """,
+ )
+ .http.add_communication(
+ "set_corosync_conf",
+ self.node_label_list,
+ action="remote/set_corosync_conf",
+ param_list=[("corosync_conf", self.corosync_conf_text)],
+ response_code=200,
+ output="Succeeded",
+ )
+ .runner.systemctl.is_active("corosync", is_active=False)
+ )
+ self.env_assistant.get_env().push_corosync_conf(
+ self.corosync_conf_facade
+ )
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
+ fixture.info(
+ report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
+ node="node-1",
+ ),
+ fixture.info(
+ report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
+ node="node-2",
+ ),
+ fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-1",
+ ),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-2",
+ ),
+ ])
+
+ def test_need_stopped_cluster_not_stopped(self, mock_is_systemctl):
+ self.corosync_conf_facade.need_stopped_cluster = True
+ mock_is_systemctl.return_value = True
+ (self.config
+ .http.add_communication(
+ "status",
+ self.node_label_list,
+ action="remote/status",
+ response_code=200,
+ output="""
+{"uptime":"0 days, 06:29:36","corosync":true,"pacemaker":true,"cman":false,\
+"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
+"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":\
+["node-1","node-2"],"corosync_offline":[],"pacemaker_online":["node-1",\
+"node-2"],"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":\
+"cluster_name","resources":[],"groups":[],"constraints":{},"cluster_settings":\
+{"have-watchdog":"false","dc-version":"1.1.16-11.el7-94ff4df",\
+"cluster-infrastructure":"corosync","cluster-name":"cluster_name"},\
+"node_id":"1","node_attr":{},"fence_levels":{},"need_ring1_address":false,\
+"is_cman_with_udpu_transport":false,"acls":{"role":{},"group":{},"user":{},\
+"target":{}},"username":"hacluster"}
+ """,
+ )
)
+ env = self.env_assistant.get_env()
+ self.env_assistant.assert_raise_library_error(
+ lambda: env.push_corosync_conf(self.corosync_conf_facade),
+ []
+ )
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
+ fixture.error(
+ report_codes.COROSYNC_RUNNING_ON_NODE,
+ node="node-1",
+ ),
+ fixture.error(
+ report_codes.COROSYNC_RUNNING_ON_NODE,
+ node="node-2",
+ ),
+ ])
+
+ def test_need_stopped_cluster_not_stopped_skip_offline(
+ self, mock_is_systemctl
+ ):
+ mock_is_systemctl.return_value = True
+ self.corosync_conf_facade.need_stopped_cluster = True
+ (self.config
+ .http.add_communication(
+ "status",
+ [
+ dict(
+ label="node-1",
+ output="""\
+{"uptime":"0 days, 06:36:00","corosync":true,"pacemaker":true,"cman":false,\
+"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
+"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":\
+["node-1"],"corosync_offline":["node-2"],"pacemaker_online":["node-1"],\
+"pacemaker_offline":["node-2"],"pacemaker_standby":[],"cluster_name":\
+"cluster_name","resources":[],"groups":[],"constraints":{},"cluster_settings":\
+{"have-watchdog":"false","dc-version":"1.1.16-11.el7-94ff4df",\
+"cluster-infrastructure":"corosync","cluster-name":"cluster_name"},\
+"node_id":"1","node_attr":{},"fence_levels":{},"need_ring1_address":false,\
+"is_cman_with_udpu_transport":false,"acls":{"role":{},"group":{},"user":{},\
+"target":{}},"username":"hacluster"}
+ """,
+ ),
+ dict(
+ label="node-2",
+ output="""\
+{"uptime":"0 days, 06:35:58","corosync":false,"pacemaker":false,"cman":false,\
+"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
+"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":[],\
+"corosync_offline":["node-1","node-2"],"pacemaker_online":[],\
+"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":"cluster_name",\
+"resources":[],"groups":[],"constraints":{},"cluster_settings":\
+{"error":"Unable to get configuration settings"},"node_id":"","node_attr":{},\
+"fence_levels":{},"need_ring1_address":false,"is_cman_with_udpu_transport":\
+false,"acls":{},"username":"hacluster"}
+ """,
+ ),
+ ],
+ action="remote/status",
+ response_code=200,
+ )
+ )
+ env = self.env_assistant.get_env()
+ self.env_assistant.assert_raise_library_error(
+ lambda: env.push_corosync_conf(
+ self.corosync_conf_facade, skip_offline_nodes=True
+ ),
+ []
+ )
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
+ fixture.error(
+ report_codes.COROSYNC_RUNNING_ON_NODE,
+ node="node-1",
+ ),
+ fixture.info(
+ report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
+ node="node-2",
+ )
+ ])
- def test_run_only_push_when_without_wait(self, wait_for_idle, push_cib_xml):
- self.env.push_cib(etree.fromstring("<cib/>"))
- push_cib_xml.assert_called_once_with("<cib/>")
- wait_for_idle.assert_not_called()
+ def test_need_stopped_cluster_communication_failure(
+ self, mock_is_systemctl
+ ):
+ mock_is_systemctl.return_value = True
+ self.corosync_conf_facade.need_stopped_cluster = True
+ (self.config
+ .http.add_communication(
+ "status",
+ [
+ dict(
+ label="node-1",
+ response_code=200,
+ output="""\
+{"uptime":"0 days, 00:11:52","corosync":false,"pacemaker":false,"cman":false,\
+"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
+"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":[],\
+"corosync_offline":["node-1","node-2"],"pacemaker_online":[],\
+"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":"cluster_name",\
+"resources":[],"groups":[],"constraints":{},"cluster_settings":\
+{"error":"Unable to get configuration settings"},"node_id":"","node_attr":{},\
+"fence_levels":{},"need_ring1_address":false,"is_cman_with_udpu_transport":\
+false,"acls":{},"username":"hacluster"}
+ """,
+ ),
+ dict(
+ label="node-2",
+ response_code=401,
+ output="""{"notauthorized":"true"}"""
+ ),
+ ],
+ action="remote/status",
+ )
+ )
+ env = self.env_assistant.get_env()
+ self.env_assistant.assert_raise_library_error(
+ lambda: env.push_corosync_conf(self.corosync_conf_facade),
+ []
+ )
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
+ fixture.info(
+ report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
+ node="node-1",
+ ),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node="node-2",
+ ),
+ fixture.error(
+ report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node="node-2",
+ ),
+ ])
- def test_run_wait_when_wait_specified(self, wait_for_idle, push_cib_xml):
- self.env.push_cib(etree.fromstring("<cib/>"), 10)
- push_cib_xml.assert_called_once_with("<cib/>")
- wait_for_idle.assert_called_once_with(self.env.cmd_runner(), 10)
+ def test_need_stopped_cluster_communication_failure_skip_offline(
+ self, mock_is_systemctl
+ ):
+ mock_is_systemctl.return_value = True
+ self.corosync_conf_facade.need_stopped_cluster = True
+ (self.config
+ .http.add_communication(
+ "status",
+ [
+ dict(
+ label="node-1",
+ response_code=200,
+ output="""\
+{"uptime":"0 days, 00:11:52","corosync":false,"pacemaker":false,"cman":false,\
+"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
+"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":[],\
+"corosync_offline":["node-1","node-2"],"pacemaker_online":[],\
+"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":"cluster_name",\
+"resources":[],"groups":[],"constraints":{},"cluster_settings":\
+{"error":"Unable to get configuration settings"},"node_id":"","node_attr":{},\
+"fence_levels":{},"need_ring1_address":false,"is_cman_with_udpu_transport":\
+false,"acls":{},"username":"hacluster"}
+ """,
+ ),
+ dict(
+ label="node-2",
+ response_code=401,
+ output="""{"notauthorized":"true"}"""
+ ),
+ ],
+ action="remote/status",
+ )
+ .http.add_communication(
+ "set_corosync_conf",
+ [
+ dict(
+ label="node-1",
+ response_code=200,
+ output="Succeeded",
+ ),
+ dict(
+ label="node-2",
+ response_code=401,
+ output="""{"notauthorized":"true"}""",
+ )
+ ],
+ action="remote/set_corosync_conf",
+ param_list=[("corosync_conf", self.corosync_conf_text)],
+ )
+ .runner.systemctl.is_active("corosync", is_active=False)
+ )
+ self.env_assistant.get_env().push_corosync_conf(
+ self.corosync_conf_facade, skip_offline_nodes=True
+ )
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
+ fixture.info(
+ report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
+ node="node-1",
+ ),
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+ node="node-2",
+ reason="HTTP error: 401",
+ command="remote/status",
+ ),
+ fixture.warn(
+ report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+ node="node-2",
+ ),
+ fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-1",
+ ),
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+ node="node-2",
+ reason="HTTP error: 401",
+ command="remote/set_corosync_conf",
+ ),
+ fixture.warn(
+ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+ node="node-2",
+ ),
+ ])
+
+
+@mock.patch("pcs.lib.external.is_systemctl")
+class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
+ def test_qdevice_reload(self, mock_is_systemctl):
+ mock_is_systemctl.return_value = True
+ self.corosync_conf_facade.need_qdevice_reload = True
+ (self.config
+ .http.add_communication(
+ "set_corosync_conf",
+ self.node_label_list,
+ action="remote/set_corosync_conf",
+ param_list=[("corosync_conf", self.corosync_conf_text)],
+ response_code=200,
+ output="Succeeded",
+ )
+ .runner.systemctl.is_active("corosync", is_active=False)
+ .http.add_communication(
+ "qdevice_client_stop",
+ self.node_label_list,
+ action="remote/qdevice_client_stop",
+ response_code=200,
+ output="corosync-qdevice stopped",
+ )
+ .http.add_communication(
+ "qdevice_client_start",
+ self.node_label_list,
+ action="remote/qdevice_client_start",
+ response_code=200,
+ output="corosync-qdevice started",
+ )
+ )
+ self.env_assistant.get_env().push_corosync_conf(
+ self.corosync_conf_facade
+ )
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-1",
+ ),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-2",
+ ),
+ fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
+ fixture.info(
+ report_codes.SERVICE_STOP_SUCCESS,
+ node="node-1",
+ service="corosync-qdevice",
+ instance=None,
+ ),
+ fixture.info(
+ report_codes.SERVICE_STOP_SUCCESS,
+ node="node-2",
+ service="corosync-qdevice",
+ instance=None,
+ ),
+ fixture.info(
+ report_codes.SERVICE_START_SUCCESS,
+ node="node-1",
+ service="corosync-qdevice",
+ instance=None,
+ ),
+ fixture.info(
+ report_codes.SERVICE_START_SUCCESS,
+ node="node-2",
+ service="corosync-qdevice",
+ instance=None,
+ ),
+ ])
+
+ def test_qdevice_reload_failures(self, mock_is_systemctl):
+ mock_is_systemctl.return_value = True
+ self.corosync_conf_facade.need_qdevice_reload = True
+ (self.config
+ .http.add_communication(
+ "set_corosync_conf",
+ self.node_label_list,
+ action="remote/set_corosync_conf",
+ param_list=[("corosync_conf", self.corosync_conf_text)],
+ response_code=200,
+ output="Succeeded",
+ )
+ .runner.systemctl.is_active("corosync", is_active=False)
+ .http.add_communication(
+ "qdevice_client_stop",
+ [
+ dict(
+ label="node-1",
+ response_code=200,
+ output="corosync-qdevice stopped",
+ ),
+ dict(
+ label="node-2",
+ response_code=400,
+ output="error",
+ ),
+ ],
+ action="remote/qdevice_client_stop",
+ )
+ .http.add_communication(
+ "qdevice_client_start",
+ [
+ dict(
+ label="node-1",
+ errno=8,
+ error_msg="failure",
+ was_connected=False,
+ ),
+ dict(
+ label="node-2",
+ response_code=200,
+ output="corosync-qdevice started",
+ ),
+ ],
+ action="remote/qdevice_client_start",
+ )
+ )
+ env = self.env_assistant.get_env()
+ self.env_assistant.assert_raise_library_error(
+ lambda: env.push_corosync_conf(self.corosync_conf_facade),
+ []
+ )
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-1",
+ ),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-2",
+ ),
+ fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
+ fixture.info(
+ report_codes.SERVICE_STOP_SUCCESS,
+ node="node-1",
+ service="corosync-qdevice",
+ instance=None,
+ ),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node="node-2",
+ reason="error",
+ command="remote/qdevice_client_stop",
+ ),
+ fixture.error(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ force_code=report_codes.SKIP_OFFLINE_NODES,
+ node="node-1",
+ reason="failure",
+ command="remote/qdevice_client_start",
+ ),
+ fixture.info(
+ report_codes.SERVICE_START_SUCCESS,
+ node="node-2",
+ service="corosync-qdevice",
+ instance=None,
+ ),
+ ])
+
+ def test_qdevice_reload_failures_skip_offline(self, mock_is_systemctl):
+ mock_is_systemctl.return_value = True
+ self.corosync_conf_facade.need_qdevice_reload = True
+ (self.config
+ .http.add_communication(
+ "set_corosync_conf",
+ [
+ dict(
+ label="node-1",
+ response_code=200,
+ output="Succeeded",
+ ),
+ dict(
+ label="node-2",
+ errno=8,
+ error_msg="failure",
+ was_connected=False,
+ ),
+ ],
+ action="remote/set_corosync_conf",
+ param_list=[("corosync_conf", self.corosync_conf_text)],
+ )
+ .runner.systemctl.is_active("corosync", is_active=False)
+ .http.add_communication(
+ "qdevice_client_stop",
+ [
+ dict(
+ label="node-1",
+ response_code=200,
+ output="corosync-qdevice stopped",
+ ),
+ dict(
+ label="node-2",
+ response_code=400,
+ output="error",
+ ),
+ ],
+ action="remote/qdevice_client_stop",
+ )
+ .http.add_communication(
+ "qdevice_client_start",
+ [
+ dict(
+ label="node-1",
+ errno=8,
+ error_msg="failure",
+ was_connected=False,
+ ),
+ dict(
+ label="node-2",
+ response_code=200,
+ output="corosync-qdevice started",
+ ),
+ ],
+ action="remote/qdevice_client_start",
+ )
+ )
+ env = self.env_assistant.get_env()
+ env.push_corosync_conf(
+ self.corosync_conf_facade, skip_offline_nodes=True
+ )
+ self.env_assistant.assert_reports([
+ fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
+ fixture.info(
+ report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+ node="node-1",
+ ),
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node="node-2",
+ reason="failure",
+ command="remote/set_corosync_conf",
+ ),
+ fixture.warn(
+ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+ node="node-2",
+ ),
+ fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
+ fixture.info(
+ report_codes.SERVICE_STOP_SUCCESS,
+ node="node-1",
+ service="corosync-qdevice",
+ instance=None,
+ ),
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ node="node-2",
+ reason="error",
+ command="remote/qdevice_client_stop",
+ ),
+ fixture.warn(
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ node="node-1",
+ reason="failure",
+ command="remote/qdevice_client_start",
+ ),
+ fixture.info(
+ report_codes.SERVICE_START_SUCCESS,
+ node="node-2",
+ service="corosync-qdevice",
+ instance=None,
+ ),
+ ])
+
+
+class PushCorosyncConfFile(TestCase):
+ def setUp(self):
+ self.env_assistant, self.config = get_env_tools(test_case=self)
+ self.config.env.set_corosync_conf_data("totem {\n version: 2\n}\n")
+
+ def test_success(self):
+ env = self.env_assistant.get_env()
+ new_corosync_conf_data = "totem {\n version: 3\n}\n"
+ env.push_corosync_conf(
+ CorosyncConfigFacade.from_string(new_corosync_conf_data)
+ )
+ self.assertEqual(new_corosync_conf_data, env.get_corosync_conf_data())
+
+
+class GetCorosyncConfFile(TestCase):
+ def setUp(self):
+ self.corosync_conf_data = "totem {\n version: 2\n}\n"
+ self.env_assistant, self.config = get_env_tools(test_case=self)
+ self.config.env.set_corosync_conf_data(self.corosync_conf_data)
+
+ def test_success(self):
+ env = self.env_assistant.get_env()
+ self.assertFalse(env.is_corosync_conf_live)
+ self.assertEqual(self.corosync_conf_data, env.get_corosync_conf_data())
+ self.assertEqual(
+ self.corosync_conf_data, env.get_corosync_conf().config.export()
+ )
+
+
+class GetCorosyncConfLive(TestCase):
+ def setUp(self):
+ self.env_assistant, self.config = get_env_tools(self)
+
+ def test_success(self):
+ corosync_conf_data = "totem {\n version: 2\n}\n"
+ self.config.corosync_conf.load_content(corosync_conf_data)
+ env = self.env_assistant.get_env()
+ self.assertTrue(env.is_corosync_conf_live)
+ self.assertEqual(
+ corosync_conf_data, env.get_corosync_conf().config.export()
+ )
diff --git a/pcs/lib/test/test_env_cib.py b/pcs/lib/test/test_env_cib.py
new file mode 100644
index 0000000..33d4812
--- /dev/null
+++ b/pcs/lib/test/test_env_cib.py
@@ -0,0 +1,448 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import logging
+from functools import partial
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.env import LibraryEnvironment
+from pcs.lib.errors import ReportItemSeverity as severity
+from pcs.test.tools import fixture
+from pcs.test.tools.assertions import assert_xml_equal
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.misc import get_test_resource as rc, create_patcher
+from pcs.test.tools.pcs_unittest import TestCase, mock
+from pcs.test.tools.xml import etree_to_str
+
+
+patch_env = create_patcher("pcs.lib.env")
+patch_env_object = partial(mock.patch.object, LibraryEnvironment)
+
+def mock_tmpfile(filename):
+ mock_file = mock.MagicMock()
+ mock_file.name = rc(filename)
+ return mock_file
+
+@patch_env_object("push_cib_diff")
+@patch_env_object("push_cib_full")
+class CibPushProxy(TestCase):
+ def setUp(self):
+ self.env = LibraryEnvironment(
+ mock.MagicMock(logging.Logger),
+ MockLibraryReportProcessor()
+ )
+ get_cib_patcher = patch_env_object(
+ "get_cib",
+ lambda self: "<cib />"
+ )
+ self.addCleanup(get_cib_patcher.stop)
+ get_cib_patcher.start()
+
+ def test_push_loaded(self, mock_push_full, mock_push_diff):
+ self.env.get_cib()
+ self.env.push_cib()
+ mock_push_full.assert_not_called()
+ mock_push_diff.assert_called_once_with(False)
+
+ def test_push_loaded_wait(self, mock_push_full, mock_push_diff):
+ self.env.get_cib()
+ self.env.push_cib(wait=10)
+ mock_push_full.assert_not_called()
+ mock_push_diff.assert_called_once_with(10)
+
+ def test_push_custom(self, mock_push_full, mock_push_diff):
+ self.env.get_cib()
+ self.env.push_cib(custom_cib="<cib />")
+ mock_push_full.assert_called_once_with("<cib />", False)
+ mock_push_diff.assert_not_called()
+
+ def test_push_custom_wait(self, mock_push_full, mock_push_diff):
+ self.env.get_cib()
+ self.env.push_cib(custom_cib="<cib />", wait=10)
+ mock_push_full.assert_called_once_with("<cib />", 10)
+ mock_push_diff.assert_not_called()
+
+
+class IsCibLive(TestCase):
+ def test_is_live_when_no_cib_data_specified(self):
+ env_assist, _ = get_env_tools(test_case=self)
+ self.assertTrue(env_assist.get_env().is_cib_live)
+
+ def test_is_not_live_when_cib_data_specified(self):
+ env_assist, config = get_env_tools(test_case=self)
+ config.env.set_cib_data("<cib/>")
+ self.assertFalse(env_assist.get_env().is_cib_live)
+
+
+class WaitSupportWithLiveCib(TestCase):
+ wait_timeout = 10
+
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+ self.config.runner.cib.load()
+
+ def test_supports_timeout(self):
+ (self.config
+ .runner.pcmk.can_wait()
+ .runner.cib.push()
+ .runner.pcmk.wait(timeout=self.wait_timeout)
+ )
+
+ env = self.env_assist.get_env()
+ env.get_cib()
+ env.push_cib_full(wait=self.wait_timeout)
+
+ self.env_assist.assert_reports([])
+
+ def test_does_not_support_timeout_without_pcmk_support(self):
+ self.config.runner.pcmk.can_wait(stdout="cannot wait")
+
+ env = self.env_assist.get_env()
+ env.get_cib()
+ self.env_assist.assert_raise_library_error(
+ lambda: env.push_cib_full(wait=self.wait_timeout),
+ [
+ fixture.error(report_codes.WAIT_FOR_IDLE_NOT_SUPPORTED),
+ ],
+ expected_in_processor=False
+ )
+
+ def test_raises_on_invalid_value(self):
+ self.config.runner.pcmk.can_wait()
+
+ env = self.env_assist.get_env()
+ env.get_cib()
+ self.env_assist.assert_raise_library_error(
+ lambda: env.push_cib_full(wait="abc"),
+ [
+ fixture.error(
+ report_codes.INVALID_TIMEOUT_VALUE,
+ timeout="abc"
+ ),
+ ],
+ expected_in_processor=False
+ )
+
+
+class WaitSupportWithMockedCib(TestCase):
+ def test_does_not_support_timeout(self):
+ env_assist, config = get_env_tools(test_case=self)
+ (config
+ .env.set_cib_data("<cib/>")
+ .runner.cib.load()
+ )
+
+ env = env_assist.get_env()
+ env.get_cib()
+ env_assist.assert_raise_library_error(
+ lambda: env.push_cib_full(wait=10),
+ [
+ fixture.error(report_codes.WAIT_FOR_IDLE_NOT_LIVE_CLUSTER),
+ ],
+ expected_in_processor=False
+ )
+
+
+class ManageCibAssertionMixin(object):
+ def assert_raises_cib_error(self, callable_obj, message):
+ with self.assertRaises(AssertionError) as context_manager:
+ callable_obj()
+ self.assertEqual(str(context_manager.exception), message)
+
+ def assert_raises_cib_not_loaded(self, callable_obj):
+ self.assert_raises_cib_error(callable_obj, "CIB has not been loaded")
+
+
+
+class CibPushFull(TestCase, ManageCibAssertionMixin):
+ custom_cib = "<custom_cib />"
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
+ def test_push_custom_without_get(self):
+ self.config.runner.cib.push_independent(self.custom_cib)
+ self.env_assist.get_env().push_cib_full(etree.XML(self.custom_cib))
+
+ def test_push_custom_after_get(self):
+ self.config.runner.cib.load()
+ env = self.env_assist.get_env()
+ env.get_cib()
+
+ with self.assertRaises(AssertionError) as context_manager:
+ env.push_cib_full(etree.XML(self.custom_cib))
+ self.assertEqual(
+ str(context_manager.exception),
+ "CIB has been loaded, cannot push custom CIB"
+ )
+
+ def test_push_fails(self):
+ (self.config
+ .runner.cib.load()
+ .runner.cib.push(stderr="invalid cib", returncode=1)
+ )
+ env = self.env_assist.get_env()
+ env.get_cib()
+ self.env_assist.assert_raise_library_error(
+ env.push_cib_full,
+ [
+ (
+ severity.ERROR,
+ report_codes.CIB_PUSH_ERROR,
+ {
+ "reason": "invalid cib",
+ },
+ None
+ )
+ ],
+ expected_in_processor=False
+ )
+
+ def test_get_and_push(self):
+ (self.config
+ .runner.cib.load()
+ .runner.cib.push()
+ )
+ env = self.env_assist.get_env()
+ env.get_cib()
+ env.push_cib_full()
+
+ def test_can_get_after_push(self):
+ (self.config
+ .runner.cib.load()
+ .runner.cib.push()
+ .runner.cib.load(name="load_cib_2")
+ )
+
+ env = self.env_assist.get_env()
+ env.get_cib()
+ env.push_cib_full()
+ # need to use lambda because env.cib is a property
+ self.assert_raises_cib_not_loaded(lambda: env.cib)
+ env.get_cib()
+
+
+class CibPushDiff(TestCase, ManageCibAssertionMixin):
+ def setUp(self):
+ tmpfile_patcher = mock.patch("pcs.lib.pacemaker.live.write_tmpfile")
+ self.addCleanup(tmpfile_patcher.stop)
+ self.mock_write_tmpfile = tmpfile_patcher.start()
+ self.tmpfile_old = mock_tmpfile("old.cib")
+ self.tmpfile_new = mock_tmpfile("new.cib")
+ self.mock_write_tmpfile.side_effect = [
+ self.tmpfile_old, self.tmpfile_new
+ ]
+
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
+ def config_load_and_push(self, filename="cib-empty.xml"):
+ (self.config
+ .runner.cib.load(filename=filename)
+ .runner.cib.diff(self.tmpfile_old.name, self.tmpfile_new.name)
+ .runner.cib.push_diff()
+ )
+
+ def push_reports(self, strip_old=False):
+ # No test changes the CIB between load and push. The point is to test
+ # loading and pushing, not editing the CIB.
+ loaded_cib = self.config.calls.get("runner.cib.load").stdout
+ return [
+ (
+ severity.DEBUG,
+ report_codes.TMP_FILE_WRITE,
+ {
+ "file_path": self.tmpfile_old.name,
+ "content": loaded_cib.strip() if strip_old else loaded_cib,
+ },
+ None
+ ),
+ (
+ severity.DEBUG,
+ report_codes.TMP_FILE_WRITE,
+ {
+ "file_path": self.tmpfile_new.name,
+ "content": loaded_cib.strip(),
+ },
+ None
+ ),
+ ]
+
+ def assert_tmps_write_reported(self):
+ self.env_assist.assert_reports(self.push_reports())
+
+ def test_tmpfile_fails(self):
+ self.config.runner.cib.load()
+ self.mock_write_tmpfile.side_effect = EnvironmentError("test error")
+ env = self.env_assist.get_env()
+ env.get_cib()
+ self.env_assist.assert_raise_library_error(
+ env.push_cib_diff,
+ [
+ (
+ severity.ERROR,
+ report_codes.CIB_SAVE_TMP_ERROR,
+ {
+ "reason": "test error",
+ },
+ None
+ )
+ ],
+ expected_in_processor=False
+ )
+
+ def test_diff_fails(self):
+ (self.config
+ .runner.cib.load()
+ .runner.cib.diff(
+ self.tmpfile_old.name,
+ self.tmpfile_new.name,
+ stderr="invalid cib",
+ returncode=1
+ )
+ )
+ env = self.env_assist.get_env()
+ env.get_cib()
+ self.env_assist.assert_raise_library_error(
+ env.push_cib_diff,
+ [
+ (
+ severity.ERROR,
+ report_codes.CIB_DIFF_ERROR,
+ {
+ "reason": "invalid cib",
+ },
+ None
+ )
+ ],
+ expected_in_processor=False
+ )
+ self.assert_tmps_write_reported()
+
+ def test_push_fails(self):
+ (self.config
+ .runner.cib.load()
+ .runner.cib.diff(self.tmpfile_old.name, self.tmpfile_new.name)
+ .runner.cib.push_diff(stderr="invalid cib", returncode=1)
+ )
+ env = self.env_assist.get_env()
+ env.get_cib()
+ self.env_assist.assert_raise_library_error(
+ env.push_cib_diff,
+ [
+ (
+ severity.ERROR,
+ report_codes.CIB_PUSH_ERROR,
+ {
+ "reason": "invalid cib",
+ },
+ None
+ )
+ ],
+ expected_in_processor=False
+ )
+ self.assert_tmps_write_reported()
+
+ def test_get_and_push(self):
+ self.config_load_and_push()
+
+ env = self.env_assist.get_env()
+
+ env.get_cib()
+ env.push_cib_diff()
+ self.assert_tmps_write_reported()
+
+ def test_can_get_after_push(self):
+ self.config_load_and_push()
+ self.config.runner.cib.load(name="load_cib_2")
+
+ env = self.env_assist.get_env()
+ env.get_cib()
+ env.push_cib_diff()
+ # need to use lambda because env.cib is a property
+ self.assert_raises_cib_not_loaded(lambda: env.cib)
+ env.get_cib()
+ self.assert_tmps_write_reported()
+
+
+class UpgradeCib(TestCase):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
+ def test_get_and_push_cib_version_upgrade_needed(self):
+ (self.config
+ .runner.cib.load(name="load_cib_old")
+ .runner.cib.upgrade()
+ .runner.cib.load(filename="cib-empty-2.8.xml")
+ )
+ env = self.env_assist.get_env()
+ env.get_cib((2, 8, 0))
+
+ self.env_assist.assert_reports(
+ [fixture.info(report_codes.CIB_UPGRADE_SUCCESSFUL)]
+ )
+
+ def test_get_and_push_cib_version_upgrade_not_needed(self):
+ self.config.runner.cib.load(filename="cib-empty-2.6.xml")
+ env = self.env_assist.get_env()
+ env.get_cib((2, 5, 0))
+
+
+class ManageCib(TestCase, ManageCibAssertionMixin):
+ def setUp(self):
+ self.env_assist, self.config = get_env_tools(test_case=self)
+
+ def test_raise_library_error_when_cibadmin_failed(self):
+ stderr = "cibadmin: Connection to local file failed..."
+ (self.config
+ # The value of cib_data is unimportant here. The content is only
+ # written to a tempfile when the runner is not mocked; it is then
+ # loaded from that tempfile by `cibadmin --local --query`. The runner
+ # is mocked in these tests, so the value of cib_data is in fact not
+ # used.
+ .env.set_cib_data("whatever")
+ .runner.cib.load(returncode=203, stderr=stderr)
+ )
+
+ self.env_assist.assert_raise_library_error(
+ self.env_assist.get_env().get_cib,
+ [
+ fixture.error(report_codes.CIB_LOAD_ERROR, reason=stderr)
+ ],
+ expected_in_processor=False
+ )
+
+ def test_returns_cib_from_cib_data(self):
+ cib_filename = "cib-empty.xml"
+ (self.config
+ # The value of cib_data is unimportant here; see the sibling test for
+ # details.
+ .env.set_cib_data("whatever")
+ .runner.cib.load(filename=cib_filename)
+ )
+ assert_xml_equal(
+ etree_to_str(self.env_assist.get_env().get_cib()),
+ open(rc(cib_filename)).read()
+ )
+
+ def test_get_and_property(self):
+ self.config.runner.cib.load()
+ env = self.env_assist.get_env()
+ self.assertEqual(env.get_cib(), env.cib)
+
+ def test_property_without_get(self):
+ env = self.env_assist.get_env()
+ # need to use lambda because env.cib is a property
+ self.assert_raises_cib_not_loaded(lambda: env.cib)
+
+ def test_double_get(self):
+ self.config.runner.cib.load()
+ env = self.env_assist.get_env()
+ env.get_cib()
+ self.assert_raises_cib_error(env.get_cib, "CIB has already been loaded")
+
+ def test_push_without_get(self):
+ env = self.env_assist.get_env()
+ self.assert_raises_cib_not_loaded(env.push_cib_diff)
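
Taken together, these tests pin down the CIB handling protocol of the
environment: the CIB must be loaded before it can be pushed, and a push
invalidates the cached CIB until the next load. A sketch of the intended
call sequence (assuming env is a LibraryEnvironment):

    cib = env.get_cib()   # load and cache the CIB (an lxml element)
    # ... modify cib in place ...
    env.push_cib()        # pushes a diff against the previously loaded CIB
    env.get_cib()         # required again before any further push
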
diff --git a/pcs/lib/test/test_env_file.py b/pcs/lib/test/test_env_file.py
index f9b7b57..27268df 100644
--- a/pcs/lib/test/test_env_file.py
+++ b/pcs/lib/test/test_env_file.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/lib/test/test_errors.py b/pcs/lib/test/test_errors.py
index 323e9f7..b108f6c 100644
--- a/pcs/lib/test/test_errors.py
+++ b/pcs/lib/test/test_errors.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/lib/test/test_node_communication.py b/pcs/lib/test/test_node_communication.py
new file mode 100644
index 0000000..56c0da2
--- /dev/null
+++ b/pcs/lib/test/test_node_communication.py
@@ -0,0 +1,539 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import io
+import logging
+
+from pcs.test.tools.assertions import assert_report_item_equal
+from pcs.test.tools.custom_mock import (
+ MockCurl,
+ MockCurlSimple,
+ MockLibraryReportProcessor,
+)
+from pcs.test.tools.misc import outdent
+from pcs.test.tools.pcs_unittest import (
+ mock,
+ TestCase,
+)
+
+from pcs.common import (
+ pcs_pycurl as pycurl,
+ report_codes,
+)
+from pcs.common.node_communicator import (
+ Request,
+ RequestData,
+ RequestTarget,
+ Response,
+)
+from pcs.lib.errors import ReportItemSeverity as severity
+import pcs.lib.node_communication as lib
+
+
+class ResponseToReportItemTest(TestCase):
+ def fixture_response_connected(self, response_code):
+ handle = MockCurl({pycurl.RESPONSE_CODE: response_code})
+ handle.request_obj = Request(
+ RequestTarget(self.host), RequestData(self.request)
+ )
+ handle.output_buffer = io.BytesIO()
+ handle.output_buffer.write(self.data)
+ return Response.connection_successful(handle)
+
+ def fixture_response_not_connected(self, errno, error_msg):
+ handle = MockCurl()
+ handle.request_obj = Request(
+ RequestTarget(self.host), RequestData(self.request)
+ )
+ return Response.connection_failure(handle, errno, error_msg)
+
+ def setUp(self):
+ self.host = "host"
+ self.request = "request"
+ self.data = b"data"
+
+ def test_code_200(self):
+ self.assertIsNone(
+ lib.response_to_report_item(self.fixture_response_connected(200))
+ )
+
+ def test_code_400(self):
+ assert_report_item_equal(
+ lib.response_to_report_item(self.fixture_response_connected(400)),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ {
+ "node": self.host,
+ "command": self.request,
+ "reason": self.data.decode("utf-8")
+ },
+ None
+ )
+ )
+
+ def test_code_401(self):
+ assert_report_item_equal(
+ lib.response_to_report_item(self.fixture_response_connected(401)),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+ {
+ "node": self.host,
+ "command": self.request,
+ "reason": "HTTP error: 401"
+ },
+ None
+ )
+ )
+
+ def test_code_403(self):
+ assert_report_item_equal(
+ lib.response_to_report_item(self.fixture_response_connected(403)),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_PERMISSION_DENIED,
+ {
+ "node": self.host,
+ "command": self.request,
+ "reason": "HTTP error: 403"
+ },
+ None
+ )
+ )
+
+ def test_code_404(self):
+ assert_report_item_equal(
+ lib.response_to_report_item(self.fixture_response_connected(404)),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND,
+ {
+ "node": self.host,
+ "command": self.request,
+ "reason": "HTTP error: 404"
+ },
+ None
+ )
+ )
+
+ def test_code_other(self):
+ assert_report_item_equal(
+ lib.response_to_report_item(self.fixture_response_connected(500)),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR,
+ {
+ "node": self.host,
+ "command": self.request,
+ "reason": "HTTP error: 500"
+ },
+ None
+ )
+ )
+
+ def test_timed_out(self):
+ response = self.fixture_response_not_connected(
+ pycurl.E_OPERATION_TIMEDOUT, "err"
+ )
+ assert_report_item_equal(
+ lib.response_to_report_item(response),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_TIMED_OUT,
+ {
+ "node": self.host,
+ "command": self.request,
+ "reason": "err"
+ },
+ None
+ )
+ )
+
+ def test_timedouted(self):
+ response = self.fixture_response_not_connected(
+ pycurl.E_OPERATION_TIMEOUTED, "err"
+ )
+ assert_report_item_equal(
+ lib.response_to_report_item(response),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_TIMED_OUT,
+ {
+ "node": self.host,
+ "command": self.request,
+ "reason": "err"
+ },
+ None
+ )
+ )
+
+ def test_unable_to_connect(self):
+ response = self.fixture_response_not_connected(
+ pycurl.E_SEND_ERROR, "err"
+ )
+ assert_report_item_equal(
+ lib.response_to_report_item(response),
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+ {
+ "node": self.host,
+ "command": self.request,
+ "reason": "err"
+ },
+ None
+ )
+ )
+
+
+class IsProxySetTest(TestCase):
+ def test_without_proxy(self):
+ self.assertFalse(lib.is_proxy_set({
+ "var1": "value",
+ "var2": "val",
+ }))
+
+ def test_multiple(self):
+ self.assertTrue(lib.is_proxy_set({
+ "var1": "val",
+ "https_proxy": "test.proxy",
+ "var2": "val",
+ "all_proxy": "test2.proxy",
+ "var3": "val",
+ }))
+
+ def test_empty_string(self):
+ self.assertFalse(lib.is_proxy_set({
+ "all_proxy": "",
+ }))
+
+ def test_http_proxy(self):
+ self.assertFalse(lib.is_proxy_set({
+ "http_proxy": "test.proxy",
+ }))
+
+ def test_HTTP_PROXY(self):
+ self.assertFalse(lib.is_proxy_set({
+ "HTTP_PROXY": "test.proxy",
+ }))
+
+ def test_https_proxy(self):
+ self.assertTrue(lib.is_proxy_set({
+ "https_proxy": "test.proxy",
+ }))
+
+ def test_HTTPS_PROXY(self):
+ self.assertTrue(lib.is_proxy_set({
+ "HTTPS_PROXY": "test.proxy",
+ }))
+
+ def test_all_proxy(self):
+ self.assertTrue(lib.is_proxy_set({
+ "all_proxy": "test.proxy",
+ }))
+
+ def test_ALL_PROXY(self):
+ self.assertTrue(lib.is_proxy_set({
+ "ALL_PROXY": "test.proxy",
+ }))
+
+ def test_no_proxy(self):
+ self.assertTrue(lib.is_proxy_set({
+ "no_proxy": "*",
+ "all_proxy": "test.proxy",
+ }))
+
+
+def fixture_logger_call_send(url, data):
+ send_msg = "Sending HTTP Request to: {url}"
+ if data:
+ send_msg += "\n--Debug Input Start--\n{data}\n--Debug Input End--"
+ return mock.call.debug(send_msg.format(url=url, data=data))
+
+
+def fixture_logger_call_debug_data(url, data):
+ send_msg = outdent("""\
+ Communication debug info for calling: {url}
+ --Debug Communication Info Start--
+ {data}
+ --Debug Communication Info End--"""
+ )
+ return mock.call.debug(send_msg.format(url=url, data=data))
+
+
+def fixture_logger_call_connected(url, response_code, response_data):
+ result_msg = (
+ "Finished calling: {url}\nResponse Code: {code}"
+ + "\n--Debug Response Start--\n{response}\n--Debug Response End--"
+ )
+ return mock.call.debug(result_msg.format(
+ url=url, code=response_code, response=response_data
+ ))
+
+
+def fixture_logger_call_not_connected(node, reason):
+ msg = "Unable to connect to {node} ({reason})"
+ return mock.call.debug(msg.format(node=node, reason=reason))
+
+
+def fixture_logger_call_proxy_set():
+ return mock.call.warning("Proxy is set")
+
+
+def fixture_logger_calls_on_success(
+ url, response_code, response_data, debug_data
+):
+ return [
+ fixture_logger_call_connected(url, response_code, response_data),
+ fixture_logger_call_debug_data(url, debug_data),
+ ]
+
+def fixture_report_item_list_send(url, data):
+ return [(
+ severity.DEBUG,
+ report_codes.NODE_COMMUNICATION_STARTED,
+ {
+ "target": url,
+ "data": data,
+ }
+ )]
+
+
+def fixture_report_item_list_debug(url, data):
+ return [(
+ severity.DEBUG,
+ report_codes.NODE_COMMUNICATION_DEBUG_INFO,
+ {
+ "target": url,
+ "data": data,
+ }
+ )]
+
+
+def fixture_report_item_list_connected(url, response_code, response_data):
+ return [(
+ severity.DEBUG,
+ report_codes.NODE_COMMUNICATION_FINISHED,
+ {
+ "target": url,
+ "response_code": response_code,
+ "response_data": response_data,
+ }
+ )]
+
+
+def fixture_report_item_list_not_connected(node, reason):
+ return [(
+ severity.DEBUG,
+ report_codes.NODE_COMMUNICATION_NOT_CONNECTED,
+ {
+ "node": node,
+ "reason": reason,
+ },
+ None
+ )]
+
+
+def fixture_report_item_list_proxy_set(node, address):
+ return [(
+ severity.WARNING,
+ report_codes.NODE_COMMUNICATION_PROXY_IS_SET,
+ {
+ "node": node,
+ "address": address,
+ },
+ None
+ )]
+
+
+def fixture_report_item_list_on_success(
+ url, response_code, response_data, debug_data
+):
+ return (
+ fixture_report_item_list_connected(url, response_code, response_data)
+ +
+ fixture_report_item_list_debug(url, debug_data)
+ )
+
+
+def fixture_request():
+ return Request(RequestTarget("host"), RequestData("action"))
+
+
+class CommunicatorLoggerTest(TestCase):
+ def setUp(self):
+ self.logger = mock.MagicMock(spec_set=logging.Logger)
+ self.reporter = MockLibraryReportProcessor()
+ self.com_logger = lib.LibCommunicatorLogger(self.logger, self.reporter)
+
+ def test_log_request_start(self):
+ request = fixture_request()
+ self.com_logger.log_request_start(request)
+ self.reporter.assert_reports(
+ fixture_report_item_list_send(request.url, request.data)
+ )
+ self.assertEqual(
+ [fixture_logger_call_send(request.url, request.data)],
+ self.logger.mock_calls
+ )
+
+ def test_log_response_connected(self):
+ expected_code = 200
+ expected_data = "data"
+ expected_debug_data = "* text\n>> data out\n"
+ response = Response.connection_successful(
+ MockCurlSimple(
+ info={pycurl.RESPONSE_CODE: expected_code},
+ output=expected_data.encode("utf-8"),
+ debug_output=expected_debug_data.encode("utf-8"),
+ request=fixture_request(),
+ )
+ )
+ self.com_logger.log_response(response)
+ self.reporter.assert_reports(
+ fixture_report_item_list_on_success(
+ response.request.url,
+ expected_code,
+ expected_data,
+ expected_debug_data
+ )
+ )
+ logger_calls = fixture_logger_calls_on_success(
+ response.request.url,
+ expected_code,
+ expected_data,
+ expected_debug_data
+ )
+ self.assertEqual(logger_calls, self.logger.mock_calls)
+
+ @mock.patch("pcs.lib.node_communication.is_proxy_set")
+ def test_log_response_not_connected(self, mock_proxy):
+ mock_proxy.return_value = False
+ expected_debug_data = "* text\n>> data out\n"
+ error_msg = "error"
+ response = Response.connection_failure(
+ MockCurlSimple(
+ debug_output=expected_debug_data.encode("utf-8"),
+ request=fixture_request(),
+ ),
+ pycurl.E_HTTP_POST_ERROR,
+ error_msg,
+ )
+ self.com_logger.log_response(response)
+ self.reporter.assert_reports(
+ fixture_report_item_list_not_connected(
+ response.request.host_label, error_msg
+ )
+ +
+ fixture_report_item_list_debug(
+ response.request.url, expected_debug_data
+ )
+ )
+ logger_calls = [
+ fixture_logger_call_not_connected(
+ response.request.host_label, error_msg
+ ),
+ fixture_logger_call_debug_data(
+ response.request.url, expected_debug_data
+ )
+ ]
+ self.assertEqual(logger_calls, self.logger.mock_calls)
+
+ @mock.patch("pcs.lib.node_communication.is_proxy_set")
+ def test_log_response_not_connected_with_proxy(self, mock_proxy):
+ mock_proxy.return_value = True
+ expected_debug_data = "* text\n>> data out\n"
+ error_msg = "error"
+ response = Response.connection_failure(
+ MockCurlSimple(
+ debug_output=expected_debug_data.encode("utf-8"),
+ request=fixture_request(),
+ ),
+ pycurl.E_HTTP_POST_ERROR,
+ error_msg,
+ )
+ self.com_logger.log_response(response)
+ self.reporter.assert_reports(
+ fixture_report_item_list_not_connected(
+ response.request.host_label, error_msg
+ )
+ +
+ fixture_report_item_list_proxy_set(
+ response.request.host_label, response.request.host
+ )
+ +
+ fixture_report_item_list_debug(
+ response.request.url, expected_debug_data
+ )
+ )
+ logger_calls = [
+ fixture_logger_call_not_connected(
+ response.request.host_label, error_msg
+ ),
+ fixture_logger_call_proxy_set(),
+ fixture_logger_call_debug_data(
+ response.request.url, expected_debug_data
+ )
+ ]
+ self.assertEqual(logger_calls, self.logger.mock_calls)
+
+ def test_log_retry(self):
+ prev_host = "prev host"
+ response = Response.connection_failure(
+ MockCurlSimple(request=fixture_request()),
+ pycurl.E_HTTP_POST_ERROR,
+ "e",
+ )
+ self.com_logger.log_retry(response, prev_host)
+ self.reporter.assert_reports([(
+ severity.WARNING,
+ report_codes.NODE_COMMUNICATION_RETRYING,
+ {
+ "node": response.request.host_label,
+ "failed_address": prev_host,
+ "next_address": response.request.host,
+ "request": response.request.url,
+ },
+ None
+ )])
+ logger_call = mock.call.warning(
+ (
+ "Unable to connect to '{label}' via address '{old_addr}'. "
+ "Retrying request '{req}' via address '{new_addr}'"
+ ).format(
+ label=response.request.host_label,
+ old_addr=prev_host,
+ new_addr=response.request.host,
+ req=response.request.url,
+ )
+ )
+ self.assertEqual([logger_call], self.logger.mock_calls)
+
+ def test_log_no_more_addresses(self):
+ response = Response.connection_failure(
+ MockCurlSimple(request=fixture_request()),
+ pycurl.E_HTTP_POST_ERROR,
+ "e"
+ )
+ self.com_logger.log_no_more_addresses(response)
+ self.reporter.assert_reports([(
+ severity.WARNING,
+ report_codes.NODE_COMMUNICATION_NO_MORE_ADDRESSES,
+ {
+ "node": response.request.host_label,
+ "request": response.request.url,
+ },
+ None
+ )])
+ logger_call = mock.call.warning(
+ "No more addresses for node {label} to run '{req}'".format(
+ label=response.request.host_label,
+ req=response.request.url,
+ )
+ )
+ self.assertEqual([logger_call], self.logger.mock_calls)
+
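In summary, the ResponseToReportItemTest cases above fix the mapping
implemented by pcs.lib.node_communication.response_to_report_item:

    # 200 -> None (success, no report item)
    # 400 -> NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL (response body as reason)
    # 401 -> NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED
    # 403 -> NODE_COMMUNICATION_ERROR_PERMISSION_DENIED
    # 404 -> NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND
    # other codes -> NODE_COMMUNICATION_ERROR
    # timeouts -> NODE_COMMUNICATION_ERROR_TIMED_OUT
    # connection failures -> NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT
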
diff --git a/pcs/lib/test/test_node_communication_format.py b/pcs/lib/test/test_node_communication_format.py
index 0cad76f..c6a5cfa 100644
--- a/pcs/lib/test/test_node_communication_format.py
+++ b/pcs/lib/test/test_node_communication_format.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.assertions import assert_raise_library_error
diff --git a/pcs/lib/test/test_nodes_task.py b/pcs/lib/test/test_nodes_task.py
index 5459337..249119b 100644
--- a/pcs/lib/test/test_nodes_task.py
+++ b/pcs/lib/test/test_nodes_task.py
@@ -2,12 +2,11 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import json
-from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase, skip
from pcs.test.tools.assertions import (
assert_raise_library_error,
@@ -22,10 +21,13 @@ from pcs.lib.external import NodeCommunicator, NodeAuthenticationException
from pcs.lib.node import NodeAddresses, NodeAddressesList
from pcs.lib.errors import ReportItemSeverity as severity
-import pcs.lib.nodes_task as lib
+# import pcs.lib.nodes_task as lib
+lib = mock.Mock()
+lib.__name__ = "nodes_task"
patch_nodes_task = create_patcher(lib)
+@skip("TODO: rewrite for pcs.lib.communication.corosync.DistributeCorosyncConf")
class DistributeCorosyncConfTest(TestCase):
def setUp(self):
self.mock_reporter = MockLibraryReportProcessor()
@@ -250,6 +252,7 @@ class DistributeCorosyncConfTest(TestCase):
]
)
+@skip("TODO: rewrite for pcs.lib.communication.corosync.CheckCorosyncOffline")
class CheckCorosyncOfflineTest(TestCase):
def setUp(self):
self.mock_reporter = MockLibraryReportProcessor()
@@ -445,7 +448,10 @@ class CheckCorosyncOfflineTest(TestCase):
]
)
-
+@skip(
+ "TODO: rewrite for pcs.lib.communication.qdevice.Stop and "
+ "pcs.lib.communication.qdevice.Start"
+)
@patch_nodes_task("qdevice_client.remote_client_stop")
@patch_nodes_task("qdevice_client.remote_client_start")
class QdeviceReloadOnNodesTest(TestCase):
@@ -591,7 +597,7 @@ class QdeviceReloadOnNodesTest(TestCase):
]
)
-
+@skip("TODO: rewrite for pcs.lib.communication.nodes.GetOnlineTargets")
class NodeCheckAuthTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -617,152 +623,7 @@ def assert_call_cause_reports(call, expected_report_items):
call(report_items)
assert_report_item_list_equal(report_items, expected_report_items)
-class CallForJson(TestCase):
- def setUp(self):
- self.node = NodeAddresses("node1")
- self.node_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-
- def make_call(self, report_items):
- lib._call_for_json(
- self.node_communicator,
- self.node,
- "some/path",
- report_items
- )
-
- def test_report_no_json_response(self):
- #leads to ValueError
- self.node_communicator.call_node = mock.Mock(return_value="bad answer")
- assert_call_cause_reports(self.make_call, [
- fixture_invalid_response_format(self.node.label)
- ])
-
- def test_process_communication_exception(self):
- self.node_communicator.call_node = mock.Mock(
- side_effect=NodeAuthenticationException("node", "request", "reason")
- )
- assert_call_cause_reports(self.make_call, [
- (
- severity.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
- {
- 'node': 'node',
- 'reason': 'reason',
- 'command': 'request'
- },
- report_codes.SKIP_OFFLINE_NODES,
- )
- ])
-
-class AvailabilityCheckerNode(TestCase):
- def setUp(self):
- self.node = "node1"
-
- def assert_result_causes_reports(
- self, availability_info, expected_report_items
- ):
- report_items = []
- lib.availability_checker_node(
- availability_info,
- report_items,
- self.node
- )
- assert_report_item_list_equal(report_items, expected_report_items)
-
- def test_no_reports_when_available(self):
- self.assert_result_causes_reports({"node_available": True}, [])
-
- def test_report_node_is_in_cluster(self):
- self.assert_result_causes_reports({"node_available": False}, [
- (
- severity.ERROR,
- report_codes.CANNOT_ADD_NODE_IS_IN_CLUSTER,
- {
- "node": self.node
- }
- ),
- ])
-
- def test_report_node_is_running_pacemaker_remote(self):
- self.assert_result_causes_reports(
- {"node_available": False, "pacemaker_remote": True},
- [
- (
- severity.ERROR,
- report_codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE,
- {
- "node": self.node,
- "service": "pacemaker_remote",
- }
- ),
- ]
- )
-
- def test_report_node_is_running_pacemaker(self):
- self.assert_result_causes_reports(
- {"node_available": False, "pacemaker_running": True},
- [
- (
- severity.ERROR,
- report_codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE,
- {
- "node": self.node,
- "service": "pacemaker",
- }
- ),
- ]
- )
-
-class AvailabilityCheckerRemoteNode(TestCase):
- def setUp(self):
- self.node = "node1"
-
- def assert_result_causes_reports(
- self, availability_info, expected_report_items
- ):
- report_items = []
- lib.availability_checker_remote_node(
- availability_info,
- report_items,
- self.node
- )
- assert_report_item_list_equal(report_items, expected_report_items)
-
- def test_no_reports_when_available(self):
- self.assert_result_causes_reports({"node_available": True}, [])
-
- def test_report_node_is_running_pacemaker(self):
- self.assert_result_causes_reports(
- {"node_available": False, "pacemaker_running": True},
- [
- (
- severity.ERROR,
- report_codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE,
- {
- "node": self.node,
- "service": "pacemaker",
- }
- ),
- ]
- )
-
- def test_report_node_is_in_cluster(self):
- self.assert_result_causes_reports({"node_available": False}, [
- (
- severity.ERROR,
- report_codes.CANNOT_ADD_NODE_IS_IN_CLUSTER,
- {
- "node": self.node
- }
- ),
- ])
-
- def test_no_reports_when_pacemaker_remote_there(self):
- self.assert_result_causes_reports(
- {"node_available": False, "pacemaker_remote": True},
- []
- )
-
+@skip("TODO: rewrite for pcs.lib.communication.nodes.PrecheckNewNode")
class CheckCanAddNodeToCluster(TestCase):
def setUp(self):
self.node = NodeAddresses("node1")
@@ -801,6 +662,10 @@ class OnNodeTest(TestCase):
return_value=json.dumps(result)
)
+@skip(
+ "TODO: rewrite for pcs.lib.communication.nodes.RunActionBase and its "
+ "descendants"
+)
class RunActionOnNode(OnNodeTest):
def make_call(self):
return lib.run_actions_on_node(
diff --git a/pcs/lib/test/test_resource_agent.py b/pcs/lib/test/test_resource_agent.py
index 1df618b..4da4467 100644
--- a/pcs/lib/test/test_resource_agent.py
+++ b/pcs/lib/test/test_resource_agent.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
@@ -979,7 +978,10 @@ class AgentMetadataGetActionsTest(TestCase):
)
-@patch_agent_object("DEFAULT_CIB_ACTION_NAMES", ["monitor", "start"])
+@patch_agent_object(
+ "_is_cib_default_action",
+ lambda self, action: action.get("name") == "monitor"
+)
@patch_agent_object("get_actions")
class AgentMetadataGetCibDefaultActions(TestCase):
def setUp(self):
@@ -987,22 +989,56 @@ class AgentMetadataGetCibDefaultActions(TestCase):
mock.MagicMock(spec_set=CommandRunner)
)
- def test_select_only_actions_for_cib(self, get_actions):
+ def test_complete_monitor(self, get_actions):
+ get_actions.return_value = [{"name": "meta-data"}]
+ self.assertEqual(
+ [{"name": "monitor", "interval": "60s"}],
+ self.agent.get_cib_default_actions()
+ )
+
+ def test_complete_intervals(self, get_actions):
get_actions.return_value = [
- {"name": "metadata"},
- {"name": "start", "interval": "40s"},
- {"name": "monitor", "interval": "10s", "timeout": "30s"},
+ {"name": "meta-data"},
+ {"name": "monitor", "timeout": "30s"},
]
self.assertEqual(
+ [{"name": "monitor", "interval": "60s", "timeout": "30s"}],
+ self.agent.get_cib_default_actions()
+ )
+
+
+@mock.patch.object(lib_ra.ResourceAgent, "get_actions")
+class ResourceAgentMetadataGetCibDefaultActions(TestCase):
+ fixture_actions = [
+ {"name": "custom1", "timeout": "40s"},
+ {"name": "custom2", "interval": "25s", "timeout": "60s"},
+ {"name": "meta-data"},
+ {"name": "monitor", "interval": "10s", "timeout": "30s"},
+ {"name": "start", "timeout": "40s"},
+ {"name": "status", "interval": "15s", "timeout": "20s"},
+ {"name": "validate-all"},
+ ]
+
+ def setUp(self):
+ self.agent = lib_ra.ResourceAgent(
+ mock.MagicMock(spec_set=CommandRunner),
+ "ocf:pacemaker:Dummy"
+ )
+
+ def test_select_only_actions_for_cib(self, get_actions):
+ get_actions.return_value = self.fixture_actions
+ self.assertEqual(
[
- {"name": "start", "interval": "40s"},
- {"name": "monitor", "interval": "10s", "timeout": "30s"}
+ {"name": "custom1", "interval": "0s", "timeout": "40s"},
+ {"name": "custom2", "interval": "25s", "timeout": "60s"},
+ {"name": "monitor", "interval": "10s", "timeout": "30s"},
+ {"name": "start", "interval": "0s", "timeout": "40s"},
],
self.agent.get_cib_default_actions()
)
def test_complete_monitor(self, get_actions):
- get_actions.return_value = [{"name": "metadata"}]
+ get_actions.return_value = [{"name": "meta-data"}]
self.assertEqual(
[{"name": "monitor", "interval": "60s"}],
self.agent.get_cib_default_actions()
@@ -1010,7 +1046,7 @@ class AgentMetadataGetCibDefaultActions(TestCase):
def test_complete_intervals(self, get_actions):
get_actions.return_value = [
- {"name": "metadata"},
+ {"name": "meta-data"},
{"name": "monitor", "timeout": "30s"},
]
self.assertEqual(
@@ -1019,11 +1055,7 @@ class AgentMetadataGetCibDefaultActions(TestCase):
)
def test_select_only_necessary_actions_for_cib(self, get_actions):
- get_actions.return_value = [
- {"name": "metadata"},
- {"name": "start", "interval": "40s"},
- {"name": "monitor", "interval": "10s", "timeout": "30s"},
- ]
+ get_actions.return_value = self.fixture_actions
self.assertEqual(
[
{"name": "monitor", "interval": "10s", "timeout": "30s"}
diff --git a/pcs/lib/test/test_validate.py b/pcs/lib/test/test_validate.py
index d646d3f..37848a2 100644
--- a/pcs/lib/test/test_validate.py
+++ b/pcs/lib/test/test_validate.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/test/test_xml_tools.py b/pcs/lib/test/test_xml_tools.py
similarity index 79%
rename from pcs/test/test_xml_tools.py
rename to pcs/lib/test/test_xml_tools.py
index 4dea1fd..e29ded1 100644
--- a/pcs/test/test_xml_tools.py
+++ b/pcs/lib/test/test_xml_tools.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
@@ -165,3 +164,42 @@ class EtreeElementAttributesToDictTest(TestCase):
self.el, ["id", "not_existing", "attribute"]
)
)
+
+class RemoveWhenPointless(TestCase):
+ def assert_count_tags_after_call(self, count, tag, **kwargs):
+ tree = etree.fromstring(
+ """
+ <root>
+ <empty />
+ <with-subelement>
+ <subelement/>
+ </with-subelement>
+ <with-attr some="attribute"/>
+ <with-only-id id="1"/>
+ </root>
+ """
+ )
+ xpath = ".//{0}".format(tag)
+ lib.remove_when_pointless(tree.find(xpath), **kwargs)
+ self.assertEqual(len(tree.xpath(xpath)), count)
+
+ def assert_remove(self, tag, **kwargs):
+ self.assert_count_tags_after_call(0, tag, **kwargs)
+
+ def assert_keep(self, tag, **kwargs):
+ self.assert_count_tags_after_call(1, tag, **kwargs)
+
+ def test_remove_empty(self):
+ self.assert_remove("empty")
+
+ def test_keep_with_subelement(self):
+ self.assert_keep("with-subelement")
+
+ def test_keep_when_attr(self):
+ self.assert_keep("with-attr")
+
+ def test_remove_when_attr_not_important(self):
+ self.assert_remove("with-attr", attribs_important=False)
+
+ def test_remove_when_only_id(self):
+ self.assert_remove("with-only-id")
diff --git a/pcs/lib/tools.py b/pcs/lib/tools.py
index b9d7505..0d0dc46 100644
--- a/pcs/lib/tools.py
+++ b/pcs/lib/tools.py
@@ -2,10 +2,10 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import binascii
import os
+import tempfile
def generate_key(random_bytes_count=32):
@@ -40,7 +40,6 @@ def environment_file_to_dict(config):
data[key.strip()] = value
return data
-
def dict_to_environment_file(config_dict):
"""
Convert data in dictionary to Environment file format.
@@ -55,3 +54,16 @@ def dict_to_environment_file(config_dict):
for key, val in sorted(config_dict.items()):
lines.append("{key}={val}\n".format(key=key, val=val))
return "".join(lines)
+
+def write_tmpfile(data, binary=False):
+ """
+ Write data to a new tmp file and return the file; raises EnvironmentError.
+
+ string or bytes data -- data to write to the file
+ bool binary -- treat data as binary?
+ """
+ mode = "w+b" if binary else "w+"
+ tmpfile = tempfile.NamedTemporaryFile(mode=mode, suffix=".pcs")
+ tmpfile.write(data)
+ tmpfile.flush()
+ return tmpfile
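
The two helpers above compose naturally: dict_to_environment_file renders a
sorted key=value listing and write_tmpfile persists it to a temporary file.
A minimal usage sketch (the dictionary content is made up):

    from pcs.lib.tools import dict_to_environment_file, write_tmpfile

    # keys are emitted in sorted order, one "key=value" line each
    data = dict_to_environment_file({"PCMK_debug": "yes", "PCMK_trace": "no"})
    tmpfile = write_tmpfile(data)   # NamedTemporaryFile with a ".pcs" suffix
    print(tmpfile.name)             # path to the flushed file
    tmpfile.close()                 # the temporary file is removed on close
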
diff --git a/pcs/lib/validate.py b/pcs/lib/validate.py
index e572ba9..c316ea8 100644
--- a/pcs/lib/validate.py
+++ b/pcs/lib/validate.py
@@ -35,7 +35,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from collections import namedtuple
diff --git a/pcs/lib/xml_tools.py b/pcs/lib/xml_tools.py
index 67e7ca1..e59e484 100644
--- a/pcs/lib/xml_tools.py
+++ b/pcs/lib/xml_tools.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
@@ -84,3 +83,44 @@ def etree_element_attibutes_to_dict(etree_el, required_key_list):
extracted
"""
return dict([(key, etree_el.get(key)) for key in required_key_list])
+
+def etree_to_str(tree):
+ """
+ Export an lxml tree to a string.
+ etree tree -- the tree to be exported
+ """
+ # etree.tostring() returns bytes (b'xml'). Python 3 byte strings have
+ # no .encode(), and callers such as run(...), which feeds
+ # subprocess.Popen.communicate, expect str, so decode here.
+ return etree.tostring(tree).decode()
+
+def remove_when_pointless(element, attribs_important=True):
+ """
+ Remove an element when it is not worth keeping.
+
+ Some elements serve as containers for sub-elements. Once all
+ sub-elements have been removed, it is time to consider whether such an
+ element is still meaningful.
+
+ Some of these elements are meaningful standalone when they carry
+ attributes (e.g. "network" or "storage" in "bundle"). Others are
+ pointless without sub-elements even if they have attributes (e.g.
+ rsc_ticket - after the last 'resource_set' sub-element is removed,
+ attributes may remain but the element is pointless; more details at the
+ appropriate place of use). By default an element is considered
+ meaningful when it contains attributes other than id; this can be
+ switched off via the attribs_important parameter.
+
+ lxml.etree.element element -- element to remove
+ bool attribs_important -- when True, prevents deletion if the element
+ contains attributes
+ """
+ is_element_useful = len(element) or (
+ attribs_important
+ and
+ element.attrib
+ and
+ element.attrib.keys() != ["id"]
+ )
+
+ if not is_element_useful:
+ element.getparent().remove(element)
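
To make the semantics concrete, a sketch mirroring the RemoveWhenPointless
tests above (element names are arbitrary):

    from lxml import etree
    from pcs.lib.xml_tools import remove_when_pointless

    tree = etree.fromstring('<root><only-id id="1"/><keep some="attr"/></root>')
    # removed: its only attribute is "id", so it counts as pointless
    remove_when_pointless(tree.find("only-id"))
    # kept: a non-id attribute makes the element meaningful by default
    remove_when_pointless(tree.find("keep"))
    print(etree.tostring(tree).decode())  # <root><keep some="attr"/></root>
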
diff --git a/pcs/node.py b/pcs/node.py
index 6bfe1bc..1478f68 100644
--- a/pcs/node.py
+++ b/pcs/node.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index f1b85ba..1944212 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "June 2017" "pcs 0.9.159" "System Administration Utilities"
+.TH PCS "8" "October 2017" "pcs 0.9.160" "System Administration Utilities"
.SH NAME
pcs \- pacemaker/corosync configuration system
.SH SYNOPSIS
@@ -20,7 +20,7 @@ Print all network traffic and external commands run.
\fB\-\-version\fR
Print pcs version information.
.TP
-\fB\-\-request\-timeout=<timeout>\fR
+\fB\-\-request\-timeout\fR=<timeout>
Timeout for each outgoing request to another node in seconds. Default is 60s.
.SS "Commands:"
.TP
@@ -202,7 +202,7 @@ utilization [<resource id> [<name>=<value> ...]]
Add specified utilization options to specified resource. If resource is not specified, shows utilization of all resources. If utilization options are not specified, shows utilization of specified resource. Utilization option should be in format name=value, value has to be integer. Options may be removed by setting an option without a value. Example: pcs resource utilization TestResource cpu= ram=20
.SS "cluster"
.TP
-auth [node] [...] [\fB\-u\fR username] [\fB\-p\fR password] [\fB\-\-force\fR] [\fB\-\-local\fR]
+auth [<node>[:<port>]] [...] [\fB\-u\fR <username>] [\fB\-p\fR <password>] [\fB\-\-force\fR] [\fB\-\-local\fR]
Authenticate pcs to pcsd on nodes specified, or on all nodes configured in the local cluster if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root). By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other). Using \fB\-\-force\fR forces re\-authentication to occur.
.TP
setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1\-altaddr]> [<node2[,node2\-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [...]
@@ -240,11 +240,11 @@ be used around the cluster. \fB\-\-mcast0\fR defaults to 239.255.1.1 and
ttl defaults to 1. If \fB\-\-broadcast\fR is specified, \fB\-\-mcast0/1\fR,
\fB\-\-mcastport0/1\fR & \fB\-\-ttl0/1\fR are ignored.
.TP
-start [\fB\-\-all\fR | <node>... ] [\fB\-\-wait\fR[=<n>]]
-Start corosync & pacemaker on specified node(s), if a node is not specified then corosync & pacemaker are started on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are started on all nodes. If \fB\-\-wait\fR is specified, wait up to 'n' seconds for nodes to start.
+start [\fB\-\-all\fR | <node>... ] [\fB\-\-wait\fR[=<n>]] [\fB\-\-request\-timeout\fR=<seconds>]
+Start a cluster on the specified node(s). If no nodes are specified then start a cluster on the local node. If \fB\-\-all\fR is specified then start a cluster on all nodes. If the cluster has many nodes then the start request may time out. In that case you should consider setting \fB\-\-request\-timeout\fR to a suitable value. If \fB\-\-wait\fR is specified, pcs waits up to 'n' seconds for the cluster to get ready to provide services after the cluster has successfully started.
.TP
stop [\fB\-\-all\fR | <node>... ] [\fB\-\-request\-timeout\fR=<seconds>]
-Stop corosync & pacemaker on specified node(s), if a node is not specified then corosync & pacemaker are stopped on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are stopped on all nodes. If the cluster is running resources which take long time to stop, the request may time out before the cluster actually stops. In that case you should consider setting \fB\-\-request\-timeout\fR to a suitable value.
+Stop a cluster on the specified node(s). If no nodes are specified then stop a cluster on the local node. If \fB\-\-all\fR is specified then stop a cluster on all nodes. If the cluster is running resources which take a long time to stop then the stop request may time out before the cluster actually stops. In that case you should consider setting \fB\-\-request\-timeout\fR to a suitable value.
.TP
kill
Force corosync and pacemaker daemons to stop on the local node (performs kill \-9). Note that init system (e.g. systemd) can detect that cluster is not running and start it again. If you want to stop cluster on a node, run pcs cluster stop on that node.
@@ -326,7 +326,7 @@ Permanently destroy the cluster on the current node, killing all cluster process
verify [\fB\-V\fR] [filename]
Checks the pacemaker configuration (cib) for syntax and common conceptual errors. If no filename is specified the check is performed on the currently running cluster. If \fB\-V\fR is used more verbose output will be printed.
.TP
-report [\fB\-\-from\fR "YYYY\-M\-D H:M:S" [\fB\-\-to\fR "YYYY\-M\-D H:M:S"]] dest
+report [\fB\-\-from\fR "YYYY\-M\-D H:M:S" [\fB\-\-to\fR "YYYY\-M\-D H:M:S"]] <dest>
Create a tarball containing everything needed when reporting cluster problems. If \fB\-\-from\fR and \fB\-\-to\fR are not used, the report will include the past 24 hours.
.SS "stonith"
.TP
@@ -377,7 +377,7 @@ fence <node> [\fB\-\-off\fR]
Fence the node specified (if \fB\-\-off\fR is specified, use the 'off' API call to stonith which will turn the node off instead of rebooting it).
.TP
confirm <node> [\fB\-\-force\fR]
-Confirm that the host specified is currently down. This command should \fBONLY\fR be used when the node specified has already been confirmed to be powered off and to have no access to shared resources.
+Confirm to the cluster that the specified node is powered off. This allows the cluster to recover from a situation where no stonith device is able to fence the node. This command should \fBONLY\fR be used after manually ensuring that the node is powered off and has no access to shared resources.
.B WARNING: If this node is not actually powered off or it does have access to shared resources, data corruption/cluster failure can occur. To prevent accidental running of this command, \-\-force or interactive user response is required in order to proceed.
@@ -812,6 +812,8 @@ Create the MyStonith stonith fence_virt device which can fence host 'f1'
.TP
Set the stonith\-enabled property to false on the cluster (which disables stonith)
.B # pcs property set stonith\-enabled=false
+.SH USING \-\-FORCE IN PCS COMMANDS
+Various pcs commands accept the \fB\-\-force\fR option. Its purpose is to override some of the checks that pcs performs or some of the errors that may occur when a pcs command is run. When such an error occurs, pcs will print the error with a note that it may be overridden. The exact behavior of the option differs for each pcs command. Using the \fB\-\-force\fR option can lead to situations that would normally be prevented by the logic of pcs commands and therefore \fBits use is strongly discouraged [...]
.SH ENVIRONMENT VARIABLES
.TP
EDITOR
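
As a hypothetical illustration of the two options documented above (the command names are real; the resource and option names are placeholders): a start request on a large cluster can be given a longer timeout, and a failing resource option check can be overridden, like so:

    # pcs cluster start --all --request-timeout=120
    # pcs resource update MyResource unknown_option=1 --force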
diff --git a/pcs/pcsd.py b/pcs/pcsd.py
index 7f7c660..657cbc9 100644
--- a/pcs/pcsd.py
+++ b/pcs/pcsd.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import errno
diff --git a/pcs/prop.py b/pcs/prop.py
index e84e42a..3817d75 100644
--- a/pcs/prop.py
+++ b/pcs/prop.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
diff --git a/pcs/qdevice.py b/pcs/qdevice.py
index a6dcf7e..15ff14f 100644
--- a/pcs/qdevice.py
+++ b/pcs/qdevice.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
diff --git a/pcs/quorum.py b/pcs/quorum.py
index 937b057..51469d6 100644
--- a/pcs/quorum.py
+++ b/pcs/quorum.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
diff --git a/pcs/resource.py b/pcs/resource.py
index dd864b6..3812cb7 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
@@ -159,7 +158,9 @@ def resource_cmd(argv):
if len(argv_next) == 0:
show_defaults("op_defaults")
else:
- set_default("op_defaults", argv_next)
+ lib.cib_options.set_operations_defaults(
+ prepare_options(argv_next)
+ )
elif op_subcmd == "add":
if len(argv_next) == 0:
usage.resource(["op"])
@@ -182,7 +183,9 @@ def resource_cmd(argv):
if len(argv_next) == 0:
show_defaults("rsc_defaults")
else:
- set_default("rsc_defaults", argv_next)
+ lib.cib_options.set_resources_defaults(
+ prepare_options(argv_next)
+ )
elif sub_cmd == "cleanup":
resource_cleanup(argv_next)
elif sub_cmd == "history":
@@ -763,7 +766,8 @@ def resource_update(res_id,args, deal_with_guest_change=True):
allow_invalid=("--force" in utils.pcs_options),
update=True
)
- utils.process_library_reports(report_list)
+ if report_list:
+ utils.process_library_reports(report_list)
except lib_ra.ResourceAgentError as e:
severity = (
ReportItemSeverity.WARNING if "--force" in utils.pcs_options
@@ -2258,18 +2262,6 @@ def show_defaults(def_type, indent=""):
if not foundDefault:
print(indent + "No defaults set")
-def set_default(def_type, argv):
- warn(
- "Defaults do not apply to resources which override them with their "
- "own defined values"
- )
- for arg in argv:
- args = arg.split('=')
- if (len(args) != 2):
- print("Invalid Property: " + arg)
- continue
- utils.setAttribute(def_type, args[0], args[1], exit_on_error=True)
-
def print_node(node, tab = 0):
spaces = " " * tab
if node.tag == "group":
@@ -2427,8 +2419,7 @@ def get_attrs(node, prepend_string = "", append_string = ""):
output += attr + "=" + val + " "
if output != "":
return prepend_string + output.rstrip() + append_string
- else:
- return output.rstrip()
+ return output.rstrip()
def resource_cleanup(argv):
resource = None
diff --git a/pcs/rule.py b/pcs/rule.py
index 896c1ad..329c52f 100644
--- a/pcs/rule.py
+++ b/pcs/rule.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import re
diff --git a/pcs/settings_default.py b/pcs/settings_default.py
index d1ac3c9..85117d0 100644
--- a/pcs/settings_default.py
+++ b/pcs/settings_default.py
@@ -27,7 +27,7 @@ pengine_binary = "/usr/libexec/pacemaker/pengine"
crmd_binary = "/usr/libexec/pacemaker/crmd"
cib_binary = "/usr/libexec/pacemaker/cib"
stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.159"
+pcs_version = "0.9.160"
crm_report = pacemaker_binaries + "crm_report"
crm_verify = pacemaker_binaries + "crm_verify"
crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng'
@@ -38,6 +38,7 @@ pcsd_tokens_location = "/var/lib/pcsd/tokens"
pcsd_users_conf_location = "/var/lib/pcsd/pcs_users.conf"
pcsd_settings_conf_location = "/var/lib/pcsd/pcs_settings.conf"
pcsd_exec_location = "/usr/lib/pcsd/"
+pcsd_default_port = 2224
cib_dir = "/var/lib/pacemaker/cib/"
pacemaker_uname = "hacluster"
pacemaker_gname = "haclient"
diff --git a/pcs/status.py b/pcs/status.py
index 33bf1d1..b2e65ec 100644
--- a/pcs/status.py
+++ b/pcs/status.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
diff --git a/pcs/stonith.py b/pcs/stonith.py
index aa5fbcd..6408285 100644
--- a/pcs/stonith.py
+++ b/pcs/stonith.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import json
diff --git a/pcs/test/cib_resource/common.py b/pcs/test/cib_resource/common.py
index f1cf918..e92dd61 100644
--- a/pcs/test/cib_resource/common.py
+++ b/pcs/test/cib_resource/common.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import shutil
diff --git a/pcs/test/cib_resource/stonith_common.py b/pcs/test/cib_resource/stonith_common.py
index 59a697c..7f926b5 100644
--- a/pcs/test/cib_resource/stonith_common.py
+++ b/pcs/test/cib_resource/stonith_common.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import logging
diff --git a/pcs/test/cib_resource/test_bundle.py b/pcs/test/cib_resource/test_bundle.py
index 50ea1df..7f66253 100644
--- a/pcs/test/cib_resource/test_bundle.py
+++ b/pcs/test/cib_resource/test_bundle.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/test/cib_resource/test_create.py b/pcs/test/cib_resource/test_create.py
index a5f9e0f..eecc9b6 100644
--- a/pcs/test/cib_resource/test_create.py
+++ b/pcs/test/cib_resource/test_create.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.misc import (
@@ -51,9 +50,18 @@ class Success(ResourceTest):
"""<resources>
<primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
<operations>
+ <op id="R-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="R-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="R-monitor-interval-10" interval="10"
name="monitor" timeout="20"
/>
+ <op id="R-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
<op id="R-start-interval-0s" interval="0s" name="start"
timeout="20"
/>
@@ -376,9 +384,18 @@ class SuccessOperations(ResourceTest):
"""<resources>
<primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
<operations>
+ <op id="R-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="R-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="R-monitor-interval-10" interval="10"
name="monitor" timeout="20"
/>
+ <op id="R-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
<op id="R-start-interval-0s" interval="0s" name="start"
timeout="20"
/>
@@ -396,9 +413,18 @@ class SuccessOperations(ResourceTest):
"""<resources>
<primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
<operations>
+ <op id="R-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="R-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="R-monitor-interval-10" interval="10"
name="monitor" timeout="20"
/>
+ <op id="R-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
<op id="R-start-interval-0s" interval="0s" name="start"
timeout="200"
/>
@@ -438,6 +464,9 @@ class SuccessOperations(ResourceTest):
<op id="R-monitor-interval-11" interval="11"
name="monitor" role="Slave" timeout="20"
/>
+ <op id="R-notify-interval-0s" interval="0s"
+ name="notify" timeout="5"
+ />
<op id="R-start-interval-0s" interval="0s" name="start"
timeout="20"
/>
@@ -663,9 +692,18 @@ class SuccessMaster(ResourceTest):
/>
</instance_attributes>
<operations>
+ <op id="R-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="R-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="R-monitor-interval-10" interval="10"
name="monitor" timeout="20"
/>
+ <op id="R-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
<op id="R-start-interval-0s" interval="0s"
name="start" timeout="20"
/>
@@ -798,9 +836,18 @@ class SuccessClone(ResourceTest):
/>
</meta_attributes>
<operations>
+ <op id="R-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="R-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="R-monitor-interval-10" interval="10"
name="monitor" timeout="20"
/>
+ <op id="R-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
<op id="R-start-interval-0s" interval="0s"
name="start" timeout="20"
/>
@@ -834,9 +881,18 @@ class SuccessClone(ResourceTest):
type="Dummy"
>
<operations>
+ <op id="R-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="R-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="R-monitor-interval-10" interval="10"
name="monitor"
/>
+ <op id="R-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
<op id="R-start-interval-0s" interval="0s"
name="start" timeout="20"
/>
@@ -868,9 +924,18 @@ class SuccessClone(ResourceTest):
type="Dummy"
>
<operations>
+ <op id="R-migrate_from-interval-0s" interval="0s"
+ name="migrate_from" timeout="20"
+ />
+ <op id="R-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="20"
+ />
<op id="R-monitor-interval-10" interval="10"
name="monitor" timeout="20"
/>
+ <op id="R-reload-interval-0s" interval="0s"
+ name="reload" timeout="20"
+ />
<op id="R-start-interval-0s" interval="0s"
name="start" timeout="20"
/>
diff --git a/pcs/test/cib_resource/test_manage_unmanage.py b/pcs/test/cib_resource/test_manage_unmanage.py
index 2a87cd3..744128a 100644
--- a/pcs/test/cib_resource/test_manage_unmanage.py
+++ b/pcs/test/cib_resource/test_manage_unmanage.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
diff --git a/pcs/test/cib_resource/test_operation_add.py b/pcs/test/cib_resource/test_operation_add.py
index 7b43754..d48f0e9 100644
--- a/pcs/test/cib_resource/test_operation_add.py
+++ b/pcs/test/cib_resource/test_operation_add.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import shutil
diff --git a/pcs/test/cib_resource/test_stonith_create.py b/pcs/test/cib_resource/test_stonith_create.py
index ed3afc6..356319e 100644
--- a/pcs/test/cib_resource/test_stonith_create.py
+++ b/pcs/test/cib_resource/test_stonith_create.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs import utils
diff --git a/pcs/test/cib_resource/test_stonith_enable_disable.py b/pcs/test/cib_resource/test_stonith_enable_disable.py
index 956be0d..7ea849e 100644
--- a/pcs/test/cib_resource/test_stonith_enable_disable.py
+++ b/pcs/test/cib_resource/test_stonith_enable_disable.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.cib_resource.common import ResourceTest
diff --git a/pcs/test/curl_test.py b/pcs/test/curl_test.py
new file mode 100644
index 0000000..094cdea
--- /dev/null
+++ b/pcs/test/curl_test.py
@@ -0,0 +1,63 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+# This module is intended only to test the new Communicator and
+# MultiringCommunicator classes, which are built on libcurl (via pycurl)
+
+import os.path
+import sys
+import pprint
+import logging
+
+PACKAGE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.insert(0, PACKAGE_DIR)
+
+from pcs.common.node_communicator import Request, RequestData, RequestTarget
+from pcs import utils
+
+logger_handler = logging.StreamHandler()
+logger_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
+logger = logging.getLogger("old_cli")
+logger.setLevel(logging.DEBUG)
+logger.addHandler(logger_handler)
+
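+# The first address of this target ("httpbin.org2") does not resolve;
+# presumably this is intended to exercise failover to the next address.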
+global_target = RequestTarget(
+ "TestServer", ["httpbin.org2", "httpbin.org"], port=443
+)
+
+pprint.pprint(global_target)
+
+def get_request(timeout):
+ return Request(global_target, RequestData("delay/{0}".format(timeout)))
+
+lib_env = utils.get_lib_env()
+# utils.pcs_options["--debug"] = True
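+# Six requests with delays of 2, 4, ... 12 seconds; with the 10 second
+# request timeout set below, the longest delays should presumably time out.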
+request_list = [get_request((i + 1) * 2) for i in range(6)]
+factory = lib_env.get_node_communicator_factory()
+factory._request_timeout = 10
+communicator = factory.get_multiring_communicator()
+# communicator.add_requests([get_request(10)])
+# response = list(communicator.start_loop())[0]
+# pprint.pprint(response.to_report_item())
+communicator.add_requests(request_list)
+for response in communicator.start_loop():
+ # print(80 * "-")
+ # print(response.request.url)
+ # print(response.data)
+ # print(80 * "-")
+ if response.request == request_list[2]:
+ r = get_request(5)
+ request_list.append(r)
+ communicator.add_requests([r])
+ if response.request == request_list[5]:
+ r = get_request(10)
+ request_list.append(r)
+ communicator.add_requests([r])
+ if len(request_list) == 8 and response.request == request_list[7]:
+ r = get_request(15)
+ communicator.add_requests([r])
+
+
diff --git a/pcs/test/resources/cib-empty.xml b/pcs/test/resources/cib-empty.xml
index e0bb57f..04923b7 100644
--- a/pcs/test/resources/cib-empty.xml
+++ b/pcs/test/resources/cib-empty.xml
@@ -1,4 +1,4 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.6" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.12" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
<configuration>
<crm_config/>
<nodes>
diff --git a/pcs/test/resources/cib-large.xml b/pcs/test/resources/cib-large.xml
index 2715a91..4f7532b 100644
--- a/pcs/test/resources/cib-large.xml
+++ b/pcs/test/resources/cib-large.xml
@@ -1,4 +1,4 @@
-<cib admin_epoch="0" cib-last-written="Thu Aug 23 16:49:17 2012" crm_feature_set="3.0.6" dc-uuid="2" epoch="1308" have-quorum="0" num_updates="1" update-client="crmd" update-origin="rh7-3" validate-with="pacemaker-1.2">
+<cib admin_epoch="0" cib-last-written="Thu Aug 23 16:49:17 2012" crm_feature_set="3.0.12" dc-uuid="2" epoch="1308" have-quorum="0" num_updates="1" update-client="crmd" update-origin="rh7-3" validate-with="pacemaker-1.2">
<configuration>
<crm_config/>
<nodes>
diff --git a/pcs/test/resources/resource_agent_ocf_heartbeat_dummy.xml b/pcs/test/resources/resource_agent_ocf_heartbeat_dummy.xml
index cd9e60b..bfd9522 100644
--- a/pcs/test/resources/resource_agent_ocf_heartbeat_dummy.xml
+++ b/pcs/test/resources/resource_agent_ocf_heartbeat_dummy.xml
@@ -44,6 +44,6 @@ Fake attribute that can be changed to cause a reload
<action name="migrate_to" timeout="20" />
<action name="migrate_from" timeout="20" />
<action name="meta-data" timeout="5" />
-<action name="validate-all" timeout="20" />
+<action name="validate-all" timeout="20" />
</actions>
</resource-agent>
diff --git a/pcs/test/resources/resource_agent_ocf_heartbeat_dummy.xml b/pcs/test/resources/resource_agent_ocf_heartbeat_dummy_insane_action.xml
similarity index 93%
copy from pcs/test/resources/resource_agent_ocf_heartbeat_dummy.xml
copy to pcs/test/resources/resource_agent_ocf_heartbeat_dummy_insane_action.xml
index cd9e60b..12f639d 100644
--- a/pcs/test/resources/resource_agent_ocf_heartbeat_dummy.xml
+++ b/pcs/test/resources/resource_agent_ocf_heartbeat_dummy_insane_action.xml
@@ -40,10 +40,11 @@ Fake attribute that can be changed to cause a reload
<action name="start" timeout="20" />
<action name="stop" timeout="20" />
<action name="monitor" timeout="20" interval="10" depth="0" />
+<action name="moni*tor" timeout="20" interval="20" depth="0" />
<action name="reload" timeout="20" />
<action name="migrate_to" timeout="20" />
<action name="migrate_from" timeout="20" />
<action name="meta-data" timeout="5" />
-<action name="validate-all" timeout="20" />
+<action name="validate-all" timeout="20" />
</actions>
</resource-agent>
diff --git a/pcs/test/suite.py b/pcs/test/suite.py
index fcac586..cce2d00 100755
--- a/pcs/test/suite.py
+++ b/pcs/test/suite.py
@@ -3,7 +3,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
import os.path
diff --git a/pcs/test/test_acl.py b/pcs/test/test_acl.py
index a7ac41a..6df6000 100644
--- a/pcs/test/test_acl.py
+++ b/pcs/test/test_acl.py
@@ -2,17 +2,16 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import shutil
from pcs.test.tools import pcs_unittest as unittest
-from pcs.test.tools.assertions import AssertPcsMixin
-from pcs.test.tools.misc import (
+from pcs.test.tools.assertions import (
ac,
- get_test_resource as rc,
+ AssertPcsMixin,
)
+from pcs.test.tools.misc import get_test_resource as rc
from pcs.test.tools.pcs_runner import (
pcs,
PcsRunner,
@@ -33,17 +32,8 @@ class ACLTest(unittest.TestCase, AssertPcsMixin):
self.assert_pcs_success(
'acl show',
- "ACLs are disabled, run 'pcs acl enable' to enable\n\n"
- )
-
- with open(temp_cib) as myfile:
- data = myfile.read()
- assert data.find("pacemaker-1.2") != -1
- assert data.find("pacemaker-2.") == -1
-
- self.assert_pcs_success(
- 'acl role create test_role read xpath my_xpath',
- "CIB has been upgraded to the latest schema version.\n"
+ "ACLs are disabled, run 'pcs acl enable' to enable"
+ "\n\nCIB has been upgraded to the latest schema version.\n"
)
with open(temp_cib) as myfile:
@@ -887,4 +877,3 @@ Role: role4
"acl role unassign role1 from group group1",
"Error: Role 'role1' is not assigned to 'group1'\n"
)
-
diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py
index 5b4fc48..e643089 100644
--- a/pcs/test/test_alert.py
+++ b/pcs/test/test_alert.py
@@ -1,9 +1,7 @@
-
from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import shutil
diff --git a/pcs/test/test_booth.py b/pcs/test/test_booth.py
index 2bbfe96..eac7d13 100644
--- a/pcs/test/test_booth.py
+++ b/pcs/test/test_booth.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os
@@ -397,9 +396,9 @@ class ConfigTest(unittest.TestCase, BoothMixin):
ensure_booth_config_not_exists()
self.assert_pcs_fail(
"booth config",
- "Error: Booth config file '{0}' does not exist\n".format(
- BOOTH_CONFIG_FILE
- )
+ "Error: Booth config file '{0}' does not exist\n"
+ "Error: Errors have occurred, therefore pcs is unable to continue\n"
+ .format(BOOTH_CONFIG_FILE)
)
def test_too_much_args(self):
diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
index 76930c7..01998f0 100644
--- a/pcs/test/test_cluster.py
+++ b/pcs/test/test_cluster.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os
@@ -10,11 +9,14 @@ import shutil
import socket
from pcs.test.tools import pcs_unittest as unittest
-from pcs.test.tools.assertions import AssertPcsMixin
-from pcs.test.tools.misc import (
+from pcs.test.tools.assertions import (
ac,
+ AssertPcsMixin,
+)
+from pcs.test.tools.misc import (
get_test_resource as rc,
skip_unless_pacemaker_version,
+ skip_if_service_enabled,
outdent,
)
from pcs.test.tools.pcs_runner import (
@@ -523,6 +525,7 @@ logging {
)
self.assertEqual(1, returnVal)
+ @skip_if_service_enabled("sbd")
def test_cluster_setup_2_nodes_no_atb(self):
# Setup a 2 node cluster and make sure the two node config is set, then
# add a node and make sure that it's unset, then remove a node and make
diff --git a/pcs/test/test_cluster_pcmk_remote.py b/pcs/test/test_cluster_pcmk_remote.py
index 0db4a5c..e504fd8 100644
--- a/pcs/test/test_cluster_pcmk_remote.py
+++ b/pcs/test/test_cluster_pcmk_remote.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.cib_resource.common import ResourceTest
@@ -69,9 +68,18 @@ class NodeAddRemote(ResourceTest):
/>
</instance_attributes>
<operations>
+ <op id="node-name-migrate_from-interval-0s"
+ interval="0s" name="migrate_from" timeout="60"
+ />
+ <op id="node-name-migrate_to-interval-0s" interval="0s"
+ name="migrate_to" timeout="60"
+ />
<op id="node-name-monitor-interval-60s" interval="60s"
name="monitor" timeout="30"
/>
+ <op id="node-name-reload-interval-0s" interval="0s"
+ name="reload" timeout="60"
+ />
<op id="node-name-start-interval-0s" interval="0s"
name="start" timeout="60"
/>
diff --git a/pcs/test/test_common_tools.py b/pcs/test/test_common_tools.py
index d9b6af3..491af95 100644
--- a/pcs/test/test_common_tools.py
+++ b/pcs/test/test_common_tools.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
index 4160b01..9f0bc5d 100644
--- a/pcs/test/test_constraints.py
+++ b/pcs/test/test_constraints.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
@@ -10,10 +9,13 @@ import os
import shutil
from pcs.test.tools import pcs_unittest as unittest
-from pcs.test.tools.assertions import AssertPcsMixin, console_report
+from pcs.test.tools.assertions import (
+ ac,
+ AssertPcsMixin,
+ console_report,
+)
from pcs.test.tools.cib import get_assert_pcs_effect_mixin
from pcs.test.tools.misc import (
- ac,
get_test_resource as rc,
skip_unless_pacemaker_supports_bundle,
skip_unless_pacemaker_version,
@@ -23,10 +25,13 @@ from pcs.test.tools.pcs_runner import pcs, PcsRunner
empty_cib = rc("cib-empty.xml")
-empty_cib_1_2 = rc("cib-empty-1.2.xml")
temp_cib = rc("temp-cib.xml")
large_cib = rc("cib-large.xml")
+skip_unless_location_resource_discovery = skip_unless_pacemaker_version(
+ (1, 1, 12),
+ "constraints with the resource-discovery option"
+)
skip_unless_location_rsc_pattern = skip_unless_pacemaker_version(
(1, 1, 16),
"location constraints with resource patterns"
@@ -542,11 +547,8 @@ Colocation Constraints:
ac(output, "")
self.assertEqual(0, retValue)
- @skip_unless_pacemaker_version(
- (1, 1, 12),
- "constraints with the resource-discovery option"
- )
- def testConstraintResourceDiscovery(self):
+ @skip_unless_location_resource_discovery
+ def testConstraintResourceDiscoveryRules(self):
o,r = pcs("resource create crd ocf:heartbeat:Dummy")
ac(o,"")
assert r == 0
@@ -581,20 +583,18 @@ Colocation Constraints:
])+'\n')
assert r == 0
- o,r = pcs("constraint delete location-crd")
+ @skip_unless_location_resource_discovery
+ def testConstraintResourceDiscovery(self):
+ o,r = pcs("resource create crd ocf:heartbeat:Dummy")
ac(o,"")
- assert r==0
+ assert r == 0
- o,r = pcs("constraint delete location-crd1")
+ o,r = pcs("resource create crd1 ocf:heartbeat:Dummy")
ac(o,"")
- assert r==0
-
- o,r = pcs("constraint --full")
- ac(o,"Location Constraints:\nOrdering Constraints:\nColocation Constraints:\nTicket Constraints:\n")
assert r == 0
o,r = pcs("constraint location add my_constraint_id crd my_node -INFINITY resource-discovery=always")
- ac(o,"")
+ ac(o,"Cluster CIB has been upgraded to latest version\n")
assert r == 0
o,r = pcs("constraint location add my_constraint_id2 crd1 my_node -INFINITY resource-discovery=never")
diff --git a/pcs/test/test_lib_commands_qdevice.py b/pcs/test/test_lib_commands_qdevice.py
index 756afa8..c006291 100644
--- a/pcs/test/test_lib_commands_qdevice.py
+++ b/pcs/test/test_lib_commands_qdevice.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
index 70c76f0..5f77f38 100644
--- a/pcs/test/test_lib_commands_quorum.py
+++ b/pcs/test/test_lib_commands_quorum.py
@@ -2,21 +2,18 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import logging
-from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase, skip
from pcs.test.tools.assertions import (
+ ac,
assert_raise_library_error,
assert_report_item_list_equal,
)
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.misc import (
- ac,
- get_test_resource as rc,
-)
+from pcs.test.tools.misc import get_test_resource as rc
from pcs.test.tools.pcs_unittest import mock
from pcs.common import report_codes
@@ -510,6 +507,7 @@ class StatusDeviceTextTest(TestCase, CmanMixin):
mock_status.assert_called_once_with("mock_runner", True)
+ at skip("TODO: rewrite using new testing fremework")
@mock.patch.object(LibraryEnvironment, "push_corosync_conf")
@mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
@mock.patch("pcs.lib.commands.quorum._add_device_model_net")
@@ -862,7 +860,7 @@ class AddDeviceTest(TestCase, CmanMixin):
self.assertEqual(3, len(mock_client_enable.mock_calls))
self.assertEqual(3, len(mock_client_start.mock_calls))
-
+ at skip("TODO: rewrite using new testing fremework")
@mock.patch(
"pcs.lib.commands.quorum.qdevice_net.remote_client_import_certificate_and_key"
)
@@ -1503,6 +1501,7 @@ class AddDeviceNetTest(TestCase):
mock_import_cert.assert_has_calls(client_import_calls, any_order=True)
+ at skip("TODO: rewrite using new testing fremework")
@mock.patch.object(LibraryEnvironment, "push_corosync_conf")
@mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
@mock.patch("pcs.lib.commands.quorum._remove_device_model_net")
@@ -1794,6 +1793,7 @@ class RemoveDeviceTest(TestCase, CmanMixin):
mock_atb_check.assert_not_called()
+ at skip("TODO: rewrite using new testing fremework")
@mock.patch("pcs.lib.commands.quorum.qdevice_net.remote_client_destroy")
@mock.patch.object(
LibraryEnvironment,
diff --git a/pcs/test/test_lib_commands_sbd.py b/pcs/test/test_lib_commands_sbd.py
index 6017371..18b0d91 100644
--- a/pcs/test/test_lib_commands_sbd.py
+++ b/pcs/test/test_lib_commands_sbd.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import logging
@@ -15,10 +14,12 @@ from pcs.test.tools.assertions import (
assert_report_item_list_equal,
)
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.integration_lib import Runner, Call
+from pcs.test.tools.integration_lib import Runner
+from pcs.test.tools.command_env.mock_runner import Call
from pcs import settings
from pcs.common import report_codes
+from pcs.common.node_communicator import RequestTarget
from pcs.lib.errors import (
ReportItemSeverity as Severities,
LibraryError,
@@ -31,7 +32,6 @@ from pcs.lib.node import (
)
from pcs.lib.external import (
NodeCommunicator,
- NodeConnectionException,
CommandRunner,
)
import pcs.lib.commands.sbd as cmd_sbd
@@ -400,8 +400,8 @@ class ValidateWatchdogDictTest(TestCase):
class ValidateDeviceDictTest(TestCase):
def test_all_ok(self):
device_dict = {
- NodeAddresses("node1"): ["/dev1", "/dev2"],
- NodeAddresses("node2"): ["/dev1"],
+ "node1": ["/dev1", "/dev2"],
+ "node2": ["/dev1"],
}
self.assertEqual([], cmd_sbd._validate_device_dict(device_dict))
@@ -410,10 +410,10 @@ class ValidateDeviceDictTest(TestCase):
"dev" + str(i) for i in range(settings.sbd_max_device_num + 1)
]
device_dict = {
- NodeAddresses("node1"): [],
- NodeAddresses("node2"): too_many_devices,
- NodeAddresses("node3"): ["/dev/vda"],
- NodeAddresses("node4"): ["/dev/vda1", "../dev/sda2"],
+ "node1": [],
+ "node2": too_many_devices,
+ "node3": ["/dev/vda"],
+ "node4": ["/dev/vda1", "../dev/sda2"],
}
assert_report_item_list_equal(
cmd_sbd._validate_device_dict(device_dict),
@@ -512,326 +512,41 @@ class CheckNodeNamesInClusterTest(TestCase):
)
-class GetFullNodeDictTest(TestCase):
+class GetFullTargetDictTest(TestCase):
def setUp(self):
- self.node_list = NodeAddressesList([
- NodeAddresses("node1"),
- NodeAddresses("node2"),
- NodeAddresses("node3"),
- ])
+ self.target_list = [
+ RequestTarget("node{0}".format(i)) for i in range(1, 4)
+ ]
def test_not_using_default(self):
- node_dict = dict([
+ target_dict = dict([
("node" + str(i), "val" + str(i)) for i in range(4)
])
expected = {
- self.node_list[0]: "val1",
- self.node_list[1]: "val2",
- self.node_list[2]: "val3",
+ self.target_list[0].label: "val1",
+ self.target_list[1].label: "val2",
+ self.target_list[2].label: "val3",
}
self.assertEqual(
expected,
- cmd_sbd._get_full_node_dict(self.node_list, node_dict, None)
+ cmd_sbd._get_full_target_dict(self.target_list, target_dict, None)
)
def test_using_default(self):
- node_dict = dict([
+ target_dict = dict([
("node" + str(i), "val" + str(i)) for i in range(3)
])
default = "default"
expected = {
- self.node_list[0]: "val1",
- self.node_list[1]: "val2",
- self.node_list[2]: default,
+ self.target_list[0].label: "val1",
+ self.target_list[1].label: "val2",
+ self.target_list[2].label: default,
}
self.assertEqual(
expected,
- cmd_sbd._get_full_node_dict(self.node_list, node_dict, default)
- )
-
-
- at mock.patch("pcs.lib.commands.sbd._get_cluster_nodes")
- at mock.patch("pcs.lib.sbd.check_sbd")
-class GetClusterSbdStatusTest(CommandSbdTest):
- def test_success(self, mock_check_sbd, mock_get_nodes):
- def ret_val(communicator, node, empty_str, empty_list):
- self.assertEqual(communicator, self.mock_com)
- self.assertEqual(empty_str, "")
- if node.label == "node0":
- return """{
- "sbd": {
- "installed": true,
- "enabled": true,
- "running": false
- }
- }"""
- elif node.label == "node1":
- return """{
- "sbd": {
- "installed": false,
- "enabled": false,
- "running": false
- }
- }"""
- elif node.label == "node2":
- return """{
- "sbd": {
- "installed": true,
- "enabled": false,
- "running": false
- }
- }"""
- else:
- raise AssertionError(
- "Unexpected call: node={node}, node.label={label}".format(
- node=str(node), label=node.label
- )
- )
-
- mock_check_sbd.side_effect = ret_val
- self.mock_env.is_cman_cluster = False
- mock_get_nodes.return_value = self.node_list
- expected = [
- {
- "node": "node0",
- "status": {
- "installed": True,
- "enabled": True,
- "running": False
- }
- },
- {
- "node": "node1",
- "status": {
- "installed": False,
- "enabled": False,
- "running": False
- }
- },
- {
- "node": "node2",
- "status": {
- "installed": True,
- "enabled": False,
- "running": False
- }
- }
- ]
- _assert_equal_list_of_dictionaries_without_order(
- expected, cmd_sbd.get_cluster_sbd_status(self.mock_env)
- )
- mock_get_nodes.assert_called_once_with(self.mock_env)
- self.assertEqual(3, mock_check_sbd.call_count)
- self.assertEqual(self.mock_log.warning.call_count, 0)
-
- def test_failures(self, mock_check_sbd, mock_get_nodes):
- def ret_val(communicator, node, empty_str, empty_list):
- self.assertEqual(communicator, self.mock_com)
- self.assertEqual(empty_str, "")
- if node.label == "node0":
- return """{
- "not_sbd": {
- "installed": true,
- "enabled": true,
- "running": false
- }
- }"""
- elif node.label == "node1":
- raise NodeConnectionException(node.label, "command", "reason")
- elif node.label == "node2":
- return "invalid_json"
- else:
- raise AssertionError(
- "Unexpected call: node={node}, node.label={label}".format(
- node=str(node), label=node.label
- )
- )
-
- mock_check_sbd.side_effect = ret_val
- self.mock_env.is_cman_cluster = False
- mock_get_nodes.return_value = self.node_list
- all_none = {
- "installed": None,
- "enabled": None,
- "running": None
- }
- expected = [
- {
- "node": "node0",
- "status": all_none
- },
- {
- "node": "node1",
- "status": all_none
- },
- {
- "node": "node2",
- "status": all_none
- }
- ]
-
- _assert_equal_list_of_dictionaries_without_order(
- expected, cmd_sbd.get_cluster_sbd_status(self.mock_env)
- )
- mock_get_nodes.assert_called_once_with(self.mock_env)
- self.assertEqual(3, mock_check_sbd.call_count)
- assert_report_item_list_equal(
- self.mock_rep.report_item_list,
- [
- (
- Severities.WARNING,
- report_codes.UNABLE_TO_GET_SBD_STATUS,
- {"node": "node0"}
- ),
- (
- Severities.WARNING,
- report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
- {
- "node": "node1",
- "reason": "reason",
- "command": "command",
- }
- ),
- (
- Severities.WARNING,
- report_codes.UNABLE_TO_GET_SBD_STATUS,
- {"node": "node1"}
- ),
- (
- Severities.WARNING,
- report_codes.UNABLE_TO_GET_SBD_STATUS,
- {"node": "node2"}
- )
- ]
- )
-
- at mock.patch("pcs.lib.commands.sbd._get_cluster_nodes")
- at mock.patch("pcs.lib.sbd.get_sbd_config")
-class GetClusterSbdConfigTest(CommandSbdTest):
- def test_success(self, mock_sbd_cfg, mock_get_nodes):
- this = self
-
- def ret_val(communicator, node):
- this.assertEqual(communicator, this.mock_com)
- if node.label == "node0":
- return """\
-# comment
-SBD_TEST=true
-ANOTHER_OPT=1
-"""
- elif node.label == "node1":
- return """\
-OPTION= value
-
-"""
- elif node.label == "node2":
- return """\
-
-# just comment
-
-"""
- else:
- raise AssertionError(
- "Unexpected call: node={node}, node.label={label}".format(
- node=str(node), label=node.label
- )
- )
-
- mock_sbd_cfg.side_effect = ret_val
- self.mock_env.is_cman_cluster = False
- mock_get_nodes.return_value = self.node_list
- expected = [
- {
- "node": "node0",
- "config": {
- "SBD_TEST": "true",
- "ANOTHER_OPT": "1"
- }
- },
- {
- "node": "node1",
- "config": {"OPTION": "value"}
- },
- {
- "node": "node2",
- "config": {}
- }
- ]
-
- _assert_equal_list_of_dictionaries_without_order(
- expected, cmd_sbd.get_cluster_sbd_config(self.mock_env)
- )
- mock_get_nodes.assert_called_once_with(self.mock_env)
- self.assertEqual(3, mock_sbd_cfg.call_count)
- self.assertEqual(self.mock_log.warning.call_count, 0)
-
- def test_few_failures(self, mock_sbd_cfg, mock_get_nodes):
- def ret_val(communicator, node):
- self.assertEqual(communicator, self.mock_com)
- if node.label == "node0":
- return """\
- # comment
- SBD_TEST=true
- ANOTHER_OPT=1
- """
- elif node.label == "node1":
- return """\
-invalid value
-
- """
- elif node.label == "node2":
- raise NodeConnectionException(node.label, "command", "reason")
- else:
- raise AssertionError(
- "Unexpected call: node={node}, node.label={label}".format(
- node=str(node), label=node.label
- )
- )
-
- mock_sbd_cfg.side_effect = ret_val
- self.mock_env.is_cman_cluster = False
- mock_get_nodes.return_value = self.node_list
- expected = [
- {
- "node": "node0",
- "config": {
- "SBD_TEST": "true",
- "ANOTHER_OPT": "1"
- }
- },
- {
- "node": "node1",
- "config": {}
- },
- {
- "node": "node2",
- "config": None
- }
- ]
-
- _assert_equal_list_of_dictionaries_without_order(
- expected, cmd_sbd.get_cluster_sbd_config(self.mock_env)
- )
- mock_get_nodes.assert_called_once_with(self.mock_env)
- self.assertEqual(3, mock_sbd_cfg.call_count)
- assert_report_item_list_equal(
- self.mock_rep.report_item_list,
- [
- (
- Severities.WARNING,
- report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
- {
- "node": "node2",
- "reason": "reason",
- "command": "command",
- }
- ),
- (
- Severities.WARNING,
- report_codes.UNABLE_TO_GET_SBD_CONFIG,
- {"node": "node2"}
- ),
- ]
+ cmd_sbd._get_full_target_dict(
+ self.target_list, target_dict, default
+ )
)
@@ -1227,4 +942,3 @@ class SetMessageTest(CommonTest):
)
)
runner.assert_everything_launched()
-
diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py
index 1bf4716..edc538d 100644
--- a/pcs/test/test_lib_corosync_config_facade.py
+++ b/pcs/test/test_lib_corosync_config_facade.py
@@ -2,21 +2,18 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
import re
from pcs.test.tools.assertions import (
+ ac,
assert_raise_library_error,
assert_report_item_list_equal,
)
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.misc import (
- ac,
- get_test_resource as rc,
-)
+from pcs.test.tools.misc import get_test_resource as rc
from pcs.common import report_codes
from pcs.lib.errors import ReportItemSeverity as severity
diff --git a/pcs/test/test_lib_corosync_config_parser.py b/pcs/test/test_lib_corosync_config_parser.py
index a68710b..803d458 100644
--- a/pcs/test/test_lib_corosync_config_parser.py
+++ b/pcs/test/test_lib_corosync_config_parser.py
@@ -2,12 +2,11 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools import pcs_unittest as unittest
-from pcs.test.tools.misc import ac
+from pcs.test.tools.assertions import ac
from pcs.lib.corosync import config_parser
diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py
index 3a4ecce..7afeb9f 100644
--- a/pcs/test/test_lib_corosync_live.py
+++ b/pcs/test/test_lib_corosync_live.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
@@ -11,7 +10,7 @@ import os.path
from pcs.test.tools.assertions import assert_raise_library_error
from pcs.test.tools.misc import get_test_resource as rc
-from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.pcs_unittest import mock, skip
from pcs import settings
from pcs.common import report_codes
@@ -71,7 +70,7 @@ class GetLocalClusterConfTest(TestCase):
)
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.corosync.DistributeCorosyncConf")
class SetRemoteCorosyncConfTest(TestCase):
def test_success(self):
config = "test {\nconfig: data\n}\n"
diff --git a/pcs/test/test_lib_corosync_qdevice_client.py b/pcs/test/test_lib_corosync_qdevice_client.py
index 8c32c36..2519134 100644
--- a/pcs/test/test_lib_corosync_qdevice_client.py
+++ b/pcs/test/test_lib_corosync_qdevice_client.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/test/test_lib_corosync_qdevice_net.py b/pcs/test/test_lib_corosync_qdevice_net.py
index 21c526b..f9c3f1a 100644
--- a/pcs/test/test_lib_corosync_qdevice_net.py
+++ b/pcs/test/test_lib_corosync_qdevice_net.py
@@ -2,10 +2,9 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
-from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase, skip
import base64
import os.path
@@ -799,7 +798,7 @@ class ClientImportCertificateAndKeyTest(CertificateTestCase):
_client_cert_tool, "-m", "-c", self.mock_tmpfile.name
])
-
+ at skip("TODO: rewrite for pcs.lib.communication.qdevice_net.GetCaCert")
class RemoteQdeviceGetCaCertificate(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -852,7 +851,7 @@ class RemoteQdeviceGetCaCertificate(TestCase):
)
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.qdevice_net.ClientSetup")
class RemoteClientSetupTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -884,7 +883,7 @@ class RemoteClientSetupTest(TestCase):
)
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.qdevice_net.SignCertificate")
class RemoteSignCertificateRequestTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -949,7 +948,10 @@ class RemoteSignCertificateRequestTest(TestCase):
)
)
-
+@skip(
+ "TODO: rewrite for "
+ "pcs.lib.communication.qdevice_net.ClientImportCertificateAndKey"
+)
class RemoteClientImportCertificateAndKeyTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -985,7 +987,7 @@ class RemoteClientImportCertificateAndKeyTest(TestCase):
)
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.qdevice_net.ClientDestroy")
class RemoteClientDestroy(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -1085,4 +1087,3 @@ some other line
}
)
)
-
diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
index 3868b0f..b249c47 100644
--- a/pcs/test/test_lib_external.py
+++ b/pcs/test/test_lib_external.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
@@ -26,11 +25,7 @@ from pcs.common import (
pcs_pycurl as pycurl,
report_codes,
)
-from pcs.lib import reports
-from pcs.lib.errors import (
- LibraryError,
- ReportItemSeverity as severity
-)
+from pcs.lib.errors import ReportItemSeverity as severity
import pcs.lib.external as lib
@@ -403,7 +398,6 @@ class CommandRunnerTest(TestCase):
]
)
-
@mock.patch(
"pcs.lib.external.pycurl.Curl",
autospec=True
@@ -1122,126 +1116,6 @@ class NodeCommunicatorExceptionTransformTest(TestCase):
self.assertTrue(raised)
-class ParallelCommunicationHelperTest(TestCase):
- def setUp(self):
- self.mock_reporter = MockLibraryReportProcessor()
-
- def fixture_raiser(self):
- def raiser(x, *args, **kwargs):
- if x == 1:
- raise lib.NodeConnectionException("node", "command", "reason")
- elif x == 2:
- raise LibraryError(
- reports.corosync_config_distribution_node_error("node")
- )
- return raiser
-
- def test_success(self):
- func = mock.MagicMock()
- lib.parallel_nodes_communication_helper(
- func,
- [([x], {"a": x*2,}) for x in range(3)],
- self.mock_reporter,
- skip_offline_nodes=False
- )
- expected_calls = [
- mock.call(0, a=0),
- mock.call(1, a=2),
- mock.call(2, a=4),
- ]
- self.assertEqual(len(expected_calls), len(func.mock_calls))
- func.assert_has_calls(expected_calls, any_order=True)
- self.assertEqual(self.mock_reporter.report_item_list, [])
-
- def test_errors(self):
- func = self.fixture_raiser()
- assert_raise_library_error(
- lambda: lib.parallel_nodes_communication_helper(
- func,
- [([x], {"a": x*2,}) for x in range(4)],
- self.mock_reporter,
- skip_offline_nodes=False
- ),
- (
- severity.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
- {
- "node": "node",
- "reason": "reason",
- "command": "command",
- },
- report_codes.SKIP_OFFLINE_NODES
- ),
- (
- severity.ERROR,
- report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
- {
- "node": "node",
- }
- )
- )
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
- {
- "node": "node",
- "reason": "reason",
- "command": "command",
- },
- report_codes.SKIP_OFFLINE_NODES
- ),
- (
- severity.ERROR,
- report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
- {
- "node": "node",
- }
- )
- ]
- )
-
- def test_errors_skip_offline(self):
- func = self.fixture_raiser()
- assert_raise_library_error(
- lambda: lib.parallel_nodes_communication_helper(
- func,
- [([x], {"a": x*2,}) for x in range(4)],
- self.mock_reporter,
- skip_offline_nodes=True
- ),
- (
- severity.ERROR,
- report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
- {
- "node": "node",
- }
- )
- )
- assert_report_item_list_equal(
- self.mock_reporter.report_item_list,
- [
- (
- severity.WARNING,
- report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
- {
- "node": "node",
- "reason": "reason",
- "command": "command",
- }
- ),
- (
- severity.ERROR,
- report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
- {
- "node": "node",
- }
- )
- ]
- )
-
class IsCmanClusterTest(TestCase):
def template_test(self, version_description, is_cman, corosync_retval=0):
mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
diff --git a/pcs/test/test_lib_node.py b/pcs/test/test_lib_node.py
index eca98a5..0a40421 100644
--- a/pcs/test/test_lib_node.py
+++ b/pcs/test/test_lib_node.py
@@ -2,16 +2,9 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
-import sys
-
-from pcs.test.tools.pcs_unittest import TestCase, skipUnless
-
-#python 2.6 does not support sys.version_info.major
-need_python3 = skipUnless(sys.version_info[0] == 3, "test requires python3")
-need_python2 = skipUnless(sys.version_info[0] == 2, "test requires python2")
+from pcs.test.tools.pcs_unittest import TestCase
import pcs.lib.node as lib
@@ -115,7 +108,6 @@ class NodeAddressesTest(TestCase):
self.assertTrue(node0 < node1)
self.assertFalse(node1 < node0)
-@need_python3
class NodeAddressesRepr(TestCase):
def test_host_only_specified(self):
self.assertEqual(repr(lib.NodeAddresses("node0")), str(
@@ -146,37 +138,6 @@ class NodeAddressesRepr(TestCase):
)
)
-@need_python2
-class NodeAddressesRepr_python2(TestCase):
- def test_host_only_specified(self):
- self.assertEqual(repr(lib.NodeAddresses("node0")), str(
- "<pcs.lib.node.NodeAddresses [u'node0'], {'name': None, 'id': None}>"
- ))
-
- def test_host_and_name_specified(self):
- self.assertEqual(repr(lib.NodeAddresses("node0", name="name0")), str(
- "<pcs.lib.node.NodeAddresses [u'node0'],"
- " {'name': u'name0', 'id': None}>"
- ))
-
- def test_host_name_and_id_specified(self):
- self.assertEqual(
- repr(lib.NodeAddresses("node0", name="name0", id="id0")),
- str(
- "<pcs.lib.node.NodeAddresses [u'node0'],"
- " {'name': u'name0', 'id': u'id0'}>"
- )
- )
-
- def test_host_ring1_name_and_id_specified(self):
- self.assertEqual(
- repr(lib.NodeAddresses("node0", "node0-1", name="name0", id="id0")),
- str(
- "<pcs.lib.node.NodeAddresses [u'node0', u'node0-1'],"
- " {'name': u'name0', 'id': u'id0'}>"
- )
- )
-
class NodeAddressesListTest(TestCase):
def test_empty(self):
diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py
index aa28d8e..3f1d9a1 100644
--- a/pcs/test/test_lib_sbd.py
+++ b/pcs/test/test_lib_sbd.py
@@ -2,12 +2,11 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import json
from pcs.test.tools.misc import outdent
-from pcs.test.tools.pcs_unittest import TestCase, mock
+from pcs.test.tools.pcs_unittest import TestCase, mock, skip
from pcs.test.tools.assertions import (
assert_raise_library_error,
@@ -20,7 +19,6 @@ from pcs.common import report_codes
from pcs.lib import reports
from pcs.lib.errors import (
ReportItemSeverity as Severities,
- ReportItem,
LibraryError,
)
from pcs.lib.node import NodeAddresses
@@ -38,55 +36,6 @@ class TestException(Exception):
pass
-class RunParallelAndRaiseLibErrorOnFailureTest(TestCase):
- def test_no_report_items(self):
- # test that no exception has been raised
- lib_sbd._run_parallel_and_raise_lib_error_on_failure(
- lambda: [],
- [([], {}) for _ in range(5)]
- )
-
- def test_failures(self):
- def report_item_generator(i):
- if i == 1:
- raise NodeConnectionException("node", "command", "reason")
- elif i == 2:
- raise LibraryError(
- ReportItem.error(
- report_codes.COMMON_ERROR,
- ),
- ReportItem.info(
- report_codes.COMMON_INFO,
- )
- )
-
- assert_raise_library_error(
- lambda: lib_sbd._run_parallel_and_raise_lib_error_on_failure(
- report_item_generator,
- [([i], {}) for i in range(5)]
- ),
- (
- Severities.ERROR,
- report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
- {
- "node": "node",
- "command": "command",
- "reason": "reason"
- }
- ),
- (
- Severities.ERROR,
- report_codes.COMMON_ERROR,
- {}
- ),
- (
- Severities.INFO,
- report_codes.COMMON_INFO,
- {}
- )
- )
-
-
class EvenNumberOfNodesAndNoQdevice(TestCase):
def setUp(self):
self.mock_corosync_conf = mock.MagicMock(spec_set=CorosyncConfigFacade)
@@ -272,6 +221,7 @@ class AtbHasToBeEnabledTest(TestCase):
)
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.CheckSbd")
class CheckSbdTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -284,7 +234,7 @@ class CheckSbdTest(TestCase):
"device_list=%5B%22%2Fdev%2Fsdb1%22%2C+%22%2Fdev%2Fsdc%22%5D"
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.CheckSbd")
@mock.patch("pcs.lib.sbd.check_sbd")
class CheckSbdOnNodeTest(TestCase):
def setUp(self):
@@ -548,7 +498,7 @@ class CheckSbdOnNodeTest(TestCase):
)
self.assertEqual(0, len(self.mock_rep.report_item_list))
-
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.CheckSbd")
@mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
class CheckSbdOnAllNodesTest(TestCase):
def test_success(self, mock_func):
@@ -581,7 +531,7 @@ class CheckSbdOnAllNodesTest(TestCase):
]
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.SetSbdConfig")
class SetSbdConfigTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -599,7 +549,7 @@ SBD_WATCHDOG_TIMEOUT=0
node, "remote/set_sbd_config", "config=" + cfg_url_encoded
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.SetSbdConfig")
@mock.patch("pcs.lib.sbd.set_sbd_config")
class SetSbdConfigOnNodeTest(TestCase):
def setUp(self):
@@ -659,7 +609,7 @@ SBD_WATCHDOG_TIMEOUT=0
)]
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.SetSbdConfig")
@mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
class SetSbdConfigOnAllNodesTest(TestCase):
def test_success(self, mock_func):
@@ -694,7 +644,7 @@ class SetSbdConfigOnAllNodesTest(TestCase):
]
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.EnableSbdService")
class EnableSbdServiceTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -704,7 +654,7 @@ class EnableSbdServiceTest(TestCase):
node, "remote/sbd_enable", None
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.EnableSbdService")
class EnableSbdServiceOnNodeTest(TestCase):
def setUp(self):
self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
@@ -729,7 +679,7 @@ class EnableSbdServiceOnNodeTest(TestCase):
)]
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.EnableSbdService")
@mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
class EnableSbdServiceOnAllNodes(TestCase):
def test_success(self, mock_func):
@@ -742,7 +692,7 @@ class EnableSbdServiceOnAllNodes(TestCase):
[([mock_rep, mock_com, node], {}) for node in node_list]
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.DisableSbdService")
class DisableSbdServiceTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -752,7 +702,7 @@ class DisableSbdServiceTest(TestCase):
node, "remote/sbd_disable", None
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.DisableSbdService")
class DisableSbdServiceOnNodeTest(TestCase):
def setUp(self):
self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
@@ -777,7 +727,7 @@ class DisableSbdServiceOnNodeTest(TestCase):
)]
)
-
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.DisableSbdService")
@mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
class DisableSbdServiceOnAllNodes(TestCase):
def test_success(self, mock_func):
@@ -790,7 +740,10 @@ class DisableSbdServiceOnAllNodes(TestCase):
[([mock_rep, mock_com, node], {}) for node in node_list]
)
-
+@skip(
+ "TODO: rewrite for "
+ "pcs.lib.communication.sbd.SetStonithWatchdogTimeoutToZero"
+)
class SetStonithWatchdogTimeoutToZeroTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -800,7 +753,10 @@ class SetStonithWatchdogTimeoutToZeroTest(TestCase):
node, "remote/set_stonith_watchdog_timeout_to_zero", None
)
-
+@skip(
+ "TODO: rewrite for "
+ "pcs.lib.communication.sbd.SetStonithWatchdogTimeoutToZero"
+)
@mock.patch("pcs.lib.sbd.set_stonith_watchdog_timeout_to_zero")
class SetStonithWatchdogTimeoutToZeroOnAllNodesTest(TestCase):
def setUp(self):
@@ -854,7 +810,9 @@ class SetStonithWatchdogTimeoutToZeroOnAllNodesTest(TestCase):
self.assertEqual(mock_func.call_count, len(func_calls))
mock_func.assert_has_calls(func_calls)
-
+@skip(
+ "TODO: rewrite for pcs.lib.communication.sbd.RemoveStonithWatchdogTimeout"
+)
class RemoveStonithWatchdogTimeoutTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -864,7 +822,9 @@ class RemoveStonithWatchdogTimeoutTest(TestCase):
node, "remote/remove_stonith_watchdog_timeout", None
)
-
+@skip(
+ "TODO: rewrite for pcs.lib.communication.sbd.RemoveStonithWatchdogTimeout"
+)
@mock.patch("pcs.lib.sbd.remove_stonith_watchdog_timeout")
class RemoveStonithWatchdogTimeoutOnAllNodesTest(TestCase):
def setUp(self):
@@ -918,7 +878,7 @@ class RemoveStonithWatchdogTimeoutOnAllNodesTest(TestCase):
self.assertEqual(mock_func.call_count, len(func_calls))
mock_func.assert_has_calls(func_calls)
-
+ at skip("TODO: rewrite for pcs.lib.communication.sbd.GetSbdStatus")
class GetSbdConfigTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -1244,4 +1204,3 @@ class SetMessageTest(TestCase):
self.mock_runner.run.assert_called_once_with([
settings.sbd_binary, "-d", "device", "message", "node", "test"
])
-
diff --git a/pcs/test/test_lib_tools.py b/pcs/test/test_lib_tools.py
index 3b84bc9..df5394c 100644
--- a/pcs/test/test_lib_tools.py
+++ b/pcs/test/test_lib_tools.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py
index 1935848..7a04496 100644
--- a/pcs/test/test_node.py
+++ b/pcs/test/test_node.py
@@ -2,15 +2,16 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import shutil
from pcs import node
-from pcs.test.tools.assertions import AssertPcsMixin
-from pcs.test.tools.misc import (
+from pcs.test.tools.assertions import (
ac,
+ AssertPcsMixin,
+)
+from pcs.test.tools.misc import (
get_test_resource as rc,
outdent,
)
diff --git a/pcs/test/test_properties.py b/pcs/test/test_properties.py
index 9634cca..121837d 100644
--- a/pcs/test/test_properties.py
+++ b/pcs/test/test_properties.py
@@ -2,17 +2,16 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import shutil
from pcs.test.tools import pcs_unittest as unittest
-from pcs.test.tools.assertions import AssertPcsMixin
-from pcs.test.tools.misc import (
+from pcs.test.tools.assertions import (
ac,
- get_test_resource as rc,
+ AssertPcsMixin,
)
+from pcs.test.tools.misc import get_test_resource as rc
from pcs.test.tools.pcs_runner import (
pcs,
PcsRunner,
diff --git a/pcs/test/test_quorum.py b/pcs/test/test_quorum.py
index c0769b5..16ede00 100644
--- a/pcs/test/test_quorum.py
+++ b/pcs/test/test_quorum.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import shutil
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index 19c32ce..29413cc 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from lxml import etree
@@ -11,11 +10,13 @@ import shutil
from textwrap import dedent
from pcs.test.tools import pcs_unittest as unittest
-from pcs.test.tools.assertions import AssertPcsMixin
+from pcs.test.tools.assertions import (
+ ac,
+ AssertPcsMixin,
+)
from pcs.test.tools.cib import get_assert_pcs_effect_mixin
from pcs.test.tools.pcs_unittest import TestCase
from pcs.test.tools.misc import (
- ac,
get_test_resource as rc,
outdent,
skip_unless_pacemaker_supports_bundle,
@@ -207,15 +208,18 @@ class ResourceTest(unittest.TestCase, AssertPcsMixin):
temp_large_cib,
"resource create dummy0 ocf:heartbeat:Dummy"
)
- assert returnVal == 0
ac(output, '')
+ assert returnVal == 0
output, returnVal = pcs(temp_large_cib, "resource show dummy0")
assert returnVal == 0
ac(output, outdent(
"""\
Resource: dummy0 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=10 timeout=20 (dummy0-monitor-interval-10)
+ Operations: migrate_from interval=0s timeout=20 (dummy0-migrate_from-interval-0s)
+ migrate_to interval=0s timeout=20 (dummy0-migrate_to-interval-0s)
+ monitor interval=10 timeout=20 (dummy0-monitor-interval-10)
+ reload interval=0s timeout=20 (dummy0-reload-interval-0s)
start interval=0s timeout=20 (dummy0-start-interval-0s)
stop interval=0s timeout=20 (dummy0-stop-interval-0s)
"""
@@ -748,8 +752,11 @@ monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s)
self.assert_pcs_success("resource show A", outdent(
"""\
Resource: A (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=10 (A-monitor-interval-10)
+ Operations: migrate_from interval=0s timeout=20 (A-migrate_from-interval-0s)
+ migrate_to interval=0s timeout=20 (A-migrate_to-interval-0s)
+ monitor interval=10 (A-monitor-interval-10)
monitor interval=20 (A-monitor-interval-20)
+ reload interval=0s timeout=20 (A-reload-interval-0s)
start interval=0s timeout=20 (A-start-interval-0s)
stop interval=0s timeout=20 (A-stop-interval-0s)
"""
@@ -775,8 +782,11 @@ monitor interval=20 (A-monitor-interval-20)
self.assert_pcs_success("resource show A", outdent(
"""\
Resource: A (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=11 (A-monitor-interval-11)
+ Operations: migrate_from interval=0s timeout=20 (A-migrate_from-interval-0s)
+ migrate_to interval=0s timeout=20 (A-migrate_to-interval-0s)
+ monitor interval=11 (A-monitor-interval-11)
monitor interval=20 (A-monitor-interval-20)
+ reload interval=0s timeout=20 (A-reload-interval-0s)
start interval=0s timeout=20 (A-start-interval-0s)
stop interval=0s timeout=20 (A-stop-interval-0s)
"""
@@ -3425,7 +3435,10 @@ Error: Cannot remove more than one resource from cloned group
Clone: group0-clone
Group: group0
Resource: dummy0 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=10 timeout=20 (dummy0-monitor-interval-10)
+ Operations: migrate_from interval=0s timeout=20 (dummy0-migrate_from-interval-0s)
+ migrate_to interval=0s timeout=20 (dummy0-migrate_to-interval-0s)
+ monitor interval=10 timeout=20 (dummy0-monitor-interval-10)
+ reload interval=0s timeout=20 (dummy0-reload-interval-0s)
start interval=0s timeout=20 (dummy0-start-interval-0s)
stop interval=0s timeout=20 (dummy0-stop-interval-0s)
"""
@@ -4686,7 +4699,10 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
"""\
Clone: dummy-clone
Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ Operations: migrate_from interval=0s timeout=20 (dummy-migrate_from-interval-0s)
+ migrate_to interval=0s timeout=20 (dummy-migrate_to-interval-0s)
+ monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ reload interval=0s timeout=20 (dummy-reload-interval-0s)
start interval=0s timeout=20 (dummy-start-interval-0s)
stop interval=0s timeout=20 (dummy-stop-interval-0s)
"""
@@ -4703,7 +4719,10 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
"""\
Clone: dummy-clone
Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ Operations: migrate_from interval=0s timeout=20 (dummy-migrate_from-interval-0s)
+ migrate_to interval=0s timeout=20 (dummy-migrate_to-interval-0s)
+ monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ reload interval=0s timeout=20 (dummy-reload-interval-0s)
start interval=0s timeout=20 (dummy-start-interval-0s)
stop interval=0s timeout=20 (dummy-stop-interval-0s)
"""
@@ -4717,7 +4736,10 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
"""\
Master: dummy-master
Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ Operations: migrate_from interval=0s timeout=20 (dummy-migrate_from-interval-0s)
+ migrate_to interval=0s timeout=20 (dummy-migrate_to-interval-0s)
+ monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ reload interval=0s timeout=20 (dummy-reload-interval-0s)
start interval=0s timeout=20 (dummy-start-interval-0s)
stop interval=0s timeout=20 (dummy-stop-interval-0s)
"""
@@ -4734,7 +4756,10 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
"""\
Master: dummy-master
Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ Operations: migrate_from interval=0s timeout=20 (dummy-migrate_from-interval-0s)
+ migrate_to interval=0s timeout=20 (dummy-migrate_to-interval-0s)
+ monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ reload interval=0s timeout=20 (dummy-reload-interval-0s)
start interval=0s timeout=20 (dummy-start-interval-0s)
stop interval=0s timeout=20 (dummy-stop-interval-0s)
"""
diff --git a/pcs/test/test_rule.py b/pcs/test/test_rule.py
index 0ea3a8f..e3a0edc 100644
--- a/pcs/test/test_rule.py
+++ b/pcs/test/test_rule.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import shutil
@@ -10,10 +9,8 @@ from pcs.test.tools import pcs_unittest as unittest
import xml.dom.minidom
from pcs import rule
-from pcs.test.tools.misc import (
- ac,
- get_test_resource as rc,
-)
+from pcs.test.tools.assertions import ac
+from pcs.test.tools.misc import get_test_resource as rc
from pcs.test.tools.pcs_runner import pcs
empty_cib = rc("cib-empty.xml")
diff --git a/pcs/test/test_status.py b/pcs/test/test_status.py
index 09af303..b412b91 100644
--- a/pcs/test/test_status.py
+++ b/pcs/test/test_status.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import shutil
diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
index 4a2073f..5bd13eb 100644
--- a/pcs/test/test_stonith.py
+++ b/pcs/test/test_stonith.py
@@ -2,16 +2,17 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import shutil
from pcs import utils
from pcs.cli.common.console_report import indent
-from pcs.test.tools.assertions import AssertPcsMixin
-from pcs.test.tools.misc import (
+from pcs.test.tools.assertions import (
ac,
+ AssertPcsMixin,
+)
+from pcs.test.tools.misc import (
get_test_resource as rc,
is_minimum_pacemaker_version,
skip_unless_pacemaker_version,
diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py
index dba00c4..ac21da9 100644
--- a/pcs/test/test_utils.py
+++ b/pcs/test/test_utils.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import sys
@@ -2675,22 +2674,22 @@ class IsNodeStopCauseQuorumLossTest(unittest.TestCase):
quorum_info, False, ["rh70-node2", "rh70-node3"]
)
)
+
class CanAddNodeToCluster(unittest.TestCase):
def setUp(self):
- patcher = mock.patch("pcs.utils.check_can_add_node_to_cluster")
+ patcher = mock.patch("pcs.utils.run_com_cmd")
self.addCleanup(patcher.stop)
self.check_can_add = patcher.start()
def assert_report_list_cause_result(self, report_list, can_add, message):
- def side_effect(node_communicator, node, report_items):
- report_items.extend(
+ def side_effect(node_communicator, com_cmd):
+ com_cmd._report_items.extend(
report_list if isinstance(report_list, list) else [report_list]
)
self.check_can_add.side_effect = side_effect
result_can_add, result_message = utils.canAddNodeToCluster(
- node_communicator = None,
- node="node1"
+ None, "node1"
)
self.assertEqual((result_can_add, result_message), (can_add, message))
@@ -2756,3 +2755,17 @@ class CanAddNodeToCluster(unittest.TestCase):
),
"error checking node availability: reason"
)
+
+class TouchCibFile(unittest.TestCase):
+ @mock.patch("pcs.utils.os.path.isfile", mock.Mock(return_value=False))
+ @mock.patch(
+ "pcs.utils.write_empty_cib",
+ mock.Mock(side_effect=EnvironmentError("some message"))
+ )
+ @mock.patch("pcs.utils.err")
+ def test_exception_is_transformed_correctly(self, err):
+ filename = "/fake/filename"
+ utils.touch_cib_file(filename)
+ err.assert_called_once_with(
+ "Unable to write to file: '/fake/filename': 'some message'"
+ )
diff --git a/pcs/test/tools/assertions.py b/pcs/test/tools/assertions.py
index 5177598..d909d96 100644
--- a/pcs/test/tools/assertions.py
+++ b/pcs/test/tools/assertions.py
@@ -2,20 +2,36 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
+import difflib
import doctest
from lxml.doctestcompare import LXMLOutputChecker
from lxml.etree import LXML_VERSION
import re
from pcs.lib.errors import LibraryError
-from pcs.test.tools.misc import prepare_diff
# cover python2 vs. python3 differences
_re_object_type = type(re.compile(""))
+def prepare_diff(first, second):
+ """
+ Return a string containing a diff of first and second
+ """
+ return "".join(
+ difflib.Differ().compare(first.splitlines(1), second.splitlines(1))
+ )
+
+def ac(a, b):
+ """
+ Compare the actual output 'a' with the expected output 'b'; on mismatch
+ raise an AssertionError containing a diff of 'b' and 'a'
+ """
+ if a != b:
+ raise AssertionError(
+ "strings not equal:\n{0}".format(prepare_diff(b, a))
+ )
+
def start_tag_error_text():
"""lxml 3.7+ gives a longer 'start tag expected' error message,
handle it here so multiple tests can just get the appropriate
@@ -249,48 +265,53 @@ def assert_report_item_equal(real_report_item, report_item_info):
)
)
-def assert_report_item_list_equal(real_report_item_list, report_info_list):
- for report_item in real_report_item_list:
- report_info_list.remove(
- __find_report_info(report_info_list, report_item)
+def assert_report_item_list_equal(
+ real_report_item_list, expected_report_info_list, hint=""
+):
+ for real_report_item in real_report_item_list:
+ expected_report_info_list.remove(
+ __find_report_info(expected_report_info_list, real_report_item)
)
- if report_info_list:
- raise AssertionError(
- "LibraryError is missing expected ReportItems ("
- +str(len(report_info_list))+"):\n"
- + "\n".join(map(repr, report_info_list))
+ if expected_report_info_list:
+ def format_items(item_type, item_list):
+ caption = "{0} ReportItems({1})".format(item_type, len(item_list))
+ return "{0}\n{1}\n{2}".format(
+ caption,
+ "-"*len(caption),
+ "\n".join(map(repr, item_list))
+ )
- + "\nreal ReportItems ("+str(len(real_report_item_list))+"):\n"
- + "\n".join(map(repr, real_report_item_list))
+ raise AssertionError(
+ "\nExpected LibraryError is missing\n{0}\n\n{1}\n\n{2}".format(
+ "{0}\n".format(hint) if hint else "",
+ format_items("expected", expected_report_info_list),
+ format_items("real", real_report_item_list),
+ )
)
def assert_raise_library_error(callableObj, *report_info_list):
- if not report_info_list:
- raise AssertionError(
- "Raising LibraryError expected, but no report item specified."
- + " Please specify report items, that you expect in LibraryError"
- )
try:
callableObj()
raise AssertionError("LibraryError not raised")
except LibraryError as e:
assert_report_item_list_equal(e.args, list(report_info_list))
-def __find_report_info(report_info_list, report_item):
- for report_info in report_info_list:
- if __report_item_equal(report_item, report_info):
+def __find_report_info(expected_report_info_list, real_report_item):
+ for report_info in expected_report_info_list:
+ if __report_item_equal(real_report_item, report_info):
return report_info
raise AssertionError(
"Unexpected report given: \n{0} \nexpected reports are: \n{1}"
.format(
repr((
- report_item.severity,
- report_item.code,
- report_item.info,
- report_item.forceable
+ real_report_item.severity,
+ real_report_item.code,
+ real_report_item.info,
+ real_report_item.forceable
)),
- "\n".join(map(repr, report_info_list)) if report_info_list
- else " No report is expected!"
+ "\n".join(map(repr, expected_report_info_list))
+ if expected_report_info_list
+ else " No other report is expected!"
)
)
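
As a minimal sketch of the relocated ac() helper (relying only on the code
above; the sample strings are illustrative, not part of the patch):

from pcs.test.tools.assertions import ac

ac("node: rh7-1\n", "node: rh7-1\n")    # equal strings pass silently
# Unequal strings raise AssertionError whose message embeds a difflib diff:
# ac("node: rh7-1\n", "node: rh7-2\n")
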
diff --git a/pcs/test/tools/check/test_misc.py b/pcs/test/tools/check/test_misc.py
index 0d0f319..0698dfa 100644
--- a/pcs/test/tools/check/test_misc.py
+++ b/pcs/test/tools/check/test_misc.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.pcs_unittest import TestCase
diff --git a/pcs/test/tools/cib.py b/pcs/test/tools/cib.py
index 23ff869..69e0fea 100644
--- a/pcs/test/tools/cib.py
+++ b/pcs/test/tools/cib.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.assertions import AssertPcsMixin, assert_xml_equal
diff --git a/pcs/test/tools/color_text_runner/__init__.py b/pcs/test/tools/color_text_runner/__init__.py
index a82d63e..0da2fe5 100644
--- a/pcs/test/tools/color_text_runner/__init__.py
+++ b/pcs/test/tools/color_text_runner/__init__.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.color_text_runner.result import get_text_test_result_class
diff --git a/pcs/test/tools/color_text_runner/format.py b/pcs/test/tools/color_text_runner/format.py
index 7fb957b..f783c39 100644
--- a/pcs/test/tools/color_text_runner/format.py
+++ b/pcs/test/tools/color_text_runner/format.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import re
diff --git a/pcs/test/tools/color_text_runner/result.py b/pcs/test/tools/color_text_runner/result.py
index 4fda261..afc275d 100644
--- a/pcs/test/tools/color_text_runner/result.py
+++ b/pcs/test/tools/color_text_runner/result.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools import pcs_unittest as unittest
from pcs.test.tools.color_text_runner.format import (
diff --git a/pcs/test/tools/color_text_runner/writer.py b/pcs/test/tools/color_text_runner/writer.py
index d927cd1..53f618c 100644
--- a/pcs/test/tools/color_text_runner/writer.py
+++ b/pcs/test/tools/color_text_runner/writer.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
from pcs.test.tools.color_text_runner.format import (
diff --git a/pcs/test/tools/command_env/__init__.py b/pcs/test/tools/command_env/__init__.py
new file mode 100644
index 0000000..9cecc58
--- /dev/null
+++ b/pcs/test/tools/command_env/__init__.py
@@ -0,0 +1,83 @@
+"""
+This is the set of tools for testing commands (pcs.lib.commands).
+
+The principle is to patch some parts of the library environment object
+(pcs.lib.env.LibraryEnvironment) that is passed as the first argument to each
+command.
+
+Important parts:
+================
+CallListBuilder + (Call)Queue
+-----------------------------
+Both objects store a list of calls (messages to the mocked parts of the
+environment). CallListBuilder is used in the configuration phase (before the
+command runs) to build the call list. Queue is used in the run phase (while
+the command runs) to check that everything is done as expected.
+
+Mocks (Runner, push_cib, ...)
+-----------------------------
+Mocks replace the real environment parts. Every Mock has access to the Queue.
+Each time a mock receives a message from the tested command, it takes the
+expected message from the Queue and compares it with the real one. When the
+messages match, the Mock returns the expected result; otherwise the Mock call
+fails.
+
+With each Mock comes a Call that represents the message appropriate for the
+concrete Mock.
+
+Config (with RunnerConfig, CibShortcuts, EnvConfig, ...)
+--------------------------------------------------------
+The tests use the Config to build the list of expected calls (messages to
+the mocked parts). Config stores the list of calls in a CallListBuilder.
+
+EnvAssistant
+------------
+EnvAssistant provides the CallListBuilder to the Config. When a test requests
+an environment (from the EnvAssistant), the EnvAssistant:
+* takes the calls from the Config and prepares the Queue (of calls)
+* creates the appropriate mocks and provides them with the Queue
+* patches the environment with the appropriate mocks
+* returns the patched environment
+
+When the test is done, the EnvAssistant unpatches the environment and does the
+required checks (that the whole Queue is consumed, that there were no extra
+reports, ...)
+
+Example:
+========
+from pcs.lib.commands import resource
+from pcs.test.tools.command_env import get_env_tools
+from pcs.test.tools.pcs_unittest import TestCase
+
+class ExampleTest(TestCase):
+ def test_success(self):
+ env_assist, config = get_env_tools(test_case=self)
+ (config
+ .runner.cib.load()
+ .runner.cib.push(
+ resources='''
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+ '''
+ )
+ )
+ resource.bundle_create(
+ env_assist.get_env(),
+ "B1",
+ "docker",
+ container_options={"image": "pcs:test"},
+ ensure_disabled=False,
+ wait=False,
+ )
+"""
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+from pcs.test.tools.command_env.config import Config
+from pcs.test.tools.command_env.assistant import EnvAssistant
+from pcs.test.tools.command_env.tools import get_env_tools
diff --git a/pcs/test/tools/command_env/assistant.py b/pcs/test/tools/command_env/assistant.py
new file mode 100644
index 0000000..6d8d607
--- /dev/null
+++ b/pcs/test/tools/command_env/assistant.py
@@ -0,0 +1,185 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+import logging
+from functools import partial
+
+from pcs.lib.env import LibraryEnvironment
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.command_env.calls import Queue as CallQueue
+from pcs.test.tools.command_env.config import Config
+from pcs.test.tools.command_env.mock_push_cib import (
+ get_push_cib,
+ is_push_cib_call_in,
+)
+from pcs.test.tools.command_env.mock_runner import Runner
+from pcs.test.tools.command_env.mock_get_local_corosync_conf import (
+ get_get_local_corosync_conf
+)
+from pcs.test.tools.command_env.mock_node_communicator import NodeCommunicator
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.command_env import spy
+
+
+patch_lib_env = partial(mock.patch.object, LibraryEnvironment)
+
+def patch_env(call_queue, config, init_env):
+ #It is mandatory to patch some env objects/methods. It is ok when a command
+ #does not use these objects/methods and specifies no call for them. But it
+ #would be a problem if a test succeeded because the live call responded
+ #correctly by accident. Such a test would fail on a different machine (with
+ #another live environment).
+
+ patcher_list = [
+ patch_lib_env(
+ "cmd_runner",
+ lambda env:
+ spy.Runner(init_env.cmd_runner()) if config.spy else Runner(
+ call_queue,
+ env_vars={} if not config.env.cib_tempfile else {
+ "CIB_file": config.env.cib_tempfile,
+ }
+ )
+ ),
+
+ mock.patch(
+ "pcs.lib.env.get_local_corosync_conf",
+ get_get_local_corosync_conf(call_queue) if not config.spy
+ else spy.get_local_corosync_conf
+ ),
+
+ patch_lib_env(
+ "get_node_communicator",
+ lambda env:
+ NodeCommunicator(call_queue) if not config.spy
+ else spy.NodeCommunicator(init_env.get_node_communicator())
+ )
+ ]
+
+ #It is not always desirable to patch the method push_cib. Some tests can
+ #patch only the internals (runner...). So push_cib is patched only when it
+ #is explicitly configured
+ if is_push_cib_call_in(call_queue):
+ patcher_list.append(
+ patch_lib_env("push_cib", get_push_cib(call_queue))
+ )
+
+ for patcher in patcher_list:
+ patcher.start()
+
+ def unpatch():
+ for patcher in patcher_list:
+ patcher.stop()
+
+ return unpatch
+
+class EnvAssistant(object):
+ # pylint: disable=too-many-instance-attributes
+ def __init__(
+ self, config=None, test_case=None,
+ exception_reports_in_processor_by_default=True
+ ):
+ """
+ TestCase test_case -- the cleanup callback is registered to test_case
+ if it is provided
+ """
+ self.__call_queue = None
+ self.__config = config if config else Config()
+ self.__reports_asserted = False
+ self.__extra_reports = []
+ self.exception_reports_in_processor_by_default = (
+ exception_reports_in_processor_by_default
+ )
+
+ self.__unpatch = None
+
+ if test_case:
+ test_case.addCleanup(self.cleanup)
+
+ @property
+ def config(self):
+ return self.__config
+
+ def cleanup(self):
+ if self.__unpatch:
+ self.__unpatch()
+
+ if not self.__reports_asserted:
+ self.__assert_environment_created()
+ if not self.__config.spy:
+ self._env.report_processor.assert_reports(
+ self.__extra_reports,
+ hint="EnvAssistant.cleanup - is param"
+ " 'expected_in_processor' in the method"
+ " 'assert_raise_library_error' set correctly?"
+ )
+
+ if not self.__config.spy:
+ if self.__call_queue and self.__call_queue.remaining:
+ raise AssertionError(
+ "There are remaining expected calls: \n '{0}'"
+ .format("'\n '".join([
+ repr(call) for call in self.__call_queue.remaining
+ ]))
+ )
+
+ def get_env(self):
+ self.__call_queue = CallQueue(self.__config.calls)
+ #pylint: disable=attribute-defined-outside-init
+ self._env = LibraryEnvironment(
+ mock.MagicMock(logging.Logger),
+ MockLibraryReportProcessor(),
+ cib_data=self.__config.env.cib_data,
+ corosync_conf_data=self.__config.env.corosync_conf_data,
+ token_file_data_getter=(
+ (lambda: {
+ "tokens": self.__config.spy.auth_tokens,
+ "ports": self.__config.spy.ports or {},
+ }) if self.__config.spy else None
+ )
+ )
+ self.__unpatch = patch_env(self.__call_queue, self.__config, self._env)
+ return self._env
+
+ def assert_reports(self, reports):
+ self.__reports_asserted = True
+ self.__assert_environment_created()
+ self._env.report_processor.assert_reports(
+ reports + self.__extra_reports
+ )
+
+ def assert_raise_library_error(
+ self, command, reports, expected_in_processor=None
+ ):
+ if not isinstance(reports, list):
+ raise self.__list_of_reports_expected(reports)
+
+ if expected_in_processor is None:
+ expected_in_processor = (
+ self.exception_reports_in_processor_by_default
+ )
+
+ assert_raise_library_error(command, *reports)
+ if expected_in_processor:
+ self.__extra_reports = reports
+
+ def __assert_environment_created(self):
+ if not hasattr(self, "_env"):
+ raise AssertionError(
+ "LibraryEnvironment was not created in EnvAssitant."
+ " Have you called method get_env?"
+ )
+
+ def __list_of_reports_expected(self, reports):
+ return AssertionError(
+ "{0}.{1} expects 'list' as reports parameter, '{2}' was given"
+ .format(
+ self.__class__.__name__,
+ "assert_raise",
+ type(reports).__name__
+ )
+ )
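
A minimal sketch of the failure path through EnvAssistant, reusing the imports
from the command_env docstring example; "B1" and the reports list are
illustrative, not part of the patch:

env_assist, config = get_env_tools(test_case=self)   # inside a TestCase
config.runner.cib.load()
env_assist.assert_raise_library_error(
    lambda: resource.bundle_create(
        env_assist.get_env(), "B1", "docker",
        container_options={"image": "pcs:test"},
    ),
    reports,   # must be a list of report fixtures, else AssertionError
)
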
diff --git a/pcs/test/tools/command_env/calls.py b/pcs/test/tools/command_env/calls.py
new file mode 100644
index 0000000..81be429
--- /dev/null
+++ b/pcs/test/tools/command_env/calls.py
@@ -0,0 +1,240 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+def format_call(call):
+ if hasattr(call, "format"):
+ return call.format()
+ return call
+
+def show_calls(name_list, call_list):
+ return "\n".join([
+ " {0}. '{1}': {2}".format(i, x[0], format_call(x[1]))
+ for i, x in enumerate(zip(name_list, call_list))
+ ])
+
+
+class Queue(object):
+ def __init__(self, call_list_builder=None):
+ if not call_list_builder:
+ call_list_builder = CallListBuilder()
+
+ self.__call_list = call_list_builder.calls
+ self.__name_list = call_list_builder.names
+
+ self.__index = 0
+
+ def take(self, type_of_call, real_call_info=None):
+ if self.__index >= len(self.__call_list):
+ raise self.__extra_call(type_of_call, real_call_info)
+
+ call = self.__call_list[self.__index]
+
+ if call.type != type_of_call:
+ raise self.__unexpected_type(call, type_of_call, real_call_info)
+
+ self.__index += 1
+ return self.__index, call
+
+ def has_type(self, call_type):
+ return any(call.type == call_type for call in self.__call_list)
+
+ @property
+ def remaining(self):
+ return self.__call_list[self.__index:]
+
+ @property
+ def taken(self):
+ return self.__call_list[:self.__index]
+
+ def error_with_context(self, message):
+ return AssertionError(
+ "{0}\nAll calls in queue (current index={1}):\n{2}".format(
+ message,
+ self.__index,
+ show_calls(self.__name_list, self.__call_list),
+ )
+ )
+
+ def __unexpected_type(self, call, real_type, real_call_info):
+ return self.error_with_context(
+ (
+ "{0}. call was expected as '{1}' type but was '{2}' type"
+ "\n expected call: {3}{4}"
+ "\nHint: check call compatibility: for example if you use"
+ " env.push_cib() then runner.cib.push() will be never launched"
+ ).format(
+ self.__index + 1,
+ call.type,
+ real_type,
+ call,
+ "\n real call: {0}".format(real_call_info) if real_call_info
+ else ""
+ ,
+ )
+ )
+
+ def __extra_call(self, type_of_call, real_call_info):
+ return self.error_with_context(
+ "No next call expected, but was ({0}):\n '{1}'"
+ .format(type_of_call, real_call_info)
+ )
+
+class CallListBuilder(object):
+ def __init__(self):
+ self.__call_list = []
+ self.__name_list = []
+
+ @property
+ def calls(self):
+ return list(self.__call_list)
+
+ @property
+ def names(self):
+ return list(self.__name_list)
+
+
+ def __set(self, instead_name, name, call):
+ """
+ Replace call that has key instead_name with new call that has key name
+
+ string name -- key of the call
+ Call call
+ string instead_name -- key of call instead of which this new call is to
+ be placed
+ """
+ if instead_name not in self.__name_list:
+ raise self.__cannot_put("instead of", instead_name, name, call)
+
+ for i, current_name in enumerate(self.__name_list):
+ if current_name == instead_name:
+ self.__call_list[i] = call
+ #yes we change the name as well
+ self.__name_list[i] = name
+ return
+
+ def __append(self, name, call):
+ """
+ Append call.
+
+ string name -- key of the call
+ Call call
+ """
+ self.__name_list.append(name)
+ self.__call_list.append(call)
+
+ def __insert(self, before_name, name, call):
+ """
+ Insert call before call with before_name.
+
+ string before_name -- key of call before which this new call is to be
+ placed
+ string name -- key of the call
+ Call call
+ """
+ if before_name not in self.__name_list:
+ raise self.__cannot_put("before", before_name, name, call)
+
+ index = self.__name_list.index(before_name)
+ self.__name_list.insert(index, name)
+ self.__call_list.insert(index, call)
+
+ def remove(self, name):
+ """
+ Remove call under key name.
+ """
+ try:
+ index = self.__name_list.index(name)
+ del self.__call_list[index]
+ del self.__name_list[index]
+ except ValueError:
+ raise self.__name_not_exists(name)
+
+ def get(self, name):
+ """
+ Get first call with name.
+
+ string name -- key of the call
+ """
+ try:
+ return self.__call_list[self.__name_list.index(name)]
+ except ValueError:
+ raise self.__name_not_exists(name)
+
+ def place(self, name, call, before=None, instead=None):
+ """
+ Place a call into the call list.
+
+ string name -- key of the call
+ Call call
+ string before -- key of call before which this new call is to be placed
+ string instead -- key of call instead of which this new call is to be
+ placed
+ """
+ if name and name in self.__name_list and instead != name:
+ raise self.__name_exists_already(name)
+
+ if before and instead:
+ raise self.__cannot_use_before_and_instead(
+ name,
+ call,
+ before,
+ instead,
+ )
+
+ if not hasattr(call, "type") or not call.type:
+ raise self.__type_of_call_is_not_specified(call)
+
+ if before:
+ self.__insert(before, name, call)
+ elif instead:
+ self.__set(instead, name, call)
+ else:
+ self.__append(name, call)
+
+ def __error_with_context(self, message):
+ return AssertionError(
+ "{0}\nIn the confituration call collection are calls:\n{1}".format(
+ message,
+ show_calls(self.__name_list, self.__call_list),
+ )
+ )
+
+ def __type_of_call_is_not_specified(self, call):
+ return AssertionError(
+ "Class {0}.{1} must have the attribute 'type' with no-falsy value."
+ .format(call.__module__, call.__class__.__name__)
+ )
+
+ def __name_not_exists(self, name):
+ return self.__error_with_context(
+ "Call named '{0}' does not exist.".format(name)
+ )
+
+ def __name_exists_already(self, name):
+ return self.__error_with_context(
+ "Name '{0}' is in this configuration already.".format(name)
+ )
+
+ def __cannot_use_before_and_instead(self, name, call, before, instead):
+ return self.__error_with_context(
+ (
+ "Args 'before' ({0}) and 'instead' ({1}) cannot be used"
+ " together\n '{2}': {3}"
+ ).format(before, instead, name, call)
+ )
+
+ def __cannot_put(self, where_type, where_name, name, call):
+ return self.__error_with_context(
+ (
+ "Cannot put call named '{0}' ({1}) {2} '{3}'"
+ " because '{3}' does not exist."
+ ).format(
+ name,
+ call,
+ where_type,
+ where_name,
+ )
+ )
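
A minimal sketch of how CallListBuilder and Queue cooperate, using an
illustrative stand-in call class (DummyCall is not part of the patch):

from pcs.test.tools.command_env.calls import CallListBuilder, Queue

class DummyCall(object):
    type = "CALL_TYPE_DUMMY"   # place() rejects calls without a truthy type

builder = CallListBuilder()
builder.place("first", DummyCall())
builder.place("third", DummyCall())
builder.place("second", DummyCall(), before="third")   # ordered insertion

queue = Queue(builder)
for _ in range(3):
    index, call = queue.take("CALL_TYPE_DUMMY")   # calls come back in order
assert not queue.remaining                        # the whole queue consumed
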
diff --git a/pcs/test/tools/command_env/config.py b/pcs/test/tools/command_env/config.py
new file mode 100644
index 0000000..dbe6d1a
--- /dev/null
+++ b/pcs/test/tools/command_env/config.py
@@ -0,0 +1,71 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.command_env.calls import CallListBuilder
+from pcs.test.tools.command_env.config_env import EnvConfig
+from pcs.test.tools.command_env.config_corosync_conf import CorosyncConf
+from pcs.test.tools.command_env.config_runner import RunnerConfig
+from pcs.test.tools.command_env.config_http import HttpConfig
+
+class Spy(object):
+ def __init__(self, auth_tokens=None, ports=None):
+ self.auth_tokens = auth_tokens
+ self.ports = ports
+
+class Config(object):
+ def __init__(self):
+ self.__calls = CallListBuilder()
+ self.runner = self.__wrap_helper(
+ RunnerConfig(
+ self.__calls,
+ self.__wrap_helper,
+ )
+ )
+ self.env = self.__wrap_helper(EnvConfig(self.__calls))
+ self.http = self.__wrap_helper(HttpConfig(self.__calls))
+ self.corosync_conf = self.__wrap_helper(CorosyncConf(self.__calls))
+
+ self.spy = None
+
+ def set_spy(self, auth_tokens, ports=None):
+ self.spy = Spy(auth_tokens, ports)
+ return self
+
+
+ @property
+ def calls(self):
+ return self.__calls
+
+ def remove(self, name):
+ """
+ Remove call with specified name from list.
+ """
+ self.__calls.remove(name)
+ return self
+
+ def __wrap_method(self, helper, name, method):
+ """
+ Wrap method in helper to return self of this object
+
+ object helper -- helper for creating a call configuration
+ string name -- name of method in helper
+ callable method
+ """
+ def wrapped_method(*args, **kwargs):
+ method(helper, *args, **kwargs)
+ return self
+ setattr(helper, name, wrapped_method)
+
+ def __wrap_helper(self, helper):
+ """
+ Wrap every public method in helper to return self of this object
+
+ object helper -- helper for creating a call configuration
+ """
+ for name, attr in helper.__class__.__dict__.items():
+ if not name.startswith("_") and hasattr(attr, "__call__"):
+ self.__wrap_method(helper, name, attr)
+ return helper
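
Because __wrap_helper rebinds every public helper method to return the Config
itself, configurations chain fluently. A minimal sketch (assuming the shipped
test resource file cib-empty.xml is available):

from pcs.test.tools.command_env.config import Config

config = Config()
(config
    .runner.cib.load()          # placed under the name "runner.cib.load"
    .runner.pcmk.can_wait()     # placed under "runner.pcmk.can_wait"
)
assert config.calls.names == ["runner.cib.load", "runner.pcmk.can_wait"]
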
diff --git a/pcs/test/tools/command_env/config_corosync_conf.py b/pcs/test/tools/command_env/config_corosync_conf.py
new file mode 100644
index 0000000..dd2e07c
--- /dev/null
+++ b/pcs/test/tools/command_env/config_corosync_conf.py
@@ -0,0 +1,53 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.command_env.mock_get_local_corosync_conf import Call
+from pcs.lib.corosync.config_facade import ConfigFacade
+from pcs.lib.corosync.config_parser import Section
+from pcs.test.tools.misc import get_test_resource as rc
+
+class CorosyncConf(object):
+ def __init__(self, call_collection):
+ self.__calls = call_collection
+
+ def load_content(self, content, name="corosync_conf.load_content"):
+ self.__calls.place(name, Call(content))
+
+ def load(
+ self, node_name_list=None, name="corosync_conf.load",
+ auto_tie_breaker=None
+ ):
+ content = open(rc("corosync.conf")).read()
+ corosync_conf = None
+ if node_name_list:
+ corosync_conf = ConfigFacade.from_string(content).config
+ for nodelist in corosync_conf.get_sections(name="nodelist"):
+ corosync_conf.del_section(nodelist)
+
+ nodelist_section = Section("nodelist")
+ corosync_conf.add_section(nodelist_section)
+ for i, node_name in enumerate(node_name_list):
+ node_section = Section("node")
+ node_section.add_attribute("ring0_addr", node_name)
+ node_section.add_attribute("nodeid", i)
+ nodelist_section.add_section(node_section)
+
+
+ if auto_tie_breaker is not None:
+ corosync_conf = (
+ corosync_conf if corosync_conf
+ else ConfigFacade.from_string(content).config
+ )
+ for quorum in corosync_conf.get_sections(name="quorum"):
+ quorum.set_attribute(
+ "auto_tie_breaker",
+ "1" if auto_tie_breaker else "0"
+ )
+
+ if corosync_conf:
+ content = corosync_conf.export()
+
+ self.load_content(content, name)
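
A minimal sketch (node names are illustrative; config obtained from
get_env_tools): load() rewrites the shipped corosync.conf fixture before
queueing it:

config.corosync_conf.load(
    node_name_list=["rh7-1", "rh7-2"],   # replaces the fixture's nodelist
    auto_tie_breaker=True,               # sets quorum.auto_tie_breaker to 1
)
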
diff --git a/pcs/test/tools/command_env/config_env.py b/pcs/test/tools/command_env/config_env.py
new file mode 100644
index 0000000..9f52629
--- /dev/null
+++ b/pcs/test/tools/command_env/config_env.py
@@ -0,0 +1,84 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.command_env.mock_push_cib import Call as PushCibCall
+from pcs.test.tools.fixture_cib import modify_cib
+
+
+class EnvConfig(object):
+ def __init__(self, call_collection):
+ self.__calls = call_collection
+ self.__cib_data = None
+ self.__cib_tempfile = None
+ self.__corosync_conf_data = None
+
+
+ def set_cib_data(self, cib_data, cib_tempfile="/fake/tmp/file"):
+ self.__cib_data = cib_data
+ self.__cib_tempfile = cib_tempfile
+
+
+ @property
+ def cib_data(self):
+ return self.__cib_data
+
+ @property
+ def cib_tempfile(self):
+ return self.__cib_tempfile
+
+ def set_corosync_conf_data(self, corosync_conf_data):
+ self.__corosync_conf_data = corosync_conf_data
+
+ @property
+ def corosync_conf_data(self):
+ return self.__corosync_conf_data
+
+ def push_cib(
+ self, modifiers=None, name="env.push_cib",
+ load_key="runner.cib.load", wait=False, exception=None, instead=None,
+ **modifier_shortcuts
+ ):
+ """
+ Create call for pushing cib.
+
+ string name -- key of the call
+ list of callable modifiers -- every callable takes etree.Element and
+ returns new etree.Element with desired modification.
+ string load_key -- key of the call whose stdout the cib is taken from
+ string|False wait -- wait for pacemaker idle
+ Exception|None exception -- exception that should raise env.push_cib
+ string instead -- key of call instead of which this new call is to be
+ placed
+ dict modifier_shortcuts -- a new modifier is generated from each
+ modifier shortcut.
+ As key there can be keys of MODIFIER_GENERATORS.
+ Value is passed into appropriate generator from MODIFIER_GENERATORS.
+ For details see pcs.test.tools.fixture_cib (mainly the variable
+ MODIFIER_GENERATORS - please refer to it when you are adding params
+ here)
+ """
+ cib_xml = modify_cib(
+ self.__calls.get(load_key).stdout,
+ modifiers,
+ **modifier_shortcuts
+ )
+ self.__calls.place(
+ name,
+ PushCibCall(cib_xml, wait=wait, exception=exception),
+ instead=instead
+ )
+
+ def push_cib_custom(
+ self, name="env.push_cib_custom", custom_cib=None, wait=False,
+ exception=None, instead=None
+ ):
+ self.__calls.place(
+ name,
+ PushCibCall(
+ custom_cib, custom_cib=True, wait=wait, exception=exception
+ ),
+ instead=instead
+ )
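
A minimal sketch of the load/push pairing (config obtained from get_env_tools
as in the earlier example); the 'resources' keyword is a modifier shortcut
resolved by pcs.test.tools.fixture_cib, which is not shown in this patch:

(config
    .runner.cib.load()
    .env.push_cib(resources="""
        <resources>
            <primitive id="D1" class="ocf" provider="heartbeat" type="Dummy"/>
        </resources>
    """)
)
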
diff --git a/pcs/test/tools/command_env/config_http.py b/pcs/test/tools/command_env/config_http.py
new file mode 100644
index 0000000..b5c610b
--- /dev/null
+++ b/pcs/test/tools/command_env/config_http.py
@@ -0,0 +1,126 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.common import pcs_pycurl as pycurl
+from pcs.common.node_communicator import (
+ RequestTarget,
+ RequestData,
+ Request,
+ Response,
+)
+from pcs.test.tools.command_env.mock_node_communicator import (
+ AddRequestCall,
+ StartLoopCall,
+)
+from pcs.test.tools.custom_mock import MockCurlSimple
+
+
+class HttpConfig(object):
+ def __init__(self, call_collection):
+ self.__calls = call_collection
+
+ def __communication_to_response(
+ self, label, address_list, action, param_list, port, token,
+ response_code, output, debug_output, was_connected, errno,
+ error_msg
+ ):
+ return Response(
+ MockCurlSimple(
+ info={pycurl.RESPONSE_CODE: response_code},
+ output=output.encode("utf-8"),
+ debug_output=debug_output.encode("utf-8"),
+ request=Request(
+ RequestTarget(label, address_list, port, token),
+ RequestData(action, param_list),
+ )
+ ),
+ was_connected=was_connected,
+ errno=errno,
+ error_msg=error_msg,
+ )
+
+ def add_communication(
+ self, name, communication_list,
+ action="", param_list=None, port=None, token=None,
+ response_code=None, output="", debug_output="", was_connected=True,
+ errno=0, error_msg_template=None
+ ):
+ """
+ list of dict communication_list -- settings for one request - response
+ pair; it accepts the keys:
+ label -- required, see RequestTarget
+ action -- pcsd url, see RequestData
+ param_list -- list of pairs, see RequestData
+ port -- see RequestTarget
+ token=None -- see RequestTarget
+ response_code -- http response code
+ output -- http response output
+ debug_output -- pycurl debug output
+ was_connected -- see Response
+ errno -- see Response
+ error_msg -- see Response
+ if some key is not present, it is taken from the common values - the
+ remaining arguments of this function (except name, communication_list
+ and error_msg_template)
+ string error_msg_template -- template; the keys for the format function
+ are taken from the appropriate item of communication_list
+ string action -- pcsd url, see RequestData
+ list of pairs (tuple) param_list -- see RequestData
+ string port -- see RequestTarget
+ string token=None -- see RequestTarget
+ string response_code -- http response code
+ string output -- http response output
+ string debug_output -- pycurl debug output
+ bool was_connected -- see Response
+ int errno -- see Response
+ string error_msg -- see Response
+ """
+ response_list = []
+
+ common = dict(
+ action=action,
+ param_list=param_list if param_list else [],
+ port=port,
+ token=token,
+ response_code=response_code,
+ output=output,
+ debug_output=debug_output,
+ was_connected=was_connected,
+ errno=errno,
+ )
+ for communication in communication_list:
+ if "address_list" not in communication:
+ communication["address_list"] = [communication["label"]]
+
+ full = common.copy()
+ full.update(communication)
+
+ if "error_msg" not in full:
+ full["error_msg"] = (
+ "" if not error_msg_template
+ else error_msg_template.format(**full)
+ )
+
+
+ response_list.append(
+ self.__communication_to_response(**full)
+ )
+
+ request_list = [response.request for response in response_list]
+
+ #TODO when multiple add_requests calls are needed there should be:
+ # * a unique name for each add_requests call
+ # * find start_loop by name and replace it with a new one that has the
+ # merged responses
+ self.add_requests(request_list, name="{0}_requests".format(name))
+ self.start_loop(response_list, name="{0}_responses".format(name))
+
+
+ def add_requests(self, request_list, name):
+ self.__calls.place(name, AddRequestCall(request_list))
+
+ def start_loop(self, response_list, name):
+ self.__calls.place(name, StartLoopCall(response_list))
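
A minimal sketch of add_communication (node names and outputs are
illustrative): one request-response pair is queued per item, and per-item keys
override the common arguments:

config.http.add_communication(
    "check_auth",
    [
        dict(label="node-1", response_code=200, output='{"success":true}'),
        dict(label="node-2", was_connected=False, errno=6,
            error_msg="Could not resolve host"),
    ],
    action="remote/check_auth",
    param_list=[("check_auth_only", 1)],
    response_code=400,   # common fallback for items that do not set it
)
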
diff --git a/pcs/test/tools/command_env/config_runner.py b/pcs/test/tools/command_env/config_runner.py
new file mode 100644
index 0000000..3c09277
--- /dev/null
+++ b/pcs/test/tools/command_env/config_runner.py
@@ -0,0 +1,43 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.command_env.config_runner_cib import CibShortcuts
+from pcs.test.tools.command_env.config_runner_pcmk import PcmkShortcuts
+from pcs.test.tools.command_env.config_runner_corosync import CorosyncShortcuts
+from pcs.test.tools.command_env.config_runner_systemctl import SystemctlShortcuts
+from pcs.test.tools.command_env.mock_runner import Call as RunnerCall
+
+
+class RunnerConfig(object):
+ def __init__(self, call_collection, wrap_helper):
+ self.__calls = call_collection
+
+ self.cib = wrap_helper(CibShortcuts(self.__calls))
+ self.pcmk = wrap_helper(PcmkShortcuts(self.__calls))
+ self.corosync = wrap_helper(CorosyncShortcuts(self.__calls))
+ self.systemctl = wrap_helper(SystemctlShortcuts(self.__calls))
+
+ def place(
+ self, command,
+ name="", stdout="", stderr="", returncode=0, check_stdin=None,
+ before=None, instead=None
+ ):
+ """
+ Place new call to a config.
+
+ string command -- cmdline call (e.g. "crm_mon --one-shot --as-xml")
+ string name -- name of the call; it is possible to get it by the method
+ "get"
+ string stdout -- stdout of the call
+ string stderr -- stderr of the call
+ int returncode -- returncode of the call
+ callable check_stdin -- callable that can check if stdin is as expected
+ string before -- name of another call to insert this call before it
+ string instead -- name of another call to replace it by this call
+ """
+ call = RunnerCall(command, stdout, stderr, returncode, check_stdin)
+ self.__calls.place(name, call, before, instead)
+ return self
diff --git a/pcs/test/tools/command_env/config_runner_cib.py b/pcs/test/tools/command_env/config_runner_cib.py
new file mode 100644
index 0000000..797e2a8
--- /dev/null
+++ b/pcs/test/tools/command_env/config_runner_cib.py
@@ -0,0 +1,210 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.command_env.mock_runner import (
+ Call as RunnerCall,
+ create_check_stdin_xml,
+)
+from pcs.test.tools.fixture_cib import modify_cib
+from pcs.test.tools.misc import get_test_resource as rc
+
+
+CIB_FILENAME = "cib-empty.xml"
+
+
+class CibShortcuts(object):
+ def __init__(self, calls):
+ """
+ CallCollection calls -- provides access to call list
+ """
+ self.__calls = calls
+ self.cib_filename = CIB_FILENAME
+
+ def load(
+ self,
+ modifiers=None,
+ name="runner.cib.load",
+ filename=None,
+ before=None,
+ returncode=0,
+ stderr=None,
+ **modifier_shortcuts
+ ):
+ """
+ Create call for loading cib.
+
+ string name -- key of the call
+ list of callable modifiers -- every callable takes etree.Element and
+ returns new etree.Element with desired modification.
+ string filename -- points to a file with the cib content
+ string before -- key of call before which this new call is to be placed
+ dict modifier_shortcuts -- a new modifier is generated from each
+ modifier shortcut.
+ As key there can be keys of MODIFIER_GENERATORS.
+ Value is passed into appropriate generator from MODIFIER_GENERATORS.
+ For details see pcs.test.tools.fixture_cib (mainly the variable
+ MODIFIER_GENERATORS - please refer to it when you are adding params
+ here)
+ """
+ if(returncode != 0 or stderr is not None) and (
+ modifiers is not None
+ or
+ filename is not None
+ or
+ modifier_shortcuts
+ ):
+ raise AssertionError(
+ "Do not combine parameters 'returncode' and 'stderr' with"
+ " parameters 'modifiers', 'filename' and 'modifier_shortcuts'"
+ )
+
+ command = "cibadmin --local --query"
+ if returncode != 0:
+ call = RunnerCall(command, stderr=stderr, returncode=returncode)
+ else:
+ cib = modify_cib(
+ open(rc(filename if filename else self.cib_filename)).read(),
+ modifiers,
+ **modifier_shortcuts
+ )
+ call = RunnerCall(command, stdout=cib)
+
+ self.__calls.place(name, call, before=before)
+
+ def push(
+ self,
+ modifiers=None,
+ name="runner.cib.push",
+ load_key="runner.cib.load",
+ instead=None,
+ stderr="",
+ returncode=0,
+ **modifier_shortcuts
+ ):
+ """
+ Create call for pushing cib.
+ Cib is taken from the load call by default.
+
+ string name -- key of the call
+ list of callable modifiers -- every callable takes etree.Element and
+ returns new etree.Element with desired modification.
+ string load_key -- key of the call whose stdout the cib is taken from
+ string instead -- key of call instead of which this new call is to be
+ placed
+ dict modifier_shortcuts -- a new modifier is generated from each
+ modifier shortcut.
+ As key there can be keys of MODIFIER_GENERATORS.
+ Value is passed into appropriate generator from MODIFIER_GENERATORS.
+ For details see pcs.test.tools.fixture_cib (mainly the variable
+ MODIFIER_GENERATORS - please refer to it when you are adding params
+ here)
+ """
+ cib = modify_cib(
+ self.__calls.get(load_key).stdout,
+ modifiers,
+ **modifier_shortcuts
+ )
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "cibadmin --replace --verbose --xml-pipe --scope configuration",
+ stderr=stderr,
+ returncode=returncode,
+ check_stdin=create_check_stdin_xml(cib),
+ ),
+ instead=instead,
+ )
+
+ def push_independent(
+ self,
+ cib,
+ name="runner.cib.push_independent",
+ instead=None,
+ ):
+ """
+ Create call for pushing cib.
+ Cib is specified as an argument.
+
+ string name -- key of the call
+ string cib -- whole cib to push
+ string instead -- key of call instead of which this new call is to be
+ placed
+ """
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "cibadmin --replace --verbose --xml-pipe --scope configuration",
+ check_stdin=create_check_stdin_xml(cib),
+ ),
+ instead=instead,
+ )
+
+ def diff(
+ self,
+ cib_old_file,
+ cib_new_file,
+ name="runner.cib.diff",
+ stdout="resulting diff",
+ stderr="",
+ returncode=0
+ ):
+ """
+ Create a call for diffing two CIBs stored in two files
+ string cib_old_file -- path to a file with an old CIB
+ string cib_new_file -- path to a file with a new CIB
+ string name -- key of the call
+ string stdout -- resulting diff
+ string stderr -- error returned from the diff process
+ int returncode -- exit code of the diff process
+ """
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "crm_diff --original {old} --new {new} --no-version".format(
+ old=cib_old_file, new=cib_new_file
+ ),
+ stdout=stdout,
+ stderr=stderr,
+ returncode=returncode,
+ ),
+ )
+
+ def push_diff(
+ self,
+ name="runner.cib.push_diff",
+ cib_diff="resulting diff",
+ stdout="",
+ stderr="",
+ returncode=0
+ ):
+ """
+ Create a call for pushing a diff of CIBs
+ string name -- key of the call
+ string cib_diff -- the diff of CIBs
+ """
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "cibadmin --patch --verbose --xml-pipe",
+ check_stdin=create_check_stdin_xml(cib_diff),
+ stdout=stdout,
+ stderr=stderr,
+ returncode=returncode,
+ ),
+ )
+
+ def upgrade(self, name="runner.cib.upgrade", before=None):
+ """
+ Create call for upgrading cib.
+
+ string name -- key of the call
+ string before -- key of call before which this new call is to be placed
+ """
+ self.__calls.place(
+ name,
+ RunnerCall("cibadmin --upgrade --force"),
+ before=before
+ )
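
A minimal sketch of the diff-based pair (file paths are illustrative; config
obtained from get_env_tools):

(config
    .runner.cib.diff("/tmp/cib-old.xml", "/tmp/cib-new.xml")
    .runner.cib.push_diff()   # expects "resulting diff" on stdin by default
)
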
diff --git a/pcs/test/tools/command_env/config_runner_corosync.py b/pcs/test/tools/command_env/config_runner_corosync.py
new file mode 100644
index 0000000..63fc984
--- /dev/null
+++ b/pcs/test/tools/command_env/config_runner_corosync.py
@@ -0,0 +1,40 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.misc import outdent
+from pcs.test.tools.command_env.mock_runner import Call as RunnerCall
+
+class CorosyncShortcuts(object):
+ def __init__(self, calls):
+ self.__calls = calls
+
+ def version(self, name="runner.corosync.version", version="2.4.0"):
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "corosync -v",
+ stdout=outdent(
+ """\
+ Corosync Cluster Engine, version '{0}'
+ Copyright...
+ """.format(version)
+ )
+ )
+ )
+
+ def reload(self, name="runner.corosync.reload"):
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "corosync-cfgtool -R",
+ stdout=outdent(
+ """\
+ Reloading corosync.conf...
+ Done
+ """
+ )
+ )
+ )
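
A minimal sketch of the two shortcuts (config obtained from get_env_tools):

(config
    .runner.corosync.version(version="2.4.2")   # queues "corosync -v"
    .runner.corosync.reload()                   # queues "corosync-cfgtool -R"
)
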
diff --git a/pcs/test/tools/command_env/config_runner_pcmk.py b/pcs/test/tools/command_env/config_runner_pcmk.py
new file mode 100644
index 0000000..eb4cb5f
--- /dev/null
+++ b/pcs/test/tools/command_env/config_runner_pcmk.py
@@ -0,0 +1,133 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from lxml import etree
+
+from pcs.test.tools.command_env.mock_runner import Call as RunnerCall
+from pcs.test.tools.fixture import complete_state_resources
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.xml import etree_to_str
+
+
+DEFAULT_WAIT_TIMEOUT = 10
+WAIT_TIMEOUT_EXPIRED_RETURNCODE = 62
+
+class PcmkShortcuts(object):
+ def __init__(self, calls):
+ self.__calls = calls
+ self.default_wait_timeout = DEFAULT_WAIT_TIMEOUT
+ self.default_wait_error_returncode = WAIT_TIMEOUT_EXPIRED_RETURNCODE
+
+ def load_state(
+ self, name="runner.pcmk.load_state", filename="crm_mon.minimal.xml",
+ resources=None
+ ):
+ """
+ Create call for loading pacemaker state.
+
+ string name -- key of the call
+ string filename -- points to a file with the status content
+ string resources -- xml of the resources section to be put into the state
+ """
+ state = etree.fromstring(open(rc(filename)).read())
+ if resources:
+ state.append(complete_state_resources(etree.fromstring(resources)))
+
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "crm_mon --one-shot --as-xml --inactive",
+ stdout=etree_to_str(state),
+ )
+ )
+
+ def load_agent(
+ self,
+ name="runner.pcmk.load_agent",
+ agent_name="ocf:heartbeat:Dummy",
+ agent_filename="resource_agent_ocf_heartbeat_dummy.xml",
+ instead=None,
+ ):
+ """
+ Create call for loading resource agent metadata.
+
+ string name -- key of the call
+ string agent_name
+ string agent_filename -- points to a file with the agent metadata
+ content
+ string instead -- key of call instead of which this new call is to be
+ placed
+ """
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "crm_resource --show-metadata {0}".format(agent_name),
+ stdout=open(rc(agent_filename)).read()
+ ),
+ instead=instead,
+ )
+
+
+ def wait(
+ self, name="runner.pcmk.wait", stderr="", returncode=None, timeout=None
+ ):
+ """
+ Create call for waiting for pacemaker idle
+
+ string name -- key of the call
+ string stderr -- stderr of wait command
+ int returncode -- defaults to 0 if stderr is empty and to the configured
+ error value (62) if stderr is not empty; an explicitly specified
+ returncode always takes precedence
+ """
+ if returncode is None:
+ returncode = self.default_wait_error_returncode if stderr else 0
+
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "crm_resource --wait --timeout={0}".format(
+ timeout if timeout else self.default_wait_timeout
+ ),
+ stderr=stderr,
+ returncode=returncode,
+ )
+ )
+
+ def can_wait(
+ self, name="runner.pcmk.can_wait", before=None, stdout="--wait"
+ ):
+ """
+ Create call that checks that wait for idle is supported
+
+ string name -- key of the call
+ string before -- key of call before which this new call is to be placed
+ """
+ self.__calls.place(
+ name,
+ RunnerCall("crm_resource -?", stdout=stdout),
+ before=before
+ )
+
+ def verify(self, name="verify", cib_tempfile=None, stderr=None, verbose=False):
+ """
+ Create call for verifying the cib
+
+ string name -- key of the call
+ string cib_tempfile -- path to a file with the cib to verify; the live
+ configuration is checked when not given
+ string stderr -- stderr of the verify command; when set, a nonzero
+ returncode (55) is used
+ bool verbose -- run crm_verify with -V
+ """
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "crm_verify{0} {1}".format(
+ " -V" if verbose else "",
+ "--xml-file {0}".format(cib_tempfile) if cib_tempfile
+ else "--live-check"
+ ),
+ stderr=("" if stderr is None else stderr),
+ returncode=(0 if stderr is None else 55),
+ ),
+ )
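
A minimal sketch of the wait helpers (the stderr text is illustrative; config
obtained from get_env_tools):

(config
    .runner.pcmk.can_wait()   # "crm_resource -?" output advertises --wait
    .runner.pcmk.wait()       # success: returncode 0, default timeout 10
    .runner.pcmk.wait(
        name="runner.pcmk.wait_timeout",
        stderr="Pending actions:",   # non-empty stderr -> returncode 62
        timeout=30,
    )
)
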
diff --git a/pcs/test/tools/command_env/config_runner_systemctl.py b/pcs/test/tools/command_env/config_runner_systemctl.py
new file mode 100644
index 0000000..f3b10a1
--- /dev/null
+++ b/pcs/test/tools/command_env/config_runner_systemctl.py
@@ -0,0 +1,35 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs import settings
+from pcs.test.tools.command_env.mock_runner import Call as RunnerCall
+
+class SystemctlShortcuts(object):
+ def __init__(self, calls):
+ self.__calls = calls
+
+ def is_active(
+ self, service, name="runner_systemctl.is_active", is_active=True
+ ):
+ args = dict(
+ stdout="unknown\n",
+ returncode=3,
+ )
+ if is_active:
+ args = dict(
+ stdout="active\n",
+ returncode=0,
+ )
+ self.__calls.place(
+ name,
+ RunnerCall(
+ "{bin_path} is-active {service}.service".format(
+ bin_path=settings.systemctl_binary,
+ service=service,
+ ),
+ **args
+ )
+ )
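
A minimal sketch (service names are illustrative; config obtained from
get_env_tools):

(config
    .runner.systemctl.is_active("sbd")   # "active", returncode 0
    .runner.systemctl.is_active(
        "corosync",
        name="runner_systemctl.is_active.corosync",
        is_active=False,                 # "unknown", returncode 3
    )
)
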
diff --git a/pcs/test/tools/command_env/mock_get_local_corosync_conf.py b/pcs/test/tools/command_env/mock_get_local_corosync_conf.py
new file mode 100644
index 0000000..c1323b4
--- /dev/null
+++ b/pcs/test/tools/command_env/mock_get_local_corosync_conf.py
@@ -0,0 +1,23 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+CALL_TYPE_GET_LOCAL_COROSYNC_CONF = "CALL_TYPE_GET_LOCAL_COROSYNC_CONF"
+
+class Call(object):
+ type = CALL_TYPE_GET_LOCAL_COROSYNC_CONF
+
+ def __init__(self, content):
+ self.content = content
+
+ def __repr__(self):
+ return str("<GetLocalCorosyncConf>")
+
+
+def get_get_local_corosync_conf(call_queue):
+ def get_local_corosync_conf():
+ _, expected_call = call_queue.take(CALL_TYPE_GET_LOCAL_COROSYNC_CONF)
+ return expected_call.content
+ return get_local_corosync_conf
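
For illustration, how the factory above behaves (a sketch; call_queue stands
for a prepared pcs.test.tools.command_env.calls Queue holding one Call):

    get_conf = get_get_local_corosync_conf(call_queue)
    # each invocation consumes one expected call from the queue and
    # returns the content prepared in it
    corosync_conf_text = get_conf()
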
diff --git a/pcs/test/tools/command_env/mock_node_communicator.py b/pcs/test/tools/command_env/mock_node_communicator.py
new file mode 100644
index 0000000..f96872d
--- /dev/null
+++ b/pcs/test/tools/command_env/mock_node_communicator.py
@@ -0,0 +1,191 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+try:
+ # python 2
+ from urlparse import parse_qs
+except ImportError:
+ # python 3
+ from urllib.parse import parse_qs
+
+CALL_TYPE_HTTP_ADD_REQUESTS = "CALL_TYPE_HTTP_ADD_REQUESTS"
+CALL_TYPE_HTTP_START_LOOP = "CALL_TYPE_HTTP_START_LOOP"
+
+def log_request(request):
+ label_data = [
+ ("action", request.action),
+ ("label", request.target.label),
+ ("data", parse_qs(request.data)),
+ ]
+
+ if request.target.address_list != [request.target.label]:
+ label_data.append(("addres_list", request.target.address_list))
+
+ if request.target.port != "2224":
+ label_data.append(("port", request.target.port))
+
+
+ return " ".join([
+ "{0}:'{1}'".format(key, value) for key, value in label_data
+ ])
+
+def log_response(response, indent=0):
+ label_data = [
+ ("action", response.request.action),
+ ("label", response.request.target.label),
+ ]
+
+ if response.request.target.address_list != [response.request.target.label]:
+ label_data.append(("addres_list", response.request.target.address_list))
+
+ label_data.append(("was_connected", response.was_connected))
+
+ if response.request.target.port != "2224":
+ label_data.append(("port", response.request.target.port))
+
+ if response.was_connected:
+ label_data.append(("respose_code", response.response_code))
+ else:
+ label_data.extend([
+ ("errno", response.errno),
+ ("error_msg", response.error_msg),
+ ])
+
+ label_data.append(("data", parse_qs(response.request.data)))
+
+ return "{0}{1}".format(
+ " "*indent,
+ " ".join([
+ "{0}:'{1}'".format(key, value) for key, value in label_data
+ ]),
+ )
+
+def different_request_lists(expected_request_list, request_list):
+ return AssertionError(
+ (
+ "Method add_request of NodeCommunicator expected"
+ " request_list:\n * {0}\nbut got: \n * {1}"
+ )
+ .format(
+ "\n * ".join(log_request(r) for r in expected_request_list),
+ "\n * ".join(log_request(r) for r in request_list),
+ )
+ )
+
+
+def bad_request_list_content(errors):
+ return AssertionError(
+ "Method add_request of NodeCommunicator get different requests"
+ " than expected (key: (expected, real)): \n {0}".format(
+ "\n ".join([
+ "{0}:\n {1}".format(
+ index,
+ "\n ".join([
+ "{0}:\n {1}\n {2}"
+ .format(key, pair[0], pair[1])
+ for key, pair in value.items()
+ ])
+ )
+ for index, value in errors.items()
+ ]),
+ )
+ )
+
+
+class AddRequestCall(object):
+ type = CALL_TYPE_HTTP_ADD_REQUESTS
+
+ def __init__(self, request_list):
+ self.request_list = request_list
+
+ def format(self):
+ return "Requests:\n * {0}".format(
+ "\n * ".join([
+ log_request(request) for request in self.request_list
+ ])
+ )
+
+ def __repr__(self):
+ return str("<HttpAddRequest '{0}'>").format(self.request_list)
+
+class StartLoopCall(object):
+ type = CALL_TYPE_HTTP_START_LOOP
+
+ def __init__(self, response_list):
+ self.response_list = response_list
+
+ def format(self):
+ return "Responses:\n * {0}".format(
+ "\n * ".join([
+ log_response(response) for response in self.response_list
+ ])
+ )
+
+ def __repr__(self):
+ return str("<HttpStartLoop '{0}'>").format(self.response_list)
+
+class NodeCommunicator(object):
+ def __init__(self, call_queue=None):
+ self.__call_queue = call_queue
+
+ def add_requests(self, request_list):
+ _, add_request_call = self.__call_queue.take(
+ CALL_TYPE_HTTP_ADD_REQUESTS,
+ request_list,
+ )
+
+ expected_request_list = add_request_call.request_list
+
+ if len(expected_request_list) != len(request_list):
+ raise different_request_lists(expected_request_list, request_list)
+
+ errors = {}
+ for i, real_request in enumerate(request_list):
+ expected_request = add_request_call.request_list[i]
+
+ diff = {}
+ if expected_request.action != real_request.action:
+ diff["action"] = (
+ expected_request.action,
+ real_request.action
+ )
+
+ if expected_request.target.label != real_request.target.label:
+ diff["target.label"] = (
+ expected_request.target.label,
+ real_request.target.label
+ )
+
+ if expected_request.target.token != real_request.target.token:
+ diff["target.token"] = (
+ expected_request.target.token,
+ real_request.target.token
+ )
+
+ if expected_request.target.port != real_request.target.port:
+ diff["target.port"] = (
+ expected_request.target.port,
+ real_request.target.port
+ )
+
+ if expected_request.data != real_request.data:
+ diff["data"] = (
+ parse_qs(expected_request.data),
+ parse_qs(real_request.data)
+ )
+
+ if diff:
+ errors[i] = diff
+
+ if errors:
+ raise self.__call_queue.error_with_context(
+ bad_request_list_content(errors)
+ )
+
+
+ def start_loop(self):
+ _, call = self.__call_queue.take(CALL_TYPE_HTTP_START_LOOP)
+ return call.response_list
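
For illustration, the expected interplay between this mock and the code
under test (a sketch; the request and response objects stand for the real
pcs.common.node_communicator instances and handle_response is hypothetical):

    communicator = NodeCommunicator(call_queue)
    # compared field by field against the queued AddRequestCall
    communicator.add_requests(request_list)
    # returns the response_list prepared in the queued StartLoopCall
    for response in communicator.start_loop():
        handle_response(response)
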
diff --git a/pcs/test/tools/command_env/mock_push_cib.py b/pcs/test/tools/command_env/mock_push_cib.py
new file mode 100644
index 0000000..6776747
--- /dev/null
+++ b/pcs/test/tools/command_env/mock_push_cib.py
@@ -0,0 +1,66 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.assertions import assert_xml_equal
+from pcs.test.tools.xml import etree_to_str
+
+CALL_TYPE_PUSH_CIB = "CALL_TYPE_PUSH_CIB"
+
+class Call(object):
+ type = CALL_TYPE_PUSH_CIB
+
+ def __init__(self, cib_xml, custom_cib=False, wait=False, exception=None):
+ self.cib_xml = cib_xml
+ self.custom_cib = custom_cib
+ self.wait = wait
+ self.exception = exception
+
+ def __repr__(self):
+ return str("<CibPush wait='{0}'>").format(self.wait)
+
+
+def get_push_cib(call_queue):
+ def push_cib(lib_env, custom_cib=None, wait=False):
+ i, expected_call = call_queue.take(CALL_TYPE_PUSH_CIB)
+
+ if custom_cib is None and expected_call.custom_cib:
+ raise AssertionError(
+ (
+ "Trying to call push cib (call no. {0}) without custom cib,"
+ " but a custom cib was expected"
+ ).format(i)
+ )
+ if custom_cib is not None and not expected_call.custom_cib:
+ raise AssertionError(
+ (
+ "Trying to call push cib (call no. {0}) with custom cib,"
+ " but no custom cib was expected"
+ ).format(i)
+ )
+
+ assert_xml_equal(
+ expected_call.cib_xml,
+ etree_to_str(lib_env.cib),
+ (
+ "Trying to call env.push cib (call no. {0}) but cib in env does"
+ " not match\n\n"
+ ).format(i)
+ )
+
+ if wait != expected_call.wait:
+ raise AssertionError(
+ (
+ "Trying to call push cib (call no. {0}) with 'wait' == {1}"
+ " but expected was 'wait' == {2}"
+ ).format(i, wait, expected_call.wait)
+ )
+
+ if expected_call.exception:
+ raise expected_call.exception
+ return push_cib
+
+def is_push_cib_call_in(call_queue):
+ return call_queue.has_type(CALL_TYPE_PUSH_CIB)
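
For illustration, what the mocked push_cib checks (a sketch; lib_env stands
for the LibraryEnvironment of the command under test):

    push_cib = get_push_cib(call_queue)
    # asserts that lib_env.cib matches the queued Call's cib_xml and that
    # the custom_cib and wait arguments match the expectation
    push_cib(lib_env)
    push_cib(lib_env, wait=10)  # fails unless Call(..., wait=10) was queued
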
diff --git a/pcs/test/tools/command_env/mock_runner.py b/pcs/test/tools/command_env/mock_runner.py
new file mode 100644
index 0000000..6876d7a
--- /dev/null
+++ b/pcs/test/tools/command_env/mock_runner.py
@@ -0,0 +1,123 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from os import path
+
+from pcs import settings
+from pcs.test.tools.assertions import assert_xml_equal
+
+
+CALL_TYPE_RUNNER = "CALL_TYPE_RUNNER"
+
+def create_check_stdin_xml(expected_stdin):
+ def stdin_xml_check(stdin, command, order_num):
+ assert_xml_equal(
+ expected_stdin,
+ stdin,
+ (
+ "Trying to run command no. {0}"
+ "\n\n '{1}'\n\nwith expected xml stdin.\n"
+ ).format(order_num, command)
+ )
+ return stdin_xml_check
+
+def create_check_stdin_equal(expected_stdin):
+ def stdin_equal_check(stdin, command, order_num):
+ if stdin != expected_stdin:
+ raise AssertionError(
+ (
+ "With command\n\n '{0}'"
+ "\n\nexpected stdin:\n\n'{1}'"
+ "\n\nbut was:\n\n'{2}'"
+ )
+ .format(command, expected_stdin, stdin)
+ )
+
+ return stdin_equal_check
+
+
+def check_no_stdin(stdin, command, order_num):
+ if stdin:
+ raise AssertionError(
+ (
+ "With command\n\n '{0}'\n\nno stdin expected but was"
+ "\n\n'{1}'"
+ )
+ .format(command, stdin)
+ )
+
+COMMAND_COMPLETIONS = {
+ "cibadmin": path.join(settings.pacemaker_binaries, "cibadmin"),
+ "corosync": path.join(settings.corosync_binaries, "corosync"),
+ "corosync-cfgtool": path.join(
+ settings.corosync_binaries, "corosync-cfgtool"
+ ),
+ "crm_diff": path.join(settings.pacemaker_binaries, "crm_diff"),
+ "crm_mon": path.join(settings.pacemaker_binaries, "crm_mon"),
+ "crm_resource": path.join(settings.pacemaker_binaries, "crm_resource"),
+ "crm_verify": path.join(settings.pacemaker_binaries, "crm_verify"),
+ "sbd": settings.sbd_binary,
+}
+
+def complete_command(command):
+ for shortcut, full_path in COMMAND_COMPLETIONS.items():
+ if command.startswith("{0} ".format(shortcut)):
+ return full_path + command[len(shortcut):]
+ return command
+
+def bad_call(order_num, expected_command, entered_command):
+ return (
+ "As {0}. command expected\n '{1}'\nbut was\n '{2}'"
+ .format(order_num, expected_command, entered_command)
+ )
+
+
+class Call(object):
+ type = CALL_TYPE_RUNNER
+
+ def __init__(
+ self, command, stdout="", stderr="", returncode=0, check_stdin=None
+ ):
+ """
+ callable check_stdin raises AssertionError when given stdin doesn't
+ match
+ """
+ self.type = CALL_TYPE_RUNNER
+ self.command = complete_command(command)
+ self.stdout = stdout
+ self.stderr = stderr
+ self.returncode = returncode
+ self.check_stdin = check_stdin if check_stdin else check_no_stdin
+
+ def __repr__(self):
+ return str("<Runner '{0}' returncode='{1}'>").format(
+ self.command,
+ self.returncode
+ )
+
+
+class Runner(object):
+ def __init__(self, call_queue=None, env_vars=None):
+ self.__call_queue = call_queue
+ self.__env_vars = env_vars if env_vars else {}
+
+ @property
+ def env_vars(self):
+ return self.__env_vars
+
+ def run(
+ self, args, stdin_string=None, env_extend=None, binary_output=False
+ ):
+ command = " ".join(args)
+ i, call = self.__call_queue.take(CALL_TYPE_RUNNER, command)
+
+ if command != call.command:
+ raise self.__call_queue.error_with_context(
+ bad_call(i, call.command, command)
+ )
+
+ call.check_stdin(stdin_string, command, i)
+ return call.stdout, call.stderr, call.returncode
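
For illustration, a self-contained sketch of driving the mock runner
directly (the crm_mon invocation is only an example):

    from os import path

    from pcs import settings
    from pcs.test.tools.command_env.calls import CallListBuilder, Queue
    from pcs.test.tools.command_env.mock_runner import Call, Runner

    builder = CallListBuilder()
    # the short command name is expanded via COMMAND_COMPLETIONS
    builder.place("", Call("crm_mon --one-shot --as-xml", stdout="<xml/>"))

    runner = Runner(Queue(builder))
    stdout, stderr, returncode = runner.run(
        [path.join(settings.pacemaker_binaries, "crm_mon"),
            "--one-shot", "--as-xml"]
    )  # -> ("<xml/>", "", 0)
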
diff --git a/pcs/test/tools/command_env/spy.py b/pcs/test/tools/command_env/spy.py
new file mode 100644
index 0000000..5cf20ab
--- /dev/null
+++ b/pcs/test/tools/command_env/spy.py
@@ -0,0 +1,101 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+try:
+ # python 2
+ from urlparse import parse_qs
+except ImportError:
+ # python 3
+ from urllib.parse import parse_qs
+
+from pcs.lib.corosync.live import(
+ get_local_corosync_conf as original_get_local_corosync_conf
+)
+
+def print_caption(caption, indent=2, underline="-"):
+ print("\n{0}{1}\n{0}{2}".format(
+ " "*indent,
+ caption,
+ underline*len(caption)
+ ))
+
+
+def print_initialize(spy):
+ print_caption(
+ "Initialize {0}".format(spy.__class__.__name__),
+ indent=0,
+ underline="=",
+ )
+
+def print_call(spy, name):
+ print_caption("{0}: {1}".format(spy.__class__.__name__, name))
+
+def print_line(content):
+ print(" {0}".format(content))
+
+def print_long_text(name, potentially_long_text):
+ if "\n" not in potentially_long_text:
+ print_line("{0}: '{1}'".format(name, potentially_long_text))
+ else:
+ print_line("{0}: '''\\".format(name))
+ for line in potentially_long_text.split("\n"):
+ print_line(" {0}".format(line))
+ print_line("'''")
+
+
+class NodeCommunicator(object):
+ def __init__(self, original_node_communicator):
+ print_initialize(self)
+ self.__communicator = original_node_communicator
+
+ def add_requests(self, request_list):
+ print_call(self, "add_requests")
+ for request in request_list:
+ print_line(request)
+ print_line(parse_qs(request.data))
+ return self.__communicator.add_requests(request_list)
+
+ def start_loop(self):
+ for response in self.__communicator.start_loop():
+ print_call(self, "yield response start")
+ print_line(response)
+ yield response
+
+
+class Runner(object):
+ def __init__(self, original_runner):
+ print_initialize(self)
+ self.__runner = original_runner
+
+ def run(
+ self, args, stdin_string=None, env_extend=None, binary_output=False
+ ):
+ print_call(self, "run")
+ print_line("args: {0}".format(args))
+ if stdin_string:
+ print_long_text("stdin_string", stdin_string)
+ if env_extend:
+ print_line("env_extend: {0}".format(env_extend))
+ if binary_output:
+ print_line("binary_output: {0}".format(binary_output))
+ stdout, stderr, returncode = self.__runner.run(
+ args,
+ stdin_string,
+ env_extend,
+ binary_output,
+ )
+ print_long_text("stdout", stdout)
+ print_long_text("stderr", stderr)
+ print_line("returncode:{0}".format(returncode))
+ return stdout, stderr, returncode
+
+
+def get_local_corosync_conf():
+ print_caption("get_local_corosync_conf", indent=0)
+ corosync_conf = original_get_local_corosync_conf()
+ for line in corosync_conf.split("\n"):
+ print_line(line)
+ return corosync_conf
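
For illustration, the spy wrappers are drop-in proxies for tracing what a
test actually executes (a sketch; real_runner stands for any object with a
compatible run method):

    spied = Runner(real_runner)  # prints "Initialize Runner"
    # prints the args, any stdin, and the resulting stdout/stderr/returncode
    stdout, stderr, returncode = spied.run(["crm_mon", "--help"])
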
diff --git a/pcs/test/tools/command_env/tools.py b/pcs/test/tools/command_env/tools.py
new file mode 100644
index 0000000..2331d41
--- /dev/null
+++ b/pcs/test/tools/command_env/tools.py
@@ -0,0 +1,41 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from pcs.test.tools.command_env.assistant import EnvAssistant
+from pcs.test.tools.command_env.config_runner_cib import CIB_FILENAME
+from pcs.test.tools.command_env.config_runner_pcmk import (
+ DEFAULT_WAIT_TIMEOUT,
+ WAIT_TIMEOUT_EXPIRED_RETURNCODE,
+)
+
+
+def get_env_tools(
+ test_case,
+ base_cib_filename=CIB_FILENAME,
+ default_wait_timeout=DEFAULT_WAIT_TIMEOUT,
+ default_wait_error_returncode=WAIT_TIMEOUT_EXPIRED_RETURNCODE,
+ exception_reports_in_processor_by_default=True,
+):
+ """
+ Shortcut for preparing EnvAssistant and Config
+
+ TestCase test_case -- the test case is used to register a cleanup
+ method which asserts that everything has finished
+ """
+
+ env_assistant = EnvAssistant(
+ test_case=test_case,
+ exception_reports_in_processor_by_default=(
+ exception_reports_in_processor_by_default
+ ),
+ )
+
+ runner = env_assistant.config.runner
+ runner.cib.cib_filename = base_cib_filename
+ runner.pcmk.default_wait_timeout = default_wait_timeout
+ runner.pcmk.default_wait_error_returncode = default_wait_error_returncode
+
+ return env_assistant, env_assistant.config
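
For illustration, the usual setUp pattern built on this helper (a sketch;
the test class is hypothetical and a plain unittest.TestCase is assumed to
be sufficient):

    import unittest

    from pcs.test.tools.command_env.tools import get_env_tools

    class ExampleCommandTest(unittest.TestCase):
        def setUp(self):
            # registers a cleanup asserting the whole call queue was consumed
            self.env_assist, self.config = get_env_tools(test_case=self)
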
diff --git a/pcs/test/tools/custom_mock.py b/pcs/test/tools/custom_mock.py
index 86f862b..1d545dc 100644
--- a/pcs/test/tools/custom_mock.py
+++ b/pcs/test/tools/custom_mock.py
@@ -2,9 +2,10 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
+import io
+
from pcs.cli.common.reports import LibraryReportProcessorToConsole
import pcs.common.pcs_pycurl as pycurl
from pcs.lib.errors import LibraryError, ReportItemSeverity
@@ -15,35 +16,61 @@ class MockLibraryReportProcessor(LibraryReportProcessorToConsole):
def __init__(self, debug=False, raise_on_errors=True):
super(MockLibraryReportProcessor, self).__init__(debug)
self.raise_on_errors = raise_on_errors
+ self.direct_sent_items = []
@property
def report_item_list(self):
return self.items
+ def report_list(self, report_item_list):
+ self.direct_sent_items.extend(report_item_list)
+ return self._send(report_item_list)
+
def send(self):
+ errors = self._send(self.items, print_errors=False)
+ if errors and self.raise_on_errors:
+ raise LibraryError(*errors)
+
+ def assert_reports(self, expected_report_info_list, hint=""):
+ assert_report_item_list_equal(
+ self.report_item_list + self.direct_sent_items,
+ expected_report_info_list,
+ hint=hint
+ )
+
+ def _send(self, report_item_list, print_errors=True):
errors = []
- for report_item in self.items:
+ for report_item in report_item_list:
if report_item.severity == ReportItemSeverity.ERROR:
errors.append(report_item)
- if errors and self.raise_on_errors:
- raise LibraryError(*errors)
+ return errors
- def assert_reports(self, report_info_list):
- assert_report_item_list_equal(self.report_item_list, report_info_list)
class MockCurl(object):
- def __init__(self, info, output="", debug_output_list=None, exception=None):
+ def __init__(
+ self, info=None, output=b"", debug_output_list=None, exception=None,
+ error=None, request=None
+ ):
+ # The exception is not needed anymore, because perform is no longer
+ # called on easy handlers. For now it has to stay as it is, because it
+ # is still used by the old communicator tests.
self._opts = {}
self._info = info if info else {}
self._output = output
- self._debug_output_list = debug_output_list
+ self._debug_output_list = debug_output_list if debug_output_list else []
+ self._error = error
self._exception = exception
+ self.request_obj = request
@property
def opts(self):
return self._opts
+ @property
+ def error(self):
+ return self._error
+
def reset(self):
self._opts = {}
@@ -66,6 +93,8 @@ class MockCurl(object):
AssertionError("info '#{0}' not defined".format(opt))
def perform(self):
+ if self._error:
+ return
if self._exception:
#pylint: disable=raising-bad-type
raise self._exception
@@ -75,3 +104,83 @@ class MockCurl(object):
for msg_type, msg in self._debug_output_list:
self._opts[pycurl.DEBUGFUNCTION](msg_type, msg)
+
+class MockCurlSimple(object):
+ def __init__(self, info=None, output=b"", debug_output=b"", request=None):
+ self.output_buffer = io.BytesIO()
+ self.output_buffer.write(output)
+ self.debug_buffer = io.BytesIO()
+ self.debug_buffer.write(debug_output)
+ self.request_obj = request
+ self._info = info if info else {}
+
+ def getinfo(self, opt):
+ try:
+ return self._info[opt]
+ except KeyError:
+ AssertionError("info '#{0}' not defined".format(opt))
+
+
+class MockCurlMulti(object):
+ def __init__(self, number_of_performed_list):
+ self._number_of_performed_list = number_of_performed_list
+ self._opts = {}
+ self._handle_list = []
+ self._processed_list = []
+
+ @property
+ def opts(self):
+ return self._opts
+
+ def setopt(self, opt, val):
+ self._opts[opt] = val
+
+ def add_handle(self, handle):
+ if not isinstance(handle, MockCurl):
+ raise AssertionError("Only MockCurl objects are allowed")
+ if handle in self._handle_list:
+ # same error as real CurlMulti object
+ raise pycurl.error("curl object already on this multi-stack")
+ self._handle_list.append(handle)
+
+ def remove_handle(self, handle):
+ if handle not in self._handle_list:
+ # same error as real CurlMulti object
+ raise pycurl.error("curl object not on this multi-stack")
+ self._handle_list.remove(handle)
+
+ def assert_no_handle_left(self):
+ if self._handle_list:
+ raise AssertionError(
+ "{0} handle(s) left to process".format(len(self._handle_list))
+ )
+
+ def select(self, timeout=1):
+ return 0
+
+ def perform(self):
+ return (0, 0)
+
+ def timeout(self):
+ return 0
+
+ def info_read(self):
+ ok_list = []
+ err_list = []
+ if not self._number_of_performed_list:
+ raise AssertionError("unexpected info_read call")
+ number_to_perform = self._number_of_performed_list.pop(0)
+ if number_to_perform > len(self._handle_list):
+ raise AssertionError("expecting more handles than prepared")
+ for handle in self._handle_list[:number_to_perform]:
+ try:
+ handle.perform()
+ if handle.error:
+ err_list.append((handle, handle.error[0], handle.error[1]))
+ else:
+ ok_list.append(handle)
+ except pycurl.error as e:
+ errno, msg = e.args
+ err_list.append((handle, errno, msg))
+ self._processed_list.append(handle)
+ return (0, ok_list, err_list)
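
For illustration, a sketch of MockCurlMulti with two prepared handles, both
reported as finished in a single info_read pass (handle_a and handle_b stand
for prepared MockCurl instances):

    multi = MockCurlMulti([2])  # one info_read pass processing two handles
    multi.add_handle(handle_a)
    multi.add_handle(handle_b)
    multi.perform()             # the mock always reports (0, 0)
    dummy, ok_list, err_list = multi.info_read()
    multi.remove_handle(handle_a)
    multi.remove_handle(handle_b)
    multi.assert_no_handle_left()
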
diff --git a/pcs/lib/commands/test/resource/fixture.py b/pcs/test/tools/fixture.py
similarity index 53%
rename from pcs/lib/commands/test/resource/fixture.py
rename to pcs/test/tools/fixture.py
index 8d96dc9..fe8954b 100644
--- a/pcs/lib/commands/test/resource/fixture.py
+++ b/pcs/test/tools/fixture.py
@@ -2,100 +2,13 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
-from lxml import etree
-
from pcs.common import report_codes
from pcs.lib.errors import ReportItemSeverity as severities
-from pcs.test.tools.integration_lib import Call
-from pcs.test.tools.misc import get_test_resource as rc
-from pcs.test.tools.xml import etree_to_str
-
-
-def call_cib_load(cib):
- return [
- Call("cibadmin --local --query", cib),
- ]
-
-def call_cib_push(cib):
- return [
- Call(
- "cibadmin --replace --verbose --xml-pipe --scope configuration",
- check_stdin=Call.create_check_stdin_xml(cib)
- ),
- ]
-
-def call_cib_upgrade():
- return [
- Call("cibadmin --upgrade --force"),
- ]
-
-def call_status(status):
- return [
- Call("/usr/sbin/crm_mon --one-shot --as-xml --inactive", status),
- ]
-
-def call_wait_supported():
- return [
- Call("crm_resource -?", "--wait"),
- ]
-def call_wait(timeout, retval=0, stderr=""):
- return [
- Call(
- "crm_resource --wait --timeout={0}".format(timeout),
- stderr=stderr,
- returncode=retval
- ),
- ]
-def call_dummy_metadata():
- return [
- Call(
- "crm_resource --show-metadata ocf:heartbeat:Dummy",
- open(rc("resource_agent_ocf_heartbeat_dummy.xml")).read()
- ),
- ]
-
-def calls_cib(cib_pre, cib_post, cib_base_file=None):
- return (
- call_cib_load(cib_resources(cib_pre, cib_base_file=cib_base_file))
- +
- call_cib_push(cib_resources(cib_post, cib_base_file=cib_base_file))
- )
-
-def calls_cib_and_status(cib_pre, status, cib_post, cib_base_file=None):
- return (
- call_cib_load(cib_resources(cib_pre, cib_base_file=cib_base_file))
- +
- call_status(state_complete(status))
- +
- call_cib_push(cib_resources(cib_post, cib_base_file=cib_base_file))
- )
-
-def calls_cib_load_and_upgrade(cib_old_version):
- return (
- call_cib_load(cib_resources(cib_old_version))
- +
- call_cib_upgrade()
- )
-
-
-
-def cib_resources(cib_resources_xml, cib_base_file=None):
- cib_xml = open(rc(cib_base_file or "cib-empty.xml")).read()
- cib = etree.fromstring(cib_xml)
- resources_section = cib.find(".//resources")
- for child in etree.fromstring(cib_resources_xml):
- resources_section.append(child)
- return etree_to_str(cib)
-
-
-def state_complete(resource_status_xml):
- status = etree.parse(rc("crm_mon.minimal.xml")).getroot()
- resource_status = etree.fromstring(resource_status_xml)
+def complete_state_resources(resource_status):
for resource in resource_status.xpath(".//resource"):
_default_element_attributes(
resource,
@@ -128,14 +41,25 @@ def state_complete(resource_status_xml):
"failed": "false",
}
)
- status.append(resource_status)
- return etree_to_str(status)
+ return resource_status
+
def _default_element_attributes(element, default_attributes):
for name, value in default_attributes.items():
if name not in element.attrib:
element.attrib[name] = value
+def debug(code, **kwargs):
+ return severities.DEBUG, code, kwargs, None
+
+def warn(code, **kwargs):
+ return severities.WARNING, code, kwargs, None
+
+def error(code, force_code=None, **kwargs):
+ return severities.ERROR, code, kwargs, force_code
+
+def info(code, **kwargs):
+ return severities.INFO, code, kwargs, None
def report_not_found(res_id, context_type=""):
return (
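
For illustration, the severity helpers above build the
(severity, code, kwargs, force_code) tuples consumed by
assert_report_item_list_equal (a sketch; the report code and its kwargs are
only an example):

    from pcs.common import report_codes
    from pcs.test.tools import fixture

    # == (ReportItemSeverity.ERROR, report_codes.ID_NOT_FOUND,
    #     {"id": "R1", "context_type": ""}, None)
    expected = [
        fixture.error(report_codes.ID_NOT_FOUND, id="R1", context_type=""),
    ]
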
diff --git a/pcs/test/tools/fixture_cib.py b/pcs/test/tools/fixture_cib.py
new file mode 100644
index 0000000..cd1c7b1
--- /dev/null
+++ b/pcs/test/tools/fixture_cib.py
@@ -0,0 +1,170 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+)
+
+from lxml import etree
+
+from pcs.common.tools import is_string
+from pcs.test.tools.xml import etree_to_str
+
+
+def _replace(element_to_replace, new_element):
+ parent = element_to_replace.getparent()
+ for child in parent:
+ if element_to_replace == child:
+ index = list(parent).index(child)
+ parent.remove(child)
+ parent.insert(index, new_element)
+ return
+
+def _xml_to_element(xml):
+ try:
+ new_element = etree.fromstring(xml)
+ except etree.XMLSyntaxError:
+ raise AssertionError(
+ "Cannot put to the cib a non-xml fragment:\n'{0}'"
+ .format(xml)
+ )
+ return new_element
+
+def _find_in(cib_tree, element_xpath):
+ element = cib_tree.find(element_xpath)
+ if element is None:
+ raise AssertionError(
+ "Cannot find '{0}' in given cib:\n{1}".format(
+ element_xpath,
+ etree_to_str(cib_tree)
+ )
+ )
+ return element
+
+def remove(element_xpath):
+ def remove(cib_tree):
+ xpath_list = (
+ [element_xpath] if is_string(element_xpath) else element_xpath
+ )
+ for xpath in xpath_list:
+ element_to_remove = _find_in(cib_tree, xpath)
+ element_to_remove.getparent().remove(element_to_remove)
+ return remove
+
+def put_or_replace(parent_xpath, new_content):
+ #This transformation makes sense in the "configuration" section only. In
+ #this section there are sub-tags (optional or mandatory) that can occur
+ #once at most.
+ #
+ #In other sections it is possible to have multiple occurrences of
+ #sub-tags. For such cases it is better to use `replace_all` - the
+ #difference is that in `replace_all` the element to be replaced is
+ #specified by its full xpath whilst in `put_or_replace` the xpath of the
+ #parent element is specified.
+ def replace_optional(cib_tree):
+ element = _xml_to_element(new_content)
+ parent = _find_in(cib_tree, parent_xpath)
+ current_elements = parent.findall(element.tag)
+
+ if len(current_elements) > 1:
+ raise _cannot_multireplace(element.tag, parent_xpath, cib_tree)
+
+ if current_elements:
+ _replace(current_elements[0], element)
+ else:
+ parent.append(element)
+
+ return replace_optional
+
+def replace_all(replacements):
+ """
+ Return a function that replace more elements (defined by replacement_dict)
+ in the cib_tree with new_content.
+
+ dict replacemens -- contains more replacements:
+ key is xpath - its destination must be one element: replacement is
+ applied only on the first occurence
+ value is new content -contains a content that have to be placed instead
+ of an element found by element_xpath
+ """
+ def replace(cib_tree):
+ for xpath, new_content in replacements.items():
+ _replace(_find_in(cib_tree, xpath), _xml_to_element(new_content))
+ return replace
+
+#Possible modifier shortcuts are defined here.
+#Keep in mind that every key will be a named parameter in a config function
+#(see the modifier_shortcuts param in some of the
+#pcs.test.tools.command_env.config_* modules)
+#
+#DO NOT USE CONFLICTING KEYS HERE!
+#1) args of pcs.test.tools.command_env.calls#CallListBuilder.place:
+# name, before, instead
+#2) args of pcs.test.tools.command_env.mock_runner#Call.__init__
+# command, stdout, stderr, returncode, check_stdin
+#3) special args of pcs.test.tools.command_env.config_*
+# modifiers, filename, load_key, wait, exception
+#A conflicting key would not be applied. Not to mention that the majority of
+#these names make no sense for modifying a cib ;)
+MODIFIER_GENERATORS = {
+ "remove": remove,
+ "replace": replace_all,
+ "resources": lambda xml: replace_all({"./configuration/resources": xml}),
+ "optional_in_conf": lambda xml: put_or_replace("./configuration", xml),
+ #a generic `put_or_replace` modifier makes no sense - see the explanation
+ #inside that function - all use cases should be covered by `optional_in_conf`
+}
+
+def create_modifiers(**modifier_shortcuts):
+ """
+ Return list of modifiers: list of functions that transform cib
+
+ dict modifier_shortcuts -- a new modifier is generated from each modifier
+ shortcut.
+ As key there can be keys of MODIFIER_GENERATORS.
+ Value is passed into appropriate generator from MODIFIER_GENERATORS.
+
+ """
+ unknown_shortcuts = (
+ set(modifier_shortcuts.keys()) - set(MODIFIER_GENERATORS.keys())
+ )
+ if unknown_shortcuts:
+ raise AssertionError(
+ "Unknown modifier shortcuts '{0}', available are: '{1}'".format(
+ "', '".join(list(unknown_shortcuts)),
+ "', '".join(MODIFIER_GENERATORS.keys()),
+ )
+ )
+
+ return [
+ MODIFIER_GENERATORS[name](param)
+ for name, param in modifier_shortcuts.items()
+ ]
+
+def modify_cib(cib_xml, modifiers=None, **modifier_shortcuts):
+ """
+ Apply modifiers to cib_xml and return the result cib_xml
+
+ string cib_xml -- initial cib
+ list of callable modifiers -- each takes cib (etree.Element)
+ dict modifier_shortcuts -- a new modifier is generated from each modifier
+ shortcut.
+ As key there can be keys of MODIFIER_GENERATORS.
+ Value is passed into appropriate generator from MODIFIER_GENERATORS.
+ """
+ modifiers = modifiers if modifiers else []
+ all_modifiers = modifiers + create_modifiers(**modifier_shortcuts)
+
+ if not all_modifiers:
+ return cib_xml
+
+ cib_tree = etree.fromstring(cib_xml)
+ for modify in all_modifiers:
+ modify(cib_tree)
+
+ return etree_to_str(cib_tree)
+
+def _cannot_multireplace(tag, parent_xpath, cib_tree):
+ return AssertionError(
+ (
+ "Cannot replace '{element}' in '{parent}' because '{parent}'"
+ " contains more than one '{element}' in given cib:\n{cib}"
+ ).format(element=tag, parent=parent_xpath, cib=etree_to_str(cib_tree))
+ )
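
For illustration, a sketch of the shortcut expansion in modify_cib (the xml
fragment and the xpath are made up):

    new_cib_xml = modify_cib(
        cib_xml,
        # replaces ./configuration/resources as a whole
        resources="<resources><primitive id='A'/></resources>",
        # removes the first element matching the xpath (a list also works)
        remove="./configuration/constraints/rsc_location",
    )
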
diff --git a/pcs/test/tools/integration_lib.py b/pcs/test/tools/integration_lib.py
index 3336d51..0c0befa 100644
--- a/pcs/test/tools/integration_lib.py
+++ b/pcs/test/tools/integration_lib.py
@@ -2,126 +2,36 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
-from os import path
+from pcs.test.tools.command_env.calls import Queue, CallListBuilder
+from pcs.test.tools.command_env.mock_runner import Runner as NewRunner
-from pcs.test.tools.assertions import assert_xml_equal
-from pcs import settings
+#TODO please remove this module when Runner is no longer used. Currently its
+#only usage is in the module pcs.test.test_lib_commands_sbd.
-class Call(object):
- command_completions = {
- "crm_resource": path.join(settings.pacemaker_binaries, "crm_resource"),
- "cibadmin": path.join(settings.pacemaker_binaries, "cibadmin"),
- "crm_mon": path.join(settings.pacemaker_binaries, "crm_mon"),
- "sbd": settings.sbd_binary,
- }
-
- @staticmethod
- def create_check_stdin_xml(expected_stdin):
- def stdin_xml_check(stdin, command, order_num):
- assert_xml_equal(
- expected_stdin,
- stdin,
- (
- "Trying to run command no. {0}"
- "\n\n '{1}'\n\nwith expected xml stdin.\n"
- ).format(order_num, command)
- )
- return stdin_xml_check
-
- def __init__(
- self, command, stdout="", stderr="", returncode=0, check_stdin=None
- ):
- """
- callable check_stdin raises AssertionError when given stdin doesn't match
- """
- self.command = self.__complete_command(command)
- self.stdout = stdout
- self.stderr = stderr
- self.returncode = returncode
- self.check_stdin = check_stdin if check_stdin else self.__check_no_stdin
-
- def __complete_command(self, command):
- for shortcut, full_path in self.command_completions.items():
- if command.startswith("{0} ".format(shortcut)):
- return full_path + command[len(shortcut):]
- return command
-
- def __check_no_stdin(self, stdin, command, order_num):
- if stdin:
- raise AssertionError(
- (
- "With command\n\n '{0}'\n\nno stdin expected but was"
- "\n\n'{1}'"
- )
- .format(command, stdin)
- )
-
- @property
- def result(self):
- return self.stdout, self.stderr, self.returncode
class Runner(object):
def __init__(self):
self.set_runs([])
- def assert_can_take_next_run(self, command, stdin_string):
- if not self.run_list:
- raise AssertionError(
- (
- "No next run expected, but was:\n '{command}'{stdin}\n"
- "already launched:\n{already_launched}"
- ).format(
- command=command,
- stdin=(
- "" if not stdin_string else "\nwith stdin:\n\n{0}\n"
- .format(stdin_string)
- ),
- already_launched=" " + "\n ".join([
- "'{0}'".format(run.command)
- for run in self.already_launched_list
- ])
- )
- )
- return self.run_list.pop(0)
-
- def assert_command_match(self, expected_command, entered_command):
- if entered_command != expected_command:
- raise AssertionError(
- "As {0}. command expected\n\n '{1}'\n\nbut was\n\n '{2}'"
- .format(
- self.current_order_num,
- expected_command,
- entered_command
- )
- )
-
def assert_everything_launched(self):
- if self.run_list:
+ if self.__call_queue.remaining:
raise AssertionError(
"There are remaining expected commands: \n '{0}'".format(
- "'\n '".join([call.command for call in self.run_list])
+ "'\n '".join([
+ call.command
+ for call in self.__call_queue.remaining
+ ])
)
)
- @property
- def current_order_num(self):
- return len(self.already_launched_list) + 1
-
- def run(
- self, args, stdin_string=None, env_extend=None, binary_output=False
- ):
- command = " ".join(args)
- next_run = self.assert_can_take_next_run(command, stdin_string)
- self.assert_command_match(next_run.command, command)
- next_run.check_stdin(stdin_string, command, self.current_order_num)
- self.already_launched_list.append(next_run)
- return next_run.result
-
def set_runs(self, run_list):
- self.run_list = run_list
- self.already_launched_list = []
+ call_list_builder = CallListBuilder()
+ for run in run_list:
+ call_list_builder.place("", run)
+
+ self.__call_queue = Queue(call_list_builder)
+ self.run = NewRunner(self.__call_queue).run
diff --git a/pcs/test/tools/misc.py b/pcs/test/tools/misc.py
index 8423cf4..fa8c6c0 100644
--- a/pcs/test/tools/misc.py
+++ b/pcs/test/tools/misc.py
@@ -2,15 +2,16 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
-import difflib
-import os.path
+import logging
+import os
import re
-from pcs import utils
+# from pcs import utils
from pcs.common.tools import is_string
+from pcs.lib.external import CommandRunner, is_service_enabled
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.test.tools.pcs_unittest import (
mock,
skipUnless,
@@ -19,22 +20,11 @@ from pcs.test.tools.pcs_unittest import (
testdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-def prepare_diff(first, second):
- """
- Return a string containing a diff of first and second
- """
- return "".join(
- difflib.Differ().compare(first.splitlines(1), second.splitlines(1))
- )
-
-def ac(a,b):
- """
- Compare the actual output 'a' and an expected output 'b', print diff b a
- """
- if a != b:
- raise AssertionError(
- "strings not equal:\n{0}".format(prepare_diff(b, a))
- )
+runner = CommandRunner(
+ mock.MagicMock(logging.Logger),
+ MockLibraryReportProcessor(),
+ os.environ
+)
def get_test_resource(name):
"""Return full path to a test resource file specified by name"""
@@ -53,7 +43,7 @@ def compare_version(a, b):
return cmp3(a[0], b[0])
def is_minimum_pacemaker_version(cmajor, cminor, crev):
- output, dummy_retval = utils.run(["crm_mon", "--version"])
+ output, dummy_stderr, dummy_retval = runner.run(["crm_mon", "--version"])
pacemaker_version = output.split("\n")[0]
r = re.compile(r"Pacemaker (\d+)\.(\d+)\.(\d+)")
m = r.match(pacemaker_version)
@@ -63,7 +53,9 @@ def is_minimum_pacemaker_version(cmajor, cminor, crev):
return compare_version((major, minor, rev), (cmajor, cminor, crev)) > -1
def is_minimum_pacemaker_features(cmajor, cminor, crev):
- output, dummy_retval = utils.run(["pacemakerd", "--features"])
+ output, dummy_stderr, dummy_retval = runner.run(
+ ["pacemakerd", "--features"]
+ )
features_version = output.split("\n")[1]
r = re.compile(r"Supporting v(\d+)\.(\d+)\.(\d+):")
m = r.search(features_version)
@@ -98,12 +90,20 @@ skip_unless_pacemaker_supports_bundle = skip_unless_pacemaker_features(
)
def skip_unless_pacemaker_supports_systemd():
- output, dummy_retval = utils.run(["pacemakerd", "--features"])
+ output, dummy_stderr, dummy_retval = runner.run(
+ ["pacemakerd", "--features"]
+ )
return skipUnless(
"systemd" in output,
"Pacemaker does not support systemd resources"
)
+def skip_if_service_enabled(service_name):
+ return skipUnless(
+ not is_service_enabled(runner, service_name),
+ "Service {0} must be disabled".format(service_name),
+ )
+
def create_patcher(target_prefix_or_module):
"""
Return function for patching tests with preconfigured target prefix
diff --git a/pcs/test/tools/pcs_runner.py b/pcs/test/tools/pcs_runner.py
index 584a1d9..81601a9 100644
--- a/pcs/test/tools/pcs_runner.py
+++ b/pcs/test/tools/pcs_runner.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os.path
diff --git a/pcs/test/tools/xml.py b/pcs/test/tools/xml.py
index 27418b6..fed91ef 100644
--- a/pcs/test/tools/xml.py
+++ b/pcs/test/tools/xml.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import xml.dom.minidom
diff --git a/pcs/usage.py b/pcs/usage.py
index d047d55..96b98fb 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import re
@@ -161,6 +160,9 @@ Options:
--version Print pcs version information.
--request-timeout Timeout for each outgoing request to another node in
seconds. Default is 60s.
+ --force Override checks and errors; the exact behavior depends on
+ the command. WARNING: Using the --force option is
+ strongly discouraged unless you know what you are doing.
Commands:
cluster Configure cluster options and nodes.
@@ -561,7 +563,8 @@ Usage: pcs cluster [commands]...
Configure cluster for use with pacemaker
Commands:
- auth [node] [...] [-u username] [-p password] [--force] [--local]
+ auth [<node>[:<port>]] [...] [-u <username>] [-p <password>] [--force]
+ [--local]
Authenticate pcs to pcsd on nodes specified, or on all nodes
configured in the local cluster if no nodes are specified (authorization
tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root).
@@ -636,20 +639,22 @@ Commands:
ttl defaults to 1. If --broadcast is specified, --mcast0/1,
--mcastport0/1 & --ttl0/1 are ignored.
- start [--all | <node>... ] [--wait[=<n>]]
- Start corosync & pacemaker on specified node(s), if a node is not
- specified then corosync & pacemaker are started on the local node.
- If --all is specified then corosync & pacemaker are started on all
- nodes. If --wait is specified, wait up to 'n' seconds for nodes
- to start.
+ start [--all | <node>... ] [--wait[=<n>]] [--request-timeout=<seconds>]
+ Start a cluster on specified node(s). If no nodes are specified then
+ start a cluster on the local node. If --all is specified then start
+ a cluster on all nodes. If the cluster has many nodes then the start
+ request may time out. In that case you should consider setting
+ --request-timeout to a suitable value. If --wait is specified, pcs
+ waits up to 'n' seconds for the cluster to get ready to provide
+ services after the cluster has successfully started.
stop [--all | <node>... ] [--request-timeout=<seconds>]
- Stop corosync & pacemaker on specified node(s), if a node is not
- specified then corosync & pacemaker are stopped on the local node. If
- --all is specified then corosync & pacemaker are stopped on all nodes.
- If the cluster is running resources which take long time to stop, the
- request may time out before the cluster actually stops. In that case you
- should consider setting --request-timeout to a suitable value.
+ Stop a cluster on specified node(s). If no nodes are specified then
+ stop a cluster on the local node. If --all is specified then stop
+ a cluster on all nodes. If the cluster is running resources which take
+ long time to stop then the stop request may time out before the cluster
+ actually stops. In that case you should consider setting
+ --request-timeout to a suitable value.
kill
Force corosync and pacemaker daemons to stop on the local node
@@ -805,7 +810,7 @@ Commands:
performed on the currently running cluster. If -V is used
more verbose output will be printed.
- report [--from "YYYY-M-D H:M:S" [--to "YYYY-M-D H:M:S"]] dest
+ report [--from "YYYY-M-D H:M:S" [--to "YYYY-M-D H:M:S"]] <dest>
Create a tarball containing everything needed when reporting cluster
problems. If --from and --to are not used, the report will include
the past 24 hours.
@@ -916,9 +921,11 @@ Commands:
call to stonith which will turn the node off instead of rebooting it).
confirm <node> [--force]
- Confirm that the host specified is currently down. This command
- should ONLY be used when the node specified has already been confirmed
- to be powered off and to have no access to shared resources.
+ Confirm to the cluster that the specified node is powered off. This
+ allows the cluster to recover from a situation where no stonith device
+ is able to fence the node. This command should ONLY be used after
+ manually ensuring that the node is powered off and has no access to
+ shared resources.
WARNING: If this node is not actually powered off or it does have
access to shared resources, data corruption/cluster failure can occur.
diff --git a/pcs/utils.py b/pcs/utils.py
index 081ee65..174f8e7 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -2,7 +2,6 @@ from __future__ import (
absolute_import,
division,
print_function,
- unicode_literals,
)
import os
@@ -65,9 +64,13 @@ from pcs.lib.external import (
_service,
_systemctl,
)
+from pcs.lib.communication.nodes import (
+ availability_checker_node,
+ PrecheckNewNode,
+)
+from pcs.lib.communication.tools import run as run_com_cmd
import pcs.lib.corosync.config_parser as corosync_conf_parser
from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade
-from pcs.lib.nodes_task import check_can_add_node_to_cluster
from pcs.lib.pacemaker.live import has_wait_for_idle_support
from pcs.lib.pacemaker.state import ClusterState
from pcs.lib.pacemaker.values import(
@@ -117,9 +120,8 @@ def checkAndUpgradeCIB(major,minor,rev):
cmajor, cminor, crev = getValidateWithVersion(get_cib_dom())
if cmajor > major or (cmajor == major and cminor > minor) or (cmajor == major and cminor == minor and crev >= rev):
return False
- else:
- cluster_upgrade()
- return True
+ cluster_upgrade()
+ return True
def cluster_upgrade():
output, retval = run(["cibadmin", "--upgrade", "--force"])
@@ -230,11 +232,17 @@ def remove_uid_gid_file(uid,gid):
return file_removed
# Returns a dictionary {'nodeA':'tokenA'}
def readTokens():
- tokens = {}
+ return read_token_file()["tokens"]
+
+def read_token_file():
+ data = {
+ "tokens": {},
+ "ports": {},
+ }
output, retval = run_pcsdcli("read_tokens")
if retval == 0 and output['status'] == 'ok' and output['data']:
- tokens = output['data']
- return tokens
+ data = output['data']
+ return data
def repeat_if_timeout(send_http_request_function, repeat_count=15):
def repeater(node, *args, **kwargs):
@@ -274,8 +282,14 @@ def getPacemakerNodeStatus(node):
node, "remote/pacemaker_node_status", None, False, False
)
-def startCluster(node, quiet=False):
- return sendHTTPRequest(node, 'remote/cluster_start', None, False, not quiet)
+def startCluster(node, quiet=False, timeout=None):
+ return sendHTTPRequest(
+ node,
+ "remote/cluster_start",
+ printResult=False,
+ printSuccess=not quiet,
+ timeout=timeout
+ )
def stopPacemaker(node, quiet=False, force=True):
return stopCluster(
@@ -328,7 +342,7 @@ def resumeConfigSyncing(node):
data = urllib_urlencode({"sync_thread_resume": 1})
return sendHTTPRequest(node, "remote/set_sync_options", data, False, False)
-def canAddNodeToCluster(node_communicator, node):
+def canAddNodeToCluster(node_communicator, target):
"""
Return tuple with two parts. The first part is information if the node can
be added to a cluster. The second part is a relevant explanation for first
@@ -338,7 +352,9 @@ def canAddNodeToCluster(node_communicator, node):
NodeAddresses node contain destination for request
"""
report_list = []
- check_can_add_node_to_cluster(node_communicator, node, report_list)
+ com_cmd = PrecheckNewNode(report_list, availability_checker_node)
+ com_cmd.add_request(target)
+ run_com_cmd(node_communicator, com_cmd)
analyzer = ReportListAnalyzer(report_list)
if not analyzer.error_list:
@@ -383,8 +399,7 @@ def addLocalNode(node, node_to_add, ring1_addr=None):
except ValueError:
return 1, output
return retval2, output
- else:
- return 1, output
+ return 1, output
def removeLocalNode(node, node_to_remove, pacemaker_remove=False):
data = urllib_urlencode({'remove_nodename':node_to_remove, 'pacemaker_remove':pacemaker_remove})
@@ -395,8 +410,7 @@ def removeLocalNode(node, node_to_remove, pacemaker_remove=False):
except ValueError:
return 1,output
return 0, myout
- else:
- return 1, output
+ return 1, output
# Send an HTTP request to a node return a tuple with status, data
@@ -411,7 +425,13 @@ def removeLocalNode(node, node_to_remove, pacemaker_remove=False):
def sendHTTPRequest(
host, request, data=None, printResult=True, printSuccess=True, timeout=None
):
- url = "https://{host}:2224/{request}".format(host=host, request=request)
+ token_file = read_token_file()
+ port = token_file["ports"].get(host)
+ if port is None:
+ port = settings.pcsd_default_port
+ url = "https://{host}:{port}/{request}".format(
+ host=host, request=request, port=port
+ )
if "--debug" in pcs_options:
print("Sending HTTP Request to: " + url)
print("Data: {0}".format(data))
@@ -432,7 +452,7 @@ def sendHTTPRequest(
output = BytesIO()
debug_output = BytesIO()
- cookies = __get_cookie_list(host, readTokens())
+ cookies = __get_cookie_list(host, token_file["tokens"])
if not timeout:
timeout = settings.default_request_timeout
if "--request-timeout" in pcs_options:
@@ -506,9 +526,10 @@ def sendHTTPRequest(
dummy_errno, reason = e.args
if "--debug" in pcs_options:
print("Response Reason: {0}".format(reason))
- msg = "Unable to connect to {host} ({reason})".format(
- host=host, reason=reason
- )
+ msg = (
+ "Unable to connect to {host}, try setting higher timeout in "
+ "--request-timeout option ({reason})"
+ ).format(host=host, reason=reason)
if printResult:
print(msg)
return (2, msg)
@@ -799,9 +820,9 @@ def addNodeToClusterConf(node):
return True
-def removeNodeFromCorosync(node):
+def removeNodeFromCorosync(node_expression):
removed_node = False
- node0, node1 = parse_multiring_node(node)
+ node0, node1 = parse_multiring_node(node_expression)
corosync_conf = getCorosyncConfParsed()
for nodelist in corosync_conf.get_sections("nodelist"):
@@ -947,6 +968,13 @@ def get_cluster_conf_cman_options():
def subprocess_setup():
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+def touch_cib_file(filename):
+ if not os.path.isfile(filename):
+ try:
+ write_empty_cib(filename)
+ except EnvironmentError as e:
+ err("Unable to write to file: '{0}': '{1}'".format(filename, str(e)))
+
# Run command, with environment and return (output, retval)
# DEPRECATED, please use lib.external.CommandRunner via utils.cmd_runner()
def run(
@@ -960,12 +988,7 @@ def run(
env_var["LC_ALL"] = "C"
if usefile:
env_var["CIB_file"] = filename
-
- if not os.path.isfile(filename):
- try:
- write_empty_cib(filename)
- except IOError:
- err("Unable to write to file: " + filename)
+ touch_cib_file(filename)
command = args[0]
if command[0:3] == "crm" or command in ["cibadmin", "cman_tool", "iso8601"]:
@@ -1076,7 +1099,7 @@ def run_pcsdcli(command, data=None):
def auth_nodes_do(nodes, username, password, force, local):
pcsd_data = {
- 'nodes': list(set(nodes)),
+ 'nodes': nodes,
'username': username,
'password': password,
'force': force,
@@ -1149,7 +1172,14 @@ def call_local_pcsd(argv, interactive_auth=False, std_in=None):
print('Please authenticate yourself to the local pcsd')
username = get_terminal_input('Username: ')
password = get_terminal_password()
- auth_nodes_do(["localhost"], username, password, True, True)
+ port = get_terminal_input(
+ 'Port (default: {0}): '.format(settings.pcsd_default_port)
+ # the displayed default may not be accurate - the default port
+ # configured in pcsd is the one that will actually be used
+ )
+ if not port:
+ port = None
+ auth_nodes_do({"localhost": port}, username, password, True, True)
print()
code, output = sendHTTPRequest(
"localhost", "run_pcs", data_send, False, False
@@ -1686,7 +1716,7 @@ def is_etree(var):
)
# Replace only configuration section of cib with dom passed
-def replace_cib_configuration(dom, cib_upgraded=False):
+def replace_cib_configuration(dom):
if is_etree(dom):
#etree returns string in bytes: b'xml'
#python 3 removed .encode() from byte strings
@@ -1697,11 +1727,7 @@ def replace_cib_configuration(dom, cib_upgraded=False):
new_dom = dom.toxml()
else:
new_dom = dom
- cmd = ["cibadmin", "--replace", "-V", "--xml-pipe"]
- if cib_upgraded:
- print("CIB has been upgraded to the latest schema version.")
- else:
- cmd += ["-o", "configuration"]
+ cmd = ["cibadmin", "--replace", "-V", "--xml-pipe", "-o", "configuration"]
output, retval = run(cmd, False, new_dom)
if retval != 0:
err("Unable to update cib\n"+output)
@@ -1878,20 +1904,6 @@ def set_cib_property(prop, value, cib_dom=None):
if update_cib:
replace_cib_configuration(crm_config)
-def setAttribute(a_type, a_name, a_value, exit_on_error=False):
- args = ["crm_attribute", "--type", a_type, "--attr-name", a_name,
- "--attr-value", a_value]
-
- if a_value == "":
- args.append("-D")
-
- output, retval = run(args)
- if retval != 0:
- if exit_on_error:
- err(output)
- else:
- print(output)
-
def getTerminalSize(fd=1):
"""
Returns height and width of current terminal. First tries to get
@@ -1920,8 +1932,7 @@ def get_terminal_input(message=None):
try:
if PYTHON2:
return raw_input("")
- else:
- return input("")
+ return input("")
except EOFError:
return ""
except KeyboardInterrupt:
@@ -2631,14 +2642,13 @@ def is_valid_cib_value(type, value, enum_options=[]):
type = type.lower()
if type == "enum":
return value in enum_options
- elif type == "boolean":
+ if type == "boolean":
return is_boolean(value)
- elif type == "integer":
+ if type == "integer":
return is_score(value)
- elif type == "time":
+ if type == "time":
return get_timeout_seconds(value) is not None
- else:
- return True
+ return True
def get_cluster_property_default(prop_def_dict, prop):
@@ -2781,7 +2791,7 @@ def get_lib_env():
groups,
cib_data,
corosync_conf_data,
- auth_tokens_getter=readTokens,
+ token_file_data_getter=read_token_file,
request_timeout=pcs_options.get("--request-timeout"),
)
@@ -2800,14 +2810,14 @@ def get_cli_env():
env = Env()
env.user = user
env.groups = groups
- env.auth_tokens_getter = readTokens
+ env.token_file_data_getter = read_token_file
env.debug = "--debug" in pcs_options
env.request_timeout = pcs_options.get("--request-timeout")
return env
def get_middleware_factory():
return middleware.create_middleware_factory(
- cib=middleware.cib(usefile, get_cib, replace_cib_configuration),
+ cib=middleware.cib(filename if usefile else None, touch_cib_file),
corosync_conf_existing=middleware.corosync_conf_existing(
pcs_options.get("--corosync_conf", None)
),
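
For illustration, the request URL construction above now honours per-node
ports stored in the tokens file (a sketch; the host and port values are
made up):

    token_file = read_token_file()
    # e.g. {"tokens": {"node1": "..."}, "ports": {"node1": "2225"}}
    port = token_file["ports"].get("node1")
    if port is None:
        port = settings.pcsd_default_port
    url = "https://{host}:{port}/{request}".format(
        host="node1", port=port, request="remote/status"
    )  # -> https://node1:2225/remote/status
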
diff --git a/pcsd/Gemfile.lock b/pcsd/Gemfile.lock
index a3fab96..dcb0d05 100644
--- a/pcsd/Gemfile.lock
+++ b/pcsd/Gemfile.lock
@@ -2,17 +2,17 @@ GEM
remote: https://rubygems.org/
remote: https://tojeline.fedorapeople.org/rubygems/
specs:
- backports (3.6.8)
+ backports (3.9.1)
ethon (0.10.1)
- ffi (1.9.17)
- json (2.0.3)
- multi_json (1.12.1)
+ ffi (1.9.18)
+ json (2.1.0)
+ multi_json (1.12.2)
open4 (1.3.4)
orderedhash (0.0.6)
rack (1.6.4)
rack-protection (1.5.3)
rack
- rack-test (0.6.3)
+ rack-test (0.7.0)
rack (>= 1.0)
rpam-ruby19 (1.2.1)
sinatra (1.4.8)
@@ -26,7 +26,7 @@ GEM
rack-test
sinatra (~> 1.4.0)
tilt (>= 1.3, < 3)
- tilt (2.0.6)
+ tilt (2.0.8)
PLATFORMS
ruby
diff --git a/pcsd/Makefile b/pcsd/Makefile
index 2ecd4de..d452ac0 100644
--- a/pcsd/Makefile
+++ b/pcsd/Makefile
@@ -1,4 +1,4 @@
-FFI_VERSION="1.9.17"
+FFI_VERSION="1.9.18"
FFI_C_DIR=vendor/bundle/ruby/gems/ffi-${FFI_VERSION}/ext/ffi_c
build_gems: get_gems
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
index 1b129b2..a97cc29 100644
--- a/pcsd/bootstrap.rb
+++ b/pcsd/bootstrap.rb
@@ -25,13 +25,11 @@ end
def is_systemctl()
systemctl_paths = [
- '/usr/bin/systemctl',
- '/bin/systemctl',
- '/var/run/systemd/system',
'/run/systemd/system',
+ '/var/run/systemd/system',
]
systemctl_paths.each { |path|
- return true if File.exist?(path)
+ return true if File.directory?(path)
}
return false
end
@@ -47,7 +45,7 @@ def get_pcs_path()
end
end
-PCS_VERSION = '0.9.159'
+PCS_VERSION = '0.9.160'
# unique instance signature, allows detection of daemon restarts
DAEMON_INSTANCE_SIGNATURE = Digest::SHA2.hexdigest("#{Time.now} #{rand()}")
COROSYNC = COROSYNC_BINARIES + "corosync"
@@ -78,6 +76,12 @@ def configure_logger(log_device)
else
logger.debug "Did not detect RHEL 6"
end
+
+ if ISSYSTEMCTL
+ logger.debug "Detected systemd is in use"
+ else
+ logger.debug "Detected systemd is not in use"
+ end
return logger
end
diff --git a/pcsd/cfgsync.rb b/pcsd/cfgsync.rb
index ce1aeb0..14b5573 100644
--- a/pcsd/cfgsync.rb
+++ b/pcsd/cfgsync.rb
@@ -460,7 +460,7 @@ module Cfgsync
class ConfigPublisher
- def initialize(auth_user, configs, nodes, cluster_name, tokens={})
+ def initialize(auth_user, configs, nodes, cluster_name, tokens={}, ports={})
@configs = configs
@nodes = nodes
@cluster_name = cluster_name
@@ -469,6 +469,7 @@ module Cfgsync
}
@additional_tokens = tokens
@auth_user = auth_user
+ @additional_ports = ports
end
def send(force=false)
@@ -487,7 +488,7 @@ module Cfgsync
threads << Thread.new {
code, out = send_request_with_token(
@auth_user, node, 'set_configs', true, data, true, nil, 30,
- @additional_tokens
+ @additional_tokens, @additional_ports
)
if 200 == code
begin
@@ -726,7 +727,9 @@ module Cfgsync
# save and sync updated config
# return true on success, false on version conflict
- def self.save_sync_new_version(config, nodes, cluster_name, fetch_on_conflict, tokens={})
+ def self.save_sync_new_version(
+ config, nodes, cluster_name, fetch_on_conflict, tokens={}, ports={}
+ )
if not cluster_name or cluster_name.empty?
# we run on a standalone host, no config syncing
config.version += 1
@@ -735,7 +738,7 @@ module Cfgsync
else
# we run in a cluster so we need to sync the config
publisher = ConfigPublisher.new(
- PCSAuth.getSuperuserAuth(), [config], nodes, cluster_name, tokens
+ PCSAuth.getSuperuserAuth(), [config], nodes, cluster_name, tokens, ports
)
old_configs, node_responses = publisher.publish()
if old_configs.include?(config.class.name)
@@ -754,7 +757,7 @@ module Cfgsync
end
end
- def self.merge_tokens_files(orig_cfg, to_merge_cfgs, new_tokens)
+ def self.merge_tokens_files(orig_cfg, to_merge_cfgs, new_tokens, new_ports)
# Merge tokens files, use only newer tokens files, keep the most recent
# tokens, make sure new_tokens are included.
max_version = orig_cfg.version
@@ -764,19 +767,24 @@ module Cfgsync
if to_merge_cfgs.length > 0
to_merge_cfgs.sort.each { |ft|
with_new_tokens.tokens.update(PCSTokens.new(ft.text).tokens)
+ with_new_tokens.ports.update(PCSTokens.new(ft.text).ports)
}
max_version = [to_merge_cfgs.max.version, max_version].max
end
end
with_new_tokens.tokens.update(new_tokens)
+ with_new_tokens.ports.update(new_ports)
config_new = PcsdTokens.from_text(with_new_tokens.text)
config_new.version = max_version
return config_new
end
- def self.save_sync_new_tokens(config, new_tokens, nodes, cluster_name)
+ def self.save_sync_new_tokens(
+ config, new_tokens, nodes, cluster_name, new_ports={}
+ )
with_new_tokens = PCSTokens.new(config.text)
with_new_tokens.tokens.update(new_tokens)
+ with_new_tokens.ports.update(new_ports)
config_new = PcsdTokens.from_text(with_new_tokens.text)
if not cluster_name or cluster_name.empty?
# we run on a standalone host, no config syncing
@@ -799,10 +807,12 @@ module Cfgsync
PCSAuth.getSuperuserAuth(), [config_new.class], nodes, cluster_name
)
fetched_tokens = fetcher.fetch_all()[config_new.class.name]
- config_new = Cfgsync::merge_tokens_files(config, fetched_tokens, new_tokens)
+ config_new = Cfgsync::merge_tokens_files(
+ config, fetched_tokens, new_tokens, new_ports
+ )
# and try to publish again
return Cfgsync::save_sync_new_version(
- config_new, nodes, cluster_name, true, new_tokens
+ config_new, nodes, cluster_name, true, new_tokens, new_ports
)
end
end
diff --git a/pcsd/config.rb b/pcsd/config.rb
index c4b4c8a..23738a3 100644
--- a/pcsd/config.rb
+++ b/pcsd/config.rb
@@ -181,15 +181,21 @@ class PCSConfig
end
end
+def hash_to_ordered_hash(hash)
+ new_hash = OrderedHash.new
+ hash.keys.sort.each { |key| new_hash[key] = hash[key] }
+ return new_hash
+end
class PCSTokens
- CURRENT_FORMAT = 2
- attr_accessor :tokens, :format_version, :data_version
+ CURRENT_FORMAT = 3
+ attr_accessor :tokens, :format_version, :data_version, :ports
def initialize(cfg_text)
@format_version = 0
@data_version = 0
@tokens = {}
+ @ports = {}
# set a reasonable parseable default if got empty text
if cfg_text.nil? or cfg_text.strip.empty?
@@ -212,6 +218,9 @@ class PCSTokens
)
end
+ if @format_version >= 3
+ @ports = json['ports'] || {}
+ end
if @format_version >= 2
@data_version = json['data_version'] || 0
@tokens = json['tokens'] || {}
@@ -226,13 +235,11 @@ class PCSTokens
end
def text()
- tokens_hash = OrderedHash.new
- @tokens.keys.sort.each { |key| tokens_hash[key] = @tokens[key] }
-
out_hash = OrderedHash.new
out_hash['format_version'] = CURRENT_FORMAT
out_hash['data_version'] = @data_version
- out_hash['tokens'] = tokens_hash
+ out_hash['tokens'] = hash_to_ordered_hash(@tokens)
+ out_hash['ports'] = hash_to_ordered_hash(@ports)
return JSON.pretty_generate(out_hash)
end
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 9725c4b..1fd5167 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -401,18 +401,24 @@ def send_nodes_request_with_token(auth_user, nodes, request, post=false, data={}
return code, out
end
-def send_request_with_token(auth_user, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=nil, additional_tokens={})
- token = additional_tokens[node] || get_node_token(node)
+def send_request_with_token(
+ auth_user, node, request, post=false, data={}, remote=true, raw_data=nil,
+ timeout=nil, additional_tokens={}, additional_ports={}
+)
+ token_file_data = read_token_file()
+ token = additional_tokens[node] || token_file_data.tokens[node]
$logger.info "SRWT Node: #{node} Request: #{request}"
if not token
$logger.error "Unable to connect to node #{node}, no token available"
return 400,'{"notoken":true}'
end
+ port = additional_ports[node] || token_file_data.ports[node]
cookies_data = {
'token' => token,
}
return send_request(
- auth_user, node, request, post, data, remote, raw_data, timeout, cookies_data
+ auth_user, node, request, post, data, remote, raw_data, timeout,
+ cookies_data, port
)
end
@@ -455,7 +461,7 @@ end
def send_request(
auth_user, node, request, post=false, data={}, remote=true, raw_data=nil,
- timeout=nil, cookies_data=nil
+ timeout=nil, cookies_data=nil, port=nil
)
cookies_data = {} if not cookies_data
if request.start_with?("/")
@@ -467,10 +473,12 @@ def send_request(
node6 = "[#{node}]"
end
+ port ||= PCSD_DEFAULT_PORT
+
if remote
- url = "https://#{node6}:2224/remote/#{request}"
+ url = "https://#{node6}:#{port}/remote/#{request}"
else
- url = "https://#{node6}:2224/#{request}"
+ url = "https://#{node6}:#{port}/#{request}"
end
data = _transform_data(data)
@@ -1103,13 +1111,21 @@ def is_cib_true(var)
return ['true', 'on', 'yes', 'y', '1'].include?(var.downcase)
end
+def read_token_file()
+ return PCSTokens.new(Cfgsync::PcsdTokens.from_file().text())
+end
+
def read_tokens()
- return PCSTokens.new(Cfgsync::PcsdTokens.from_file().text()).tokens
+ return read_token_file().tokens
+end
+
+def get_nodes_ports()
+ return read_token_file().ports
end
def write_tokens(tokens)
begin
- cfg = PCSTokens.new(Cfgsync::PcsdTokens.from_file().text())
+ cfg = read_token_file()
cfg.tokens = tokens
Cfgsync::PcsdTokens.from_text(cfg.text()).save()
rescue
@@ -1196,6 +1212,7 @@ def check_gui_status_of_nodes(auth_user, nodes, check_mutuality=false, timeout=1
end
def pcs_auth(auth_user, nodes, username, password, force=false, local=true)
+ # nodes is a hash (nodename -> port)
# if no sync is needed, do not report a sync error
sync_successful = true
sync_failed_nodes = []
@@ -1203,7 +1220,7 @@ def pcs_auth(auth_user, nodes, username, password, force=false, local=true)
# check for already authorized nodes
if not force
online, offline, not_authenticated = check_gui_status_of_nodes(
- auth_user, nodes, true
+ auth_user, nodes.keys, true
)
if not_authenticated.length < 1
result = {}
@@ -1220,8 +1237,12 @@ def pcs_auth(auth_user, nodes, username, password, force=false, local=true)
# get the tokens and sync them within the local cluster
new_tokens = {}
+ ports = {}
auth_responses.each { |node, response|
- new_tokens[node] = response['token'] if 'ok' == response['status']
+ if 'ok' == response['status']
+ new_tokens[node] = response['token']
+ ports[node] = nodes[node]
+ end
}
if not new_tokens.empty?
cluster_nodes = get_corosync_nodes()
@@ -1231,12 +1252,12 @@ def pcs_auth(auth_user, nodes, username, password, force=false, local=true)
if Process.uid != 0
# other tokens just need to be stored locally for the user
sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
- tokens_cfg, new_tokens, [], nil
+ tokens_cfg, new_tokens, [], nil, ports
)
return auth_responses, sync_successful, sync_failed_nodes, sync_responses
end
sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
- tokens_cfg, new_tokens, cluster_nodes, $cluster_name
+ tokens_cfg, new_tokens, cluster_nodes, $cluster_name, ports
)
sync_failed_nodes = []
sync_not_supported_nodes = []
@@ -1257,10 +1278,10 @@ def pcs_auth(auth_user, nodes, username, password, force=false, local=true)
if not local
# authorize nodes outside of the local cluster and nodes not supporting
# the tokens file synchronization in the other direction
- nodes_to_auth = []
- nodes.each { |node|
- nodes_to_auth << node if sync_not_supported_nodes.include?(node)
- nodes_to_auth << node if not cluster_nodes.include?(node)
+ nodes_to_auth = {}
+ nodes.each { |node, port|
+ nodes_to_auth[node] = port if sync_not_supported_nodes.include?(node)
+ nodes_to_auth[node] = port if not cluster_nodes.include?(node)
}
auth_responses2 = run_auth_requests(
auth_user, nodes_to_auth, nodes, username, password, force, false
@@ -1274,8 +1295,11 @@ end
def run_auth_requests(auth_user, nodes_to_send, nodes_to_auth, username, password, force=false, local=true)
data = {}
- nodes_to_auth.each_with_index { |node, index|
+ index = 0
+ nodes_to_auth.each { |node, port|
data["node-#{index}"] = node
+ data["port-#{node}"] = port if port
+ index += 1
}
data['username'] = username
data['password'] = password
@@ -1284,9 +1308,11 @@ def run_auth_requests(auth_user, nodes_to_send, nodes_to_auth, username, passwor
auth_responses = {}
threads = []
- nodes_to_send.each { |node|
+ nodes_to_send.each { |node, port|
threads << Thread.new {
- code, response = send_request(auth_user, node, 'auth', true, data)
+ code, response = send_request(
+ auth_user, node, 'auth', true, data, true, nil, nil, nil, port
+ )
if 200 == code
token = response.strip
if '' == token
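(Editorial sketch, not part of the diff: after this change the port resolves in send_request_with_token as additional_ports[node], then the tokens file, then PCSD_DEFAULT_PORT, and send_request splices it into the URL. The helper below condenses that chain; pcsd_url and the parameter names are made up, and IPv6 literals get bracketed as in the real code.)

    PCSD_DEFAULT_PORT = 2224

    def pcsd_url(node, request, file_ports: {}, extra_ports: {}, remote: true)
      host = node.include?(':') ? "[#{node}]" : node  # bracket IPv6 literals
      port = extra_ports[node] || file_ports[node] || PCSD_DEFAULT_PORT
      path = remote ? "remote/#{request}" : request
      "https://#{host}:#{port}/#{path}"
    end

    puts pcsd_url('rh7-1', 'status', file_ports: { 'rh7-1' => '3000' })
    # https://rh7-1:3000/remote/status
    puts pcsd_url('::1', 'status')
    # https://[::1]:2224/remote/status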
diff --git a/pcsd/pcsd-cli.rb b/pcsd/pcsd-cli.rb
index db56c91..1e4ed10 100755
--- a/pcsd/pcsd-cli.rb
+++ b/pcsd/pcsd-cli.rb
@@ -66,7 +66,13 @@ allowed_commands = {
# returns tokens of the user who runs pcsd-cli, thus no permission check
'only_superuser' => false,
'permissions' => nil,
- 'call' => lambda { |params, auth_user_| read_tokens() },
+ 'call' => lambda { |params, auth_user_|
+ token_cfg = read_token_file()
+ return {
+ :tokens => token_cfg.tokens,
+ :ports => token_cfg.ports,
+ }
+ },
},
'auth' => {
'only_superuser' => false,
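(Editorial sketch, not part of the diff: the read_tokens command of pcsd-cli now answers with both maps instead of a bare token hash. Assuming the reply is serialized to JSON like the other pcsd-cli replies, it looks as follows; the sample values are made up.)

    require 'json'

    reply = {
      :tokens => { 'rh7-1' => 'token-rh7-1' },
      :ports  => { 'rh7-1' => '2224' }
    }
    puts JSON.generate(reply)
    # {"tokens":{"rh7-1":"token-rh7-1"},"ports":{"rh7-1":"2224"}}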
diff --git a/pcsd/pcsd.8 b/pcsd/pcsd.8
index 8a929df..f8e04ed 100644
--- a/pcsd/pcsd.8
+++ b/pcsd/pcsd.8
@@ -1,4 +1,4 @@
-.TH PCSD "8" "June 2017" "pcs 0.9.159" "System Administration Utilities"
+.TH PCSD "8" "October 2017" "pcs 0.9.160" "System Administration Utilities"
.SH NAME
pcsd \- pacemaker/corosync configuration system daemon
@@ -12,6 +12,9 @@ Daemon for controlling and configuring pacemaker/corosync clusters via pcs.
.B PCSD_BIND_ADDR=<string>
List of IP addresses pcsd should bind to delimited by ',' character.
.TP
+.B PCSD_PORT=<string>
+Port on which pcsd should be available.
+.TP
.B PCSD_SSL_OPTIONS=<string>
SSL/TLS options delimited by ',' character. This is usually used to set SSL/TLS protocols accepted by pcsd. List of valid options can be obtained by running: ruby -e 'require "openssl"; puts OpenSSL::SSL.constants.grep /^OP_/'
.TP
diff --git a/pcsd/pcsd.conf b/pcsd/pcsd.conf
index b3433ac..cfb1191 100644
--- a/pcsd/pcsd.conf
+++ b/pcsd/pcsd.conf
@@ -8,6 +8,8 @@ PCSD_DISABLE_GUI=false
PCSD_SESSION_LIFETIME=3600
# List of IP addresses pcsd should bind to delimited by ',' character
#PCSD_BIND_ADDR='::'
+# Set port on which pcsd should be available
+#PCSD_PORT=2224
# SSL settings
# set SSL options delimited by ',' character
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index 1026a36..ad0370c 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -463,7 +463,7 @@ already been added to pcsd. You may not add two clusters with the same name int
# auth begin
retval, out = send_request_with_token(
- PCSAuth.getSuperuserAuth(), node, '/get_cluster_tokens'
+ PCSAuth.getSuperuserAuth(), node, '/get_cluster_tokens', false, {'with_ports' => '1'}
)
if retval == 404 # backward compatibility layer
warning_messages << "Unable to do correct authentication of cluster because it is running an old version of pcs/pcsd."
@@ -472,14 +472,24 @@ already been added to pcsd. You may not add two clusters with the same name int
return 400, "Unable to get authentication info from cluster '#{status['cluster_name']}'."
end
begin
- new_tokens = JSON.parse(out)
+ data = JSON.parse(out)
+ expected_keys = ['tokens', 'ports']
+ if expected_keys.all? {|i| data.has_key?(i) and data[i].class == Hash}
+ # new format
+ new_tokens = data["tokens"] || {}
+ new_ports = data["ports"] || {}
+ else
+ # old format
+ new_tokens = data
+ new_ports = {}
+ end
rescue
return 400, "Unable to get authentication info from cluster '#{status['cluster_name']}'."
end
sync_config = Cfgsync::PcsdTokens.from_file()
pushed, _ = Cfgsync::save_sync_new_tokens(
- sync_config, new_tokens, get_corosync_nodes(), $cluster_name
+ sync_config, new_tokens, get_corosync_nodes(), $cluster_name, new_ports
)
if not pushed
return 400, "Configuration conflict detected.\n\nSome nodes had a newer configuration than the local node. Local node's configuration was updated. Please repeat the last action if appropriate."
@@ -542,10 +552,16 @@ already been added to pcsd. You may not add two clusters with the same name int
}
# first we need to authenticate nodes to each other
- tokens = add_prefix_to_keys(get_tokens_of_nodes(@nodes), "node:")
+ token_file_data = read_token_file()
+ tokens_data = add_prefix_to_keys(
+ token_file_data.tokens.select {|k,v| @nodes.include?(k)}, "node:"
+ )
+ ports_data = add_prefix_to_keys(
+ token_file_data.ports.select {|k,v| @nodes.include?(k)}, "port:"
+ )
@nodes.each {|n|
retval, out = send_request_with_token(
- auth_user, n, "/save_tokens", true, tokens
+ auth_user, n, "/save_tokens", true, tokens_data.merge(ports_data)
)
if retval == 404 # backward compatibility layer
warning_messages << "Unable to do correct authentication of cluster on node '#{n}', because it is running an old version of pcs/pcsd."
@@ -621,22 +637,36 @@ already been added to pcsd. You may not add two clusters with the same name int
get '/manage/check_pcsd_status' do
auth_user = PCSAuth.sessionToAuthUser(session)
- node_results = {}
+ node_list = []
if params[:nodes] != nil and params[:nodes] != ''
- node_array = params[:nodes].split(',')
- online, offline, notauthorized = check_gui_status_of_nodes(
- auth_user, node_array
- )
- online.each { |node|
- node_results[node] = 'Online'
- }
- offline.each { |node|
- node_results[node] = 'Offline'
- }
- notauthorized.each { |node|
- node_results[node] = 'Unable to authenticate'
- }
+ node_list = params[:nodes].split(',')
end
+ ports = {}
+ node_list.each { |node|
+ ports[node] = (params["port-#{node}"] || '').strip
+ }
+ node_results = {}
+ node_list_to_check = []
+ token_file = read_token_file()
+ ports.each { |node, port|
+ if port != (token_file.ports[node] || '')
+ node_results[node] = 'Unable to authenticate'
+ else
+ node_list_to_check << node
+ end
+ }
+ online, offline, notauthorized = check_gui_status_of_nodes(
+ auth_user, node_list_to_check
+ )
+ online.each { |node|
+ node_results[node] = 'Online'
+ }
+ offline.each { |node|
+ node_results[node] = 'Offline'
+ }
+ notauthorized.each { |node|
+ node_results[node] = 'Unable to authenticate'
+ }
return JSON.generate(node_results)
end
@@ -671,6 +701,7 @@ already been added to pcsd. You may not add two clusters with the same name int
auth_user = PCSAuth.sessionToAuthUser(session)
node_auth_error = {}
new_tokens = {}
+ new_ports = {}
threads = []
params.each { |node|
threads << Thread.new {
@@ -681,18 +712,26 @@ already been added to pcsd. You may not add two clusters with the same name int
else
pass = node[1]
end
+ port = (params["port-#{nodename}"] || '').strip
+ if port == ''
+ port = nil
+ end
data = {
'node-0' => nodename,
'username' => SUPERUSER,
'password' => pass,
'force' => 1,
+ "port-#{nodename}" => port,
}
node_auth_error[nodename] = 1
- code, response = send_request(auth_user, nodename, 'auth', true, data)
+ code, response = send_request(
+ auth_user, nodename, 'auth', true, data, true, nil, nil, nil, port
+ )
if 200 == code
token = response.strip
if not token.empty?
new_tokens[nodename] = token
+ new_ports[nodename] = port
node_auth_error[nodename] = 0
end
end
@@ -701,11 +740,11 @@ already been added to pcsd. You may not add two clusters with the same name int
}
threads.each { |t| t.join }
- if not new_tokens.empty?
+ if not new_tokens.empty? or not new_ports.empty?
cluster_nodes = get_corosync_nodes()
tokens_cfg = Cfgsync::PcsdTokens.from_file()
sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
- tokens_cfg, new_tokens, cluster_nodes, $cluster_name
+ tokens_cfg, new_tokens, cluster_nodes, $cluster_name, new_ports
)
end
@@ -1160,11 +1199,17 @@ already been added to pcsd. You may not add two clusters with the same name int
end
nodes = get_cluster_nodes(clustername)
- tokens_data = add_prefix_to_keys(get_tokens_of_nodes(nodes), "node:")
+ token_file_data = read_token_file()
+ tokens_data = add_prefix_to_keys(
+ token_file_data.tokens.select {|k,v| nodes.include?(k)}, "node:"
+ )
+ ports_data = add_prefix_to_keys(
+ token_file_data.ports.select {|k,v| nodes.include?(k)}, "port:"
+ )
retval, out = send_cluster_request_with_token(
PCSAuth.getSuperuserAuth(), clustername, "/save_tokens", true,
- tokens_data, true
+ tokens_data.merge(ports_data), true
)
if retval == 404
return [400, "Old version of PCS/PCSD is running on cluster nodes. Fixing authentication is not supported. Use 'pcs cluster auth' command to authenticate the nodes."]
@@ -1185,7 +1230,8 @@ already been added to pcsd. You may not add two clusters with the same name int
end
end
- tokens = read_tokens
+ token_file_data = read_token_file()
+ tokens = token_file_data.tokens
if not tokens.include? new_node
return [400, "New node is not authenticated."]
@@ -1194,7 +1240,10 @@ already been added to pcsd. You may not add two clusters with the same name int
# Save the new node token on all nodes in a cluster the new node is being
# added to. Send the token to one node and let the cluster nodes synchronize
# it by themselves.
- token_data = {"node:#{new_node}" => tokens[new_node]}
+ token_data = {
+ "node:#{new_node}" => tokens[new_node],
+ "port:#{new_node}" => token_file_data.ports[new_node],
+ }
retval, out = send_cluster_request_with_token(
# new node doesn't have config with permissions yet
PCSAuth.getSuperuserAuth(), clustername, '/save_tokens', true, token_data
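(Editorial sketch, not part of the diff: the /get_cluster_tokens handling above stays compatible with old nodes. A new pcsd replies with 'tokens' and 'ports' hashes, an old one replies with the flat token map itself. The standalone parser below, with made-up names, mirrors that check.)

    require 'json'

    def parse_cluster_tokens(out)
      data = JSON.parse(out)
      if ['tokens', 'ports'].all? { |key| data[key].is_a?(Hash) }
        [data['tokens'], data['ports']]   # new format
      else
        [data, {}]                        # old format, no port info
      end
    end

    p parse_cluster_tokens('{"rh7-1": "t1"}')
    # [{"rh7-1"=>"t1"}, {}]
    p parse_cluster_tokens('{"tokens": {"rh7-1": "t1"}, "ports": {"rh7-1": "3000"}}')
    # [{"rh7-1"=>"t1"}, {"rh7-1"=>"3000"}]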
diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
index d98e534..239b915 100644
--- a/pcsd/public/js/nodes-ember.js
+++ b/pcsd/public/js/nodes-ember.js
@@ -1909,19 +1909,20 @@ Pcs.clusterController = Ember.Object.create({
});
}
- var nodes_to_auth = [];
+ var nodes_to_auth = {};
$.each(cluster.get('warning_list'), function(key, val){
if (val.hasOwnProperty("type") && val.type == "nodes_not_authorized"){
- nodes_to_auth = nodes_to_auth.concat(val['node_list']);
+ $.each(val['node_list'], function(i, node) {
+ nodes_to_auth[node] = '';
+ });
}
});
- nodes_to_auth = $.unique(nodes_to_auth);
- if (cluster.get('need_reauth') || nodes_to_auth.length > 0) {
+ if (cluster.get('need_reauth') || Object.keys(nodes_to_auth).length > 0) {
cluster.get('warning_list').pushObject({
- message: "There are few authentication problems. To fix them, click <a href='#' onclick='auth_nodes_dialog(" + JSON.stringify(nodes_to_auth) + ", null, function() {fix_auth_of_cluster();})'>here</a>.",
+ message: "There are few authentication problems. To fix them, click <a href='#' onclick='auth_nodes_dialog(" + JSON.stringify(nodes_to_auth) + ", null, function() {fix_auth_of_cluster();}, true)'>here</a>.",
type: "nodes_not_authorized",
- node_list: self.nodes_to_auth
+ node_list: Object.keys(nodes_to_auth)
});
}
diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
index 4754139..bf67a24 100644
--- a/pcsd/public/js/pcsd.js
+++ b/pcsd/public/js/pcsd.js
@@ -237,16 +237,22 @@ function checkAddingNode(){
$("#add_node_submit_btn").button("option", "disabled", false);
return false;
}
+ var port = $("#add_node_selector input.port").val().trim();
+ var data = {};
+ data["nodes"] = nodeName;
+ data["port-" + nodeName] = port;
ajax_wrapper({
type: 'GET',
url: '/manage/check_pcsd_status',
- data: {"nodes": nodeName},
+ data: data,
timeout: pcs_timeout,
success: function (data) {
var mydata = jQuery.parseJSON(data);
if (mydata[nodeName] == "Unable to authenticate") {
- auth_nodes_dialog([nodeName], function(){$("#add_node_submit_btn").trigger("click");});
+ var nodes_to_auth = {};
+ nodes_to_auth[nodeName] = port;
+ auth_nodes_dialog(nodes_to_auth, function(){$("#add_node_submit_btn").trigger("click");});
$("#add_node_submit_btn").button("option", "disabled", false);
} else if (mydata[nodeName] == "Offline") {
alert("Unable to contact node '" + nodeName + "'");
@@ -659,11 +665,15 @@ function checkExistingNode() {
$('input[name="node-name"]').each(function(i,e) {
node = e.value;
});
+ var port = $("#add_existing_cluster_form input.port").val().trim();
+ var data = {};
+ data["nodes"] = node;
+ data["port-" + node] = port;
ajax_wrapper({
type: 'GET',
url: '/manage/check_pcsd_status',
- data: {"nodes": node},
+ data: data,
timeout: pcs_timeout,
success: function (data) {
mydata = jQuery.parseJSON(data);
@@ -676,25 +686,39 @@ function checkExistingNode() {
});
}
+function get_node_ports_object_from_create_new_cluster_dialog() {
+ var nodes = {}
+ $("#create_new_cluster_form tr.new_node").each(function(i, e) {
+ var node = $(e).find(".node").val().trim();
+ var port = $(e).find(".port").val().trim();
+ nodes[node] = port;
+ });
+ return nodes;
+}
+
function checkClusterNodes() {
- var nodes = [];
- $('input[name^="node-"]').each(function(i,e) {
- if (e.value != "") {
- nodes.push(e.value)
+ var nodes = get_node_ports_object_from_create_new_cluster_dialog();
+ var node_list_str = Object.keys(nodes).join(",");
+ var request_data = {
+ "nodes": node_list_str,
+ };
+ $.each(nodes, function(node, port) {
+ if (port) {
+ request_data["port-" + node] = port;
}
});
ajax_wrapper({
type: 'GET',
url: '/manage/check_pcsd_status',
- data: {"nodes": nodes.join(",")},
+ data: request_data,
timeout: pcs_timeout,
success: function (data) {
mydata = jQuery.parseJSON(data);
ajax_wrapper({
type: 'GET',
url: '/manage/get_nodes_sw_versions',
- data: {"nodes": nodes.join(",")},
+ data: {"nodes": node_list_str},
timeout: pcs_timeout,
success: function(data) {
versions = jQuery.parseJSON(data);
@@ -784,9 +808,10 @@ function auth_nodes_dialog_update(dialog_obj, data) {
return unauth_nodes;
}
-function auth_nodes_dialog(unauth_nodes, callback_success, callback_success_one) {
+function auth_nodes_dialog(unauth_nodes, callback_success, callback_success_one, ports_required) {
callback_success = typeof callback_success !== 'undefined' ? callback_success : null;
callback_success_one = typeof callback_success_one !== 'undefined' ? callback_success_one : null;
+ ports_required = typeof ports_required !== 'undefined' ? ports_required : false;
var buttonsOpts = [
{
@@ -825,15 +850,16 @@ function auth_nodes_dialog(unauth_nodes, callback_success, callback_success_one)
return false;
}
});
+ var unauth_nodes_count = Object.keys(unauth_nodes).length;
- if (unauth_nodes.length == 0) {
+ if (unauth_nodes_count == 0) {
if (callback_success !== null) {
callback_success();
}
return;
}
- if (unauth_nodes.length == 1) {
+ if (unauth_nodes_count == 1) {
dialog_obj.find("#same_pass").hide();
} else {
dialog_obj.find("#same_pass").show();
@@ -843,8 +869,16 @@ function auth_nodes_dialog(unauth_nodes, callback_success, callback_success_one)
}
dialog_obj.find('#auth_nodes_list').empty();
- unauth_nodes.forEach(function(node) {
- dialog_obj.find('#auth_nodes_list').append("\t\t\t<tr><td>" + htmlEncode(node) + '</td><td><input type="password" name="' + htmlEncode(node) + '-pass"></td></tr>\n');
+ $.each(unauth_nodes, function(node, port) {
+ var html = "<tr><td>" + htmlEncode(node) + "</td><td>";
+ if (ports_required) {
+ html += ':<input type="text" size="5" name="port-' + htmlEncode(node) + '" placeholder="2224" />';
+ } else {
+ html += '<input type="hidden" name="port-' + htmlEncode(node) + '" value="' + htmlEncode(port) + '" />';
+ }
+ html += '</td><td><input type="password" name="' + htmlEncode(node) + '-pass"></td></tr>';
+
+ dialog_obj.find('#auth_nodes_list').append(html);
});
}
@@ -905,7 +939,9 @@ function update_existing_cluster_dialog(data) {
});
return;
} else if (data[i] == "Unable to authenticate") {
- auth_nodes_dialog([i], function() {$("#add_existing_submit_btn").trigger("click");});
+ var nodes_to_auth = {};
+ nodes_to_auth[i] = $("#add_existing_cluster_form input.port").val().trim();
+ auth_nodes_dialog(nodes_to_auth, function() {$("#add_existing_submit_btn").trigger("click");});
$("#add_existing_submit_btn").button("option", "disabled", false);
return;
}
@@ -963,7 +999,12 @@ function update_create_cluster_dialog(nodes, version_info) {
});
if (cant_auth_nodes.length > 0) {
- auth_nodes_dialog(cant_auth_nodes, function(){$("#create_cluster_submit_btn").trigger("click")});
+ var ports = get_node_ports_object_from_create_new_cluster_dialog();
+ var unauth_nodes_with_ports = {};
+ $.each(cant_auth_nodes, function(i, node) {
+ unauth_nodes_with_ports[node] = ports[node];
+ });
+ auth_nodes_dialog(unauth_nodes_with_ports, function(){$("#create_cluster_submit_btn").trigger("click")});
$("#create_cluster_submit_btn").button("option", "disabled", false);
return;
}
@@ -1144,7 +1185,8 @@ function create_cluster_add_nodes() {
first_node = node_list.eq(0);
new_node = first_node.clone();
- $("input",new_node).attr("name", "node-"+(cur_num_nodes+1));
+ $("input[name=node-1]",new_node).attr("name", "node-"+(cur_num_nodes+1));
+ $("input[name=port-1]",new_node).attr("name", "port-"+(cur_num_nodes+1));
$("input",new_node).val("");
$("td", new_node).first().text("Node " + (cur_num_nodes+1)+ ":");
new_node.insertAfter(node_list.last());
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index 672b5e9..34d0318 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -1347,16 +1347,16 @@ end
def auth(params, request, auth_user)
token = PCSAuth.validUser(params['username'],params['password'], true)
# If we authorized to this machine, attempt to authorize everywhere
- node_list = []
+ nodes = {}
if token and params["bidirectional"]
params.each { |k,v|
if k.start_with?("node-")
- node_list.push(v)
+ nodes[v] = params["port-#{v}"]
end
}
- if node_list.length > 0
+ if nodes.length > 0
pcs_auth(
- auth_user, node_list, params['username'], params['password'],
+ auth_user, nodes, params['username'], params['password'],
params["force"] == "1"
)
end
@@ -2083,7 +2083,16 @@ def get_cluster_tokens(params, request, auth_user)
on, off = get_nodes
nodes = on + off
nodes.uniq!
- return [200, JSON.generate(get_tokens_of_nodes(nodes))]
+ token_file_data = read_token_file()
+ tokens = token_file_data.tokens.select {|k,v| nodes.include?(k)}
+ if params["with_ports"] != '1'
+ return [200, JSON.generate(tokens)]
+ end
+ data = {
+ :tokens => tokens,
+ :ports => token_file_data.ports.select {|k,v| nodes.include?(k)},
+ }
+ return [200, JSON.generate(data)]
end
def save_tokens(params, request, auth_user)
@@ -2093,18 +2102,23 @@ def save_tokens(params, request, auth_user)
end
new_tokens = {}
+ new_ports = {}
params.each{|nodes|
if nodes[0].start_with?"node:" and nodes[0].length > 5
node = nodes[0][5..-1]
- token = nodes[1]
- new_tokens[node] = token
+ new_tokens[node] = nodes[1]
+ port = (params["port:#{node}"] || '').strip
+ if port == ''
+ port = nil
+ end
+ new_ports[node] = port
end
}
tokens_cfg = Cfgsync::PcsdTokens.from_file()
sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
- tokens_cfg, new_tokens, get_corosync_nodes(), $cluster_name
+ tokens_cfg, new_tokens, get_corosync_nodes(), $cluster_name, new_ports
)
if sync_successful
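(Editorial sketch, not part of the diff: save_tokens now pairs every 'node:<name>' parameter with an optional 'port:<name>' parameter, mapping an empty or missing port to nil. The standalone helper below, with made-up names, shows the pairing.)

    def split_saved_tokens(params)
      tokens, ports = {}, {}
      params.each do |key, value|
        next unless key.start_with?('node:') && key.length > 5
        node = key[5..-1]
        tokens[node] = value
        port = (params["port:#{node}"] || '').strip
        ports[node] = port.empty? ? nil : port
      end
      [tokens, ports]
    end

    p split_saved_tokens(
      'node:rh7-1' => 't1', 'port:rh7-1' => '3000',
      'node:rh7-2' => 't2', 'port:rh7-2' => ''
    )
    # [{"rh7-1"=>"t1", "rh7-2"=>"t2"}, {"rh7-1"=>"3000", "rh7-2"=>nil}]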
diff --git a/pcsd/settings.rb b/pcsd/settings.rb
index a400bc5..b854b1b 100644
--- a/pcsd/settings.rb
+++ b/pcsd/settings.rb
@@ -1,6 +1,7 @@
PCS_EXEC = '/usr/sbin/pcs'
PCSD_EXEC_LOCATION = '/usr/lib/pcsd/'
PCSD_VAR_LOCATION = '/var/lib/pcsd/'
+PCSD_DEFAULT_PORT = 2224
CRT_FILE = PCSD_VAR_LOCATION + 'pcsd.crt'
KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb
index 1a41ab2..eaf2cbf 100644
--- a/pcsd/ssl.rb
+++ b/pcsd/ssl.rb
@@ -8,6 +8,7 @@ require 'socket'
require 'bootstrap.rb'
require 'pcs.rb'
+require 'settings.rb'
unless defined? OpenSSL::SSL::OP_NO_TLSv1_1
OpenSSL::SSL::OP_NO_TLSv1_1 = 268435456
@@ -140,7 +141,7 @@ if ENV['PCSD_BIND_ADDR']
end
webrick_options = {
- :Port => 2224,
+ :Port => ENV['PCSD_PORT'] || PCSD_DEFAULT_PORT,
:BindAddress => primary_addr,
:Host => primary_addr,
:SSLEnable => true,
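(Editorial sketch, not part of the diff: with the new PCSD_PORT knob, ssl.rb picks the WEBrick port from the environment, typically exported via pcsd.conf, falling back to PCSD_DEFAULT_PORT from settings.rb. Minimal illustration:)

    PCSD_DEFAULT_PORT = 2224  # from settings.rb

    # ENV yields a String, the default is an Integer; pcsd passes either
    # value straight through as WEBrick's :Port.
    webrick_options = {
      :Port => ENV['PCSD_PORT'] || PCSD_DEFAULT_PORT
    }
    puts "pcsd would listen on port #{webrick_options[:Port]}"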
diff --git a/pcsd/test/test_cfgsync.rb b/pcsd/test/test_cfgsync.rb
index 152b522..9b0317c 100644
--- a/pcsd/test/test_cfgsync.rb
+++ b/pcsd/test/test_cfgsync.rb
@@ -229,41 +229,49 @@ class TestPcsdTokens < Test::Unit::TestCase
assert_equal('tokens', Cfgsync::PcsdTokens.name)
text =
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 3,
"tokens": {
"rh7-1": "token-rh7-1",
"rh7-2": "token-rh7-2"
+ },
+ "ports": {
+ "rh7-1": "1234",
+ "rh7-2": null
}
}'
cfg = Cfgsync::PcsdTokens.from_text(text)
assert_equal(text, cfg.text)
assert_equal(3, cfg.version)
- assert_equal('c362c4354ceb0b0425c71ed955d43f89c3d4304a', cfg.hash)
+ assert_equal('aedd225c15fb8cc41c1a34a5dd42b9f403ebc0de', cfg.hash)
cfg.version = 4
assert_equal(4, cfg.version)
- assert_equal('9586d6ce66f6fc649618f7f55005d8ddfe54db9b', cfg.hash)
+ assert_equal('365d26bdf61966f8372ec23cdefd2a7cb235de02', cfg.hash)
cfg.text =
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 4,
"tokens": {
"rh7-1": "token-rh7-1",
"rh7-2": "token-rh7-2"
+ },
+ "ports": {
+ "rh7-1": "1234",
+ "rh7-2": null
}
}'
assert_equal(4, cfg.version)
- assert_equal('9586d6ce66f6fc649618f7f55005d8ddfe54db9b', cfg.hash)
+ assert_equal('365d26bdf61966f8372ec23cdefd2a7cb235de02', cfg.hash)
end
def test_file()
FileUtils.cp(File.join(CURRENT_DIR, 'tokens'), CFG_PCSD_TOKENS)
cfg = Cfgsync::PcsdTokens.from_file()
assert_equal(9, cfg.version)
- assert_equal('571afb6abc603f527462818e7dfe278a8a1f64a7', cfg.hash)
+ assert_equal('1ddfeb1a7ada600356945344bd3c137c09cf5845', cfg.hash)
end
def test_file_missing()
@@ -698,34 +706,49 @@ class TestMergeTokens < Test::Unit::TestCase
def test_nothing_to_merge()
old_cfg = Cfgsync::PcsdTokens.from_file()
- new_cfg = Cfgsync::merge_tokens_files(old_cfg, nil, {})
+ new_cfg = Cfgsync::merge_tokens_files(old_cfg, nil, {}, {})
assert_equal(old_cfg.text.strip, new_cfg.text.strip)
old_cfg = Cfgsync::PcsdTokens.from_file()
- new_cfg = Cfgsync::merge_tokens_files(old_cfg, nil, {'rh7-4' => 'token4'})
+ new_cfg = Cfgsync::merge_tokens_files(
+ old_cfg, nil, {'rh7-4' => 'token4'}, {'rh7-4' => '4321'}
+ )
new_cfg_text =
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 9,
"tokens": {
"rh7-1": "2a8b40aa-b539-4713-930a-483468d62ef4",
"rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
"rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71",
"rh7-4": "token4"
+ },
+ "ports": {
+ "rh7-1": null,
+ "rh7-2": "2224",
+ "rh7-3": "1234",
+ "rh7-4": "4321"
}
}'
assert_equal(new_cfg_text, new_cfg.text.strip)
old_cfg = Cfgsync::PcsdTokens.from_file()
- new_cfg = Cfgsync::merge_tokens_files(old_cfg, nil, {'rh7-3' => 'token3'})
+ new_cfg = Cfgsync::merge_tokens_files(
+ old_cfg, nil, {'rh7-3' => 'token3'}, {}
+ )
new_cfg_text =
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 9,
"tokens": {
"rh7-1": "2a8b40aa-b539-4713-930a-483468d62ef4",
"rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
"rh7-3": "token3"
+ },
+ "ports": {
+ "rh7-1": null,
+ "rh7-2": "2224",
+ "rh7-3": "1234"
}
}'
assert_equal(new_cfg_text, new_cfg.text.strip)
@@ -735,45 +758,64 @@ class TestMergeTokens < Test::Unit::TestCase
to_merge = [
Cfgsync::PcsdTokens.from_text(
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 1,
"tokens": {
"rh7-1": "token1",
"rh7-4": "token4a"
+ },
+ "ports": {
+ "rh7-1": null,
+ "rh7-4": "2224"
}
}'
)
]
old_cfg = Cfgsync::PcsdTokens.from_file()
- new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {})
+ new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {}, {})
assert_equal(old_cfg.text.strip, new_cfg.text.strip)
old_cfg = Cfgsync::PcsdTokens.from_file()
- new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {'rh7-4' => 'token4'})
+ new_cfg = Cfgsync::merge_tokens_files(
+ old_cfg, to_merge, {'rh7-4' => 'token4'}, {'rh7-4' => '4321'}
+ )
new_cfg_text =
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 9,
"tokens": {
"rh7-1": "2a8b40aa-b539-4713-930a-483468d62ef4",
"rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
"rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71",
"rh7-4": "token4"
+ },
+ "ports": {
+ "rh7-1": null,
+ "rh7-2": "2224",
+ "rh7-3": "1234",
+ "rh7-4": "4321"
}
}'
assert_equal(new_cfg_text, new_cfg.text.strip)
old_cfg = Cfgsync::PcsdTokens.from_file()
- new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {'rh7-3' => 'token3'})
+ new_cfg = Cfgsync::merge_tokens_files(
+ old_cfg, to_merge, {'rh7-3' => 'token3'}, {}
+ )
new_cfg_text =
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 9,
"tokens": {
"rh7-1": "2a8b40aa-b539-4713-930a-483468d62ef4",
"rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
"rh7-3": "token3"
+ },
+ "ports": {
+ "rh7-1": null,
+ "rh7-2": "2224",
+ "rh7-3": "1234"
}
}'
assert_equal(new_cfg_text, new_cfg.text.strip)
@@ -783,57 +825,79 @@ class TestMergeTokens < Test::Unit::TestCase
to_merge = [
Cfgsync::PcsdTokens.from_text(
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 11,
"tokens": {
"rh7-1": "token1",
"rh7-4": "token4a"
+ },
+ "ports": {
+ "rh7-1": "4321",
+ "rh7-4": null
}
}'
)
]
old_cfg = Cfgsync::PcsdTokens.from_file()
- new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {})
+ new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {}, {})
new_cfg_text =
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 11,
"tokens": {
"rh7-1": "token1",
"rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
"rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71",
"rh7-4": "token4a"
+ },
+ "ports": {
+ "rh7-1": "4321",
+ "rh7-2": "2224",
+ "rh7-3": "1234",
+ "rh7-4": null
}
}'
assert_equal(new_cfg_text, new_cfg.text.strip)
old_cfg = Cfgsync::PcsdTokens.from_file()
- new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {'rh7-4' => 'token4'})
+ new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {'rh7-4' => 'token4'}, {'rh7-4' => '12345'})
new_cfg_text =
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 11,
"tokens": {
"rh7-1": "token1",
"rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
"rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71",
"rh7-4": "token4"
+ },
+ "ports": {
+ "rh7-1": "4321",
+ "rh7-2": "2224",
+ "rh7-3": "1234",
+ "rh7-4": "12345"
}
}'
assert_equal(new_cfg_text, new_cfg.text.strip)
old_cfg = Cfgsync::PcsdTokens.from_file()
- new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {'rh7-3' => 'token3'})
+ new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {'rh7-3' => 'token3'}, {})
new_cfg_text =
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 11,
"tokens": {
"rh7-1": "token1",
"rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
"rh7-3": "token3",
"rh7-4": "token4a"
+ },
+ "ports": {
+ "rh7-1": "4321",
+ "rh7-2": "2224",
+ "rh7-3": "1234",
+ "rh7-4": null
}
}'
assert_equal(new_cfg_text, new_cfg.text.strip)
@@ -842,88 +906,116 @@ class TestMergeTokens < Test::Unit::TestCase
def test_more_to_merge()
to_merge_12 = Cfgsync::PcsdTokens.from_text(
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 12,
"tokens": {
"rh7-2": "token2",
"rh7-4": "token4b"
+ },
+ "ports": {
+ "rh7-2": "port2",
+ "rh7-4": "port4b"
}
}'
)
to_merge_11 = Cfgsync::PcsdTokens.from_text(
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 11,
"tokens": {
"rh7-1": "token1",
"rh7-4": "token4a"
+ },
+ "ports": {
+ "rh7-1": "port1",
+ "rh7-4": "port4a"
}
}'
)
new_cfg_text =
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 12,
"tokens": {
"rh7-1": "token1",
"rh7-2": "token2",
"rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71",
"rh7-4": "token4b"
+ },
+ "ports": {
+ "rh7-1": "port1",
+ "rh7-2": "port2",
+ "rh7-3": "1234",
+ "rh7-4": "port4b"
}
}'
old_cfg = Cfgsync::PcsdTokens.from_file()
new_cfg = Cfgsync::merge_tokens_files(
- old_cfg, [to_merge_11, to_merge_12], {}
+ old_cfg, [to_merge_11, to_merge_12], {}, {}
)
assert_equal(new_cfg_text, new_cfg.text.strip)
old_cfg = Cfgsync::PcsdTokens.from_file()
new_cfg = Cfgsync::merge_tokens_files(
- old_cfg, [to_merge_12, to_merge_11], {}
+ old_cfg, [to_merge_12, to_merge_11], {}, {}
)
assert_equal(new_cfg_text, new_cfg.text.strip)
new_cfg_text =
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 12,
"tokens": {
"rh7-1": "token1",
"rh7-2": "token2",
"rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71",
"rh7-4": "token4"
+ },
+ "ports": {
+ "rh7-1": "port1",
+ "rh7-2": "port2",
+ "rh7-3": "1234",
+ "rh7-4": "port4"
}
}'
old_cfg = Cfgsync::PcsdTokens.from_file()
new_cfg = Cfgsync::merge_tokens_files(
- old_cfg, [to_merge_11, to_merge_12], {'rh7-4' => 'token4'}
+ old_cfg, [to_merge_11, to_merge_12], {'rh7-4' => 'token4'},
+ {'rh7-4' => 'port4'}
)
assert_equal(new_cfg_text, new_cfg.text.strip)
old_cfg = Cfgsync::PcsdTokens.from_file()
new_cfg = Cfgsync::merge_tokens_files(
- old_cfg, [to_merge_12, to_merge_11], {'rh7-4' => 'token4'}
+ old_cfg, [to_merge_12, to_merge_11], {'rh7-4' => 'token4'},
+ {'rh7-4' => 'port4'}
)
assert_equal(new_cfg_text, new_cfg.text.strip)
new_cfg_text =
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 12,
"tokens": {
"rh7-1": "token1",
"rh7-2": "token2",
"rh7-3": "token3",
"rh7-4": "token4b"
+ },
+ "ports": {
+ "rh7-1": "port1",
+ "rh7-2": "port2",
+ "rh7-3": "1234",
+ "rh7-4": "port4b"
}
}'
old_cfg = Cfgsync::PcsdTokens.from_file()
new_cfg = Cfgsync::merge_tokens_files(
- old_cfg, [to_merge_11, to_merge_12], {'rh7-3' => 'token3'}
+ old_cfg, [to_merge_11, to_merge_12], {'rh7-3' => 'token3'}, {}
)
assert_equal(new_cfg_text, new_cfg.text.strip)
old_cfg = Cfgsync::PcsdTokens.from_file()
new_cfg = Cfgsync::merge_tokens_files(
- old_cfg, [to_merge_12, to_merge_11], {'rh7-3' => 'token3'}
+ old_cfg, [to_merge_12, to_merge_11], {'rh7-3' => 'token3'}, {}
)
assert_equal(new_cfg_text, new_cfg.text.strip)
end
diff --git a/pcsd/test/test_config.rb b/pcsd/test/test_config.rb
index 441ac6d..43021f9 100644
--- a/pcsd/test/test_config.rb
+++ b/pcsd/test/test_config.rb
@@ -673,9 +673,11 @@ class TestTokens < Test::Unit::TestCase
def fixture_empty_config()
return(
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 0,
"tokens": {
+ },
+ "ports": {
}
}'
)
@@ -713,14 +715,17 @@ class TestTokens < Test::Unit::TestCase
text = '{"rh7-1": "token-rh7-1", "rh7-2": "token-rh7-2"}'
cfg = PCSTokens.new(text)
assert_equal(2, cfg.tokens.length)
+ assert_equal(0, cfg.ports.length)
assert_equal('token-rh7-1', cfg.tokens['rh7-1'])
assert_equal(
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 0,
"tokens": {
"rh7-1": "token-rh7-1",
"rh7-2": "token-rh7-2"
+ },
+ "ports": {
}
}',
cfg.text
@@ -737,11 +742,14 @@ class TestTokens < Test::Unit::TestCase
assert_equal(2, cfg.format_version)
assert_equal(0, cfg.data_version)
assert_equal(0, cfg.tokens.length)
+ assert_equal(0, cfg.ports.length)
assert_equal(
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 0,
"tokens": {
+ },
+ "ports": {
}
}',
cfg.text
@@ -760,7 +768,66 @@ class TestTokens < Test::Unit::TestCase
assert_equal(2, cfg.format_version)
assert_equal(9, cfg.data_version)
assert_equal(2, cfg.tokens.length)
+ assert_equal(0, cfg.ports.length)
assert_equal('token-rh7-1', cfg.tokens['rh7-1'])
+ expected_text =
+'{
+ "format_version": 3,
+ "data_version": 9,
+ "tokens": {
+ "rh7-1": "token-rh7-1",
+ "rh7-2": "token-rh7-2"
+ },
+ "ports": {
+ }
+}'
+ assert_equal(expected_text, cfg.text)
+ end
+
+ def test_parse_format3()
+ text =
+'{
+ "format_version": 3,
+ "tokens": {},
+ "ports": {}
+}'
+ cfg = PCSTokens.new(text)
+ assert_equal(3, cfg.format_version)
+ assert_equal(0, cfg.data_version)
+ assert_equal(0, cfg.tokens.length)
+ assert_equal(0, cfg.ports.length)
+ assert_equal(
+'{
+ "format_version": 3,
+ "data_version": 0,
+ "tokens": {
+ },
+ "ports": {
+ }
+}',
+ cfg.text
+ )
+
+ text =
+'{
+ "format_version": 3,
+ "data_version": 9,
+ "tokens": {
+ "rh7-1": "token-rh7-1",
+ "rh7-2": "token-rh7-2"
+ },
+ "ports": {
+ "rh7-1": "1234",
+ "rh7-2": null
+ }
+}'
+ cfg = PCSTokens.new(text)
+ assert_equal(3, cfg.format_version)
+ assert_equal(9, cfg.data_version)
+ assert_equal(2, cfg.tokens.length)
+ assert_equal(2, cfg.ports.length)
+ assert_equal('token-rh7-1', cfg.tokens['rh7-1'])
+ assert_equal('1234', cfg.ports['rh7-1'])
assert_equal(text, cfg.text)
end
@@ -774,6 +841,14 @@ class TestTokens < Test::Unit::TestCase
},
cfg.tokens
)
+ assert_equal(
+ {
+ 'rh7-1' => nil,
+ 'rh7-2' => '2224',
+ 'rh7-3' => '1234',
+ },
+ cfg.ports
+ )
cfg.tokens.delete('rh7-2')
assert_equal(
@@ -793,14 +868,39 @@ class TestTokens < Test::Unit::TestCase
},
cfg.tokens
)
+
+ cfg.ports.delete('rh7-3')
+ assert_equal(
+ {
+ 'rh7-1' => nil,
+ 'rh7-2' => '2224',
+ },
+ cfg.ports
+ )
+
+ cfg.ports['rh7-3'] = "4321"
+ assert_equal(
+ {
+ 'rh7-1' => nil,
+ 'rh7-2' => '2224',
+ 'rh7-3' => '4321',
+ },
+ cfg.ports
+ )
+
assert_equal(
'{
- "format_version": 2,
+ "format_version": 3,
"data_version": 9,
"tokens": {
"rh7-1": "2a8b40aa-b539-4713-930a-483468d62ef4",
"rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
"rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71"
+ },
+ "ports": {
+ "rh7-1": null,
+ "rh7-2": "2224",
+ "rh7-3": "4321"
}
}',
cfg.text
diff --git a/pcsd/test/tokens b/pcsd/test/tokens
index ee449b1..93e3e73 100644
--- a/pcsd/test/tokens
+++ b/pcsd/test/tokens
@@ -1,9 +1,14 @@
{
- "format_version": 2,
+ "format_version": 3,
"data_version": 9,
"tokens": {
"rh7-1": "2a8b40aa-b539-4713-930a-483468d62ef4",
"rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
"rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71"
+ },
+ "ports": {
+ "rh7-1": null,
+ "rh7-2": "2224",
+ "rh7-3": "1234"
}
}
diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb
index 2b12aaa..0f87263 100644
--- a/pcsd/views/manage.erb
+++ b/pcsd/views/manage.erb
@@ -192,20 +192,21 @@
</table>
<table>
<tr><td align=right>Node Name/IP:</td><td><input size="50" name="node-name" type="text"></td></tr>
+ <tr><td align=right>PCSD port:</td><td><input class="port" size="5" name="node-port" type="text" placeholder="<%= PCSD_DEFAULT_PORT %>"></td></tr>
</table>
</form>
</div>
<div id="create_new_cluster" style="display: none;">
<form id="create_new_cluster_form" action="/manage/newcluster" method="post">
<br>
- Enter the hostnames of the nodes you would like to use to create a cluster:
+ Enter the hostnames and ports of the nodes you would like to use to create a cluster:
<br>
<br>
<table>
- <tr><td align=right>Cluster Name:</td><td><input size="50" name="clustername" type="text"></input></td></tr>
- <tr><td align=right>Node 1:</td><td><input size="50" name="node-1" type="text"></input></td></tr>
- <tr><td align=right>Node 2:</td><td><input size="50" name="node-2" type="text"></input></td></tr>
- <tr><td align=right>Node 3:</td><td><input size="50" name="node-3" type="text"></input></td></tr>
+ <tr><td align=right>Cluster Name:</td><td colspan="2"><input size="50" name="clustername" type="text"></input></td></tr>
+ <tr class="new_node"><td align=right>Node 1:</td><td><input class="node" size="50" name="node-1" type="text"></input></td><td>:<input class="port" size="5" name="port-1" type="text" placeholder="<%= PCSD_DEFAULT_PORT %>"/></td></tr>
+ <tr class="new_node"><td align=right>Node 2:</td><td><input class="node" size="50" name="node-2" type="text"></input></td><td>:<input class="port" size="5" name="port-2" type="text" placeholder="<%= PCSD_DEFAULT_PORT %>"/></td></tr>
+ <tr class="new_node"><td align=right>Node 3:</td><td><input class="node" size="50" name="node-3" type="text"></input></td><td>:<input class="port" size="5" name="port-3" type="text" placeholder="<%= PCSD_DEFAULT_PORT %>"/></td></tr>
<tr><td></td><td id="manage_more_nodes" onclick="create_cluster_add_nodes();" style="color: #2B85DB;">More nodes...</td></tr>
</table>
<table class="err_msg_table" style="width:100%">
diff --git a/pcsd/views/nodes.erb b/pcsd/views/nodes.erb
index 3c3aeed..5849b75 100644
--- a/pcsd/views/nodes.erb
+++ b/pcsd/views/nodes.erb
@@ -330,6 +330,10 @@
<td>Node Name:</td>
<td><input type=text name=new_nodename></td>
</tr>
+ <tr>
+ <td>PCSD port:</td>
+ <td><input class="port" type="text" name="new_node_port" placeholder="<%= PCSD_DEFAULT_PORT %>"/></td>
+ </tr>
{{#if Pcs.need_ring1_address}}
<tr>
<td>Ring1 address:</td>
diff --git a/pylintrc b/pylintrc
index 3bdb564..bb4da37 100644
--- a/pylintrc
+++ b/pylintrc
@@ -64,7 +64,7 @@
#W0703: [broad-except] Catching too general exception %s
#W0710: [nonstandard-exception] Exception doesn't inherit from standard "Exception" class
#W1401: [anomalous-backslash-in-string] Anomalous backslash in string: \'%s\'. String constant might be missing an r prefix.
-disable=no-name-in-module, import-error, nonstandard-exception, unused-argument, redefined-outer-name, bare-except, anomalous-backslash-in-string, no-member, star-args, undefined-loop-variable, maybe-no-member, broad-except, too-few-public-methods, not-callable, protected-access, method-hidden, too-many-arguments, global-statement, unbalanced-tuple-unpacking, fixme, lost-exception, dangerous-default-value, too-many-return-statements, no-self-use, no-init, redefined-builtin, wildcard-imp [...]
+disable=no-name-in-module, import-error, nonstandard-exception, unused-argument, redefined-outer-name, bare-except, anomalous-backslash-in-string, no-member, star-args, undefined-loop-variable, maybe-no-member, broad-except, too-few-public-methods, not-callable, protected-access, method-hidden, too-many-arguments, global-statement, unbalanced-tuple-unpacking, fixme, lost-exception, dangerous-default-value, too-many-return-statements, no-self-use, no-init, redefined-builtin, wildcard-imp [...]
[DESIGN]
# Maximum number of locals for function / method body
diff --git a/setup.py b/setup.py
index df385cb..5ba9b27 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ class CleanCommand(Command):
setup(
name='pcs',
- version='0.9.159',
+ version='0.9.160',
description='Pacemaker Configuration System',
author='Chris Feist',
author_email='cfeist at redhat.com',
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/pcs.git