[Debian-ha-commits] [pcs] 02/14: New upstream version 0.9.158
Valentin Vidic
vvidic-guest at moszumanska.debian.org
Thu Jun 29 01:29:50 UTC 2017
This is an automated email from the git hooks/post-receive script.
vvidic-guest pushed a commit to branch master
in repository pcs.
commit e4a3a8734b1b5d6003020cbc8c2428768e3903d9
Author: Valentin Vidic <Valentin.Vidic at CARNet.hr>
Date: Thu Jun 29 00:31:04 2017 +0200
New upstream version 0.9.158
---
CHANGELOG.md | 214 +-
MANIFEST.in | 2 +-
Makefile | 45 +-
newversion.py | 13 +-
pcs/acl.py | 2 +-
pcs/alert.py | 2 +-
pcs/app.py | 119 +-
pcs/{bash_completion.sh => bash_completion} | 0
pcs/booth.py | 8 +-
pcs/cli/booth/command.py | 23 +-
pcs/cli/booth/console_report.py | 7 +-
pcs/cli/booth/env.py | 75 +-
pcs/cli/booth/test/test_env.py | 123 +-
pcs/{test/tools/test => cli/cluster}/__init__.py | 0
pcs/cli/cluster/command.py | 103 +
pcs/{test/tools => cli/cluster}/test/__init__.py | 0
pcs/cli/cluster/test/test_command.py | 24 +
pcs/cli/common/console_report.py | 592 +++-
pcs/cli/common/{env.py => env_cli.py} | 2 +
pcs/cli/common/env_file.py | 75 +
pcs/cli/common/errors.py | 6 +
pcs/cli/common/lib_wrapper.py | 89 +
pcs/cli/common/parse_args.py | 304 +-
pcs/cli/common/reports.py | 62 +-
pcs/cli/common/test/test_console_report.py | 938 ++++-
pcs/cli/common/test/test_env_file.py | 138 +
pcs/cli/common/test/test_parse_args.py | 394 ++-
pcs/cli/common/test/test_reports.py | 43 +-
pcs/cli/constraint_all/console_report.py | 16 +-
pcs/cli/constraint_all/test/test_console_report.py | 29 +-
pcs/cli/constraint_order/console_report.py | 2 +-
pcs/cli/fencing_topology.py | 24 +
pcs/{test/tools/test => cli/resource}/__init__.py | 0
pcs/cli/resource/parse_args.py | 191 ++
pcs/{test/tools => cli/resource}/test/__init__.py | 0
pcs/cli/resource/test/test_parse_args.py | 672 ++++
pcs/cluster.py | 792 +++--
pcs/common/env_file_role_codes.py | 1 +
...{env_file_role_codes.py => fencing_topology.py} | 5 +-
pcs/common/pcs_pycurl.py | 34 +
pcs/common/report_codes.py | 75 +-
pcs/common/test/test_tools.py | 24 +
pcs/common/tools.py | 29 +
pcs/config.py | 118 +-
pcs/constraint.py | 577 ++--
pcs/lib/booth/config_files.py | 4 -
pcs/lib/booth/config_structure.py | 11 +-
pcs/lib/booth/env.py | 6 +-
pcs/lib/booth/resource.py | 47 +-
pcs/lib/booth/test/test_config_structure.py | 10 +-
pcs/lib/booth/test/test_env.py | 53 +-
pcs/lib/booth/test/test_resource.py | 104 +-
pcs/lib/cib/acl.py | 278 +-
pcs/lib/cib/alert.py | 52 +-
pcs/lib/cib/constraint/colocation.py | 4 +-
pcs/lib/cib/constraint/constraint.py | 39 +-
pcs/lib/cib/constraint/resource_set.py | 8 +-
pcs/lib/cib/constraint/ticket.py | 6 +-
pcs/lib/cib/fencing_topology.py | 322 ++
pcs/lib/cib/node.py | 91 +
pcs/lib/cib/nvpair.py | 135 +-
pcs/lib/cib/resource.py | 15 -
pcs/lib/cib/resource/__init__.py | 17 +
pcs/lib/cib/resource/bundle.py | 516 +++
pcs/lib/cib/resource/clone.py | 71 +
pcs/lib/cib/resource/common.py | 203 ++
pcs/lib/cib/resource/group.py | 82 +
pcs/lib/cib/resource/guest_node.py | 243 ++
pcs/lib/cib/resource/operations.py | 364 ++
pcs/lib/cib/resource/primitive.py | 134 +
pcs/lib/cib/resource/remote_node.py | 219 ++
pcs/lib/cib/stonith.py | 15 +
.../cib/test/test_acl.py} | 338 +-
pcs/lib/cib/test/test_alert.py | 163 +-
pcs/lib/cib/test/test_constraint.py | 152 +-
pcs/lib/cib/test/test_constraint_colocation.py | 2 +-
pcs/lib/cib/test/test_constraint_order.py | 2 +-
pcs/lib/cib/test/test_constraint_ticket.py | 10 +-
pcs/lib/cib/test/test_fencing_topology.py | 984 ++++++
pcs/lib/cib/test/test_node.py | 233 ++
pcs/lib/cib/test/test_nvpair.py | 177 +-
pcs/lib/cib/test/test_resource.py | 21 -
pcs/lib/cib/test/test_resource_bundle.py | 42 +
pcs/lib/cib/test/test_resource_clone.py | 109 +
pcs/lib/cib/test/test_resource_common.py | 570 ++++
pcs/lib/cib/test/test_resource_group.py | 163 +
pcs/lib/cib/test/test_resource_guest_node.py | 444 +++
pcs/lib/cib/test/test_resource_operations.py | 392 +++
pcs/lib/cib/test/test_resource_primitive.py | 96 +
pcs/lib/cib/test/test_resource_remote_node.py | 287 ++
pcs/lib/cib/test/test_resource_set.py | 2 +-
.../cib/test/test_tools.py} | 531 +--
pcs/lib/cib/tools.py | 295 +-
pcs/lib/cluster_conf_facade.py | 4 +-
pcs/lib/commands/acl.py | 215 +-
pcs/lib/commands/alert.py | 5 +-
pcs/lib/commands/booth.py | 62 +-
pcs/lib/commands/cluster.py | 495 +++
pcs/lib/commands/fencing_topology.py | 122 +
pcs/lib/commands/node.py | 166 +
pcs/lib/commands/resource.py | 726 ++++
pcs/lib/commands/resource_agent.py | 18 +-
pcs/lib/commands/sbd.py | 273 +-
pcs/lib/commands/stonith.py | 143 +
pcs/lib/commands/stonith_agent.py | 19 +-
.../commands/test/resource}/__init__.py | 0
pcs/lib/commands/test/resource/common.py | 76 +
pcs/lib/commands/test/resource/fixture.py | 201 ++
.../commands/test/resource/test_bundle_create.py | 1152 +++++++
.../commands/test/resource/test_bundle_update.py | 826 +++++
.../commands/test/resource/test_resource_create.py | 1295 +++++++
.../test/resource/test_resource_enable_disable.py | 1519 +++++++++
.../test/resource/test_resource_manage_unmanage.py | 1092 ++++++
pcs/lib/commands/test/test_acl.py | 372 +-
pcs/lib/commands/test/test_alert.py | 49 +-
pcs/lib/commands/test/test_booth.py | 5 +-
pcs/lib/commands/test/test_fencing_topology.py | 257 ++
pcs/lib/commands/test/test_node.py | 296 ++
pcs/lib/commands/test/test_resource_agent.py | 31 +-
pcs/lib/commands/test/test_stonith_agent.py | 24 +-
pcs/lib/commands/test/test_ticket.py | 9 +-
pcs/lib/corosync/config_facade.py | 19 +-
pcs/lib/env.py | 94 +-
pcs/lib/env_file.py | 37 +-
pcs/lib/env_tools.py | 35 +
pcs/lib/errors.py | 23 +
pcs/lib/exchange_formats.md | 12 +
pcs/lib/external.py | 244 +-
pcs/lib/node.py | 31 +
pcs/lib/node_communication_format.py | 161 +
pcs/lib/nodes_task.py | 329 +-
pcs/{test/tools/test => lib/pacemaker}/__init__.py | 0
pcs/lib/pacemaker/env.py | 28 +
pcs/lib/{pacemaker.py => pacemaker/live.py} | 262 +-
pcs/lib/{pacemaker_state.py => pacemaker/state.py} | 120 +-
pcs/{test/tools => lib/pacemaker}/test/__init__.py | 0
.../pacemaker/test/test_live.py} | 447 +--
pcs/lib/pacemaker/test/test_state.py | 858 +++++
.../test/test_values.py} | 66 +-
.../{pacemaker_values.py => pacemaker/values.py} | 87 +-
pcs/lib/reports.py | 1015 +++++-
pcs/lib/resource_agent.py | 407 ++-
pcs/lib/sbd.py | 198 +-
pcs/{test/test_lib_env.py => lib/test/test_env.py} | 312 +-
pcs/lib/test/test_env_file.py | 132 +-
pcs/lib/test/test_errors.py | 64 +-
pcs/lib/test/test_node_communication_format.py | 119 +
.../test/test_nodes_task.py} | 280 +-
pcs/lib/test/test_resource_agent.py | 711 +++-
pcs/lib/test/test_validate.py | 1045 ++++++
pcs/lib/tools.py | 5 +
pcs/lib/validate.py | 532 +++
pcs/lib/xml_tools.py | 86 +
pcs/node.py | 205 +-
pcs/pcs.8 | 318 +-
pcs/pcsd.py | 102 +-
pcs/qdevice.py | 2 +-
pcs/quorum.py | 4 +-
pcs/resource.py | 856 ++---
pcs/settings_default.py | 10 +-
pcs/status.py | 2 +-
pcs/stonith.py | 616 ++--
pcs/test/{tools/test => cib_resource}/__init__.py | 0
pcs/test/cib_resource/common.py | 29 +
pcs/test/cib_resource/stonith_common.py | 30 +
pcs/test/cib_resource/test_bundle.py | 491 +++
pcs/test/cib_resource/test_create.py | 1470 ++++++++
pcs/test/cib_resource/test_manage_unmanage.py | 277 ++
pcs/test/cib_resource/test_operation_add.py | 135 +
pcs/test/cib_resource/test_stonith_create.py | 289 ++
.../cib_resource/test_stonith_enable_disable.py | 107 +
pcs/test/resources/cib-empty-2.3-withnodes.xml | 12 +
pcs/test/resources/cib-empty-2.5-withnodes.xml | 12 +
pcs/test/resources/cib-empty-2.5.xml | 2 +-
.../{cib-empty-2.5.xml => cib-empty-2.6.xml} | 2 +-
.../{cib-empty-2.5.xml => cib-empty-2.8.xml} | 2 +-
...{cib-empty-2.5.xml => cib-empty-with3nodes.xml} | 5 +-
.../resource_agent_ocf_heartbeat_dummy.xml | 49 +
pcs/test/suite.py | 46 +-
pcs/test/test_acl.py | 6 +-
pcs/test/test_alert.py | 35 +-
pcs/test/test_booth.py | 24 +-
pcs/test/test_cluster.py | 187 +-
pcs/test/test_cluster_pcmk_remote.py | 504 +++
pcs/test/test_constraints.py | 939 ++++-
pcs/test/test_lib_commands_quorum.py | 58 +-
pcs/test/test_lib_commands_sbd.py | 656 +++-
pcs/test/test_lib_corosync_config_facade.py | 69 +-
pcs/test/test_lib_external.py | 771 +++--
pcs/test/test_lib_node.py | 141 +-
pcs/test/test_lib_pacemaker_state.py | 154 -
pcs/test/test_lib_sbd.py | 554 ++-
pcs/test/test_lib_tools.py | 3 +
pcs/test/test_node.py | 507 ++-
pcs/test/test_quorum.py | 10 +-
pcs/test/test_resource.py | 3599 ++++++++++----------
pcs/test/test_stonith.py | 1425 +++++---
pcs/test/test_utils.py | 204 +-
pcs/test/test_xml_tools.py | 167 +
pcs/test/tools/assertions.py | 52 +-
pcs/test/tools/{test => check}/__init__.py | 0
pcs/test/tools/{test => check}/test_misc.py | 0
pcs/test/tools/cib.py | 64 +
pcs/test/tools/color_text_runner/format.py | 3 +-
pcs/test/tools/color_text_runner/result.py | 15 +-
pcs/test/tools/color_text_runner/writer.py | 42 +-
pcs/test/tools/custom_mock.py | 77 +-
pcs/test/tools/integration_lib.py | 127 +
pcs/test/tools/misc.py | 116 +-
pcs/test/tools/xml.py | 12 +-
pcs/usage.py | 577 ++--
pcs/utils.py | 502 +--
pcsd/Gemfile | 2 +
pcsd/Gemfile.lock | 12 +-
pcsd/Makefile | 21 +-
pcsd/bootstrap.rb | 19 +-
pcsd/pcs.rb | 230 +-
pcsd/pcsd-cli.rb | 2 +-
pcsd/pcsd.8 | 100 +
pcsd/pcsd.conf | 9 +-
pcsd/pcsd.rb | 8 +-
pcsd/pcsd_action_command.rb | 92 +
pcsd/pcsd_exchange_format.rb | 52 +
pcsd/pcsd_file.rb | 189 +
pcsd/pcsd_remove_file.rb | 29 +
pcsd/public/css/liberation.css | 1 -
pcsd/public/css/overpass.css | 17 +-
pcsd/public/css/overpass_bold-web.eot | Bin 32369 -> 0 bytes
pcsd/public/css/overpass_bold-web.svg | 470 ---
pcsd/public/css/overpass_bold-web.ttf | Bin 63656 -> 0 bytes
pcsd/public/css/overpass_bold-web.woff | Bin 38056 -> 0 bytes
pcsd/public/css/overpass_regular-web.eot | Bin 31030 -> 0 bytes
pcsd/public/css/overpass_regular-web.svg | 470 ---
pcsd/public/css/overpass_regular-web.ttf | Bin 61808 -> 0 bytes
pcsd/public/css/overpass_regular-web.woff | Bin 36352 -> 0 bytes
pcsd/public/js/nodes-ember.js | 12 +-
pcsd/public/js/pcsd.js | 30 +-
pcsd/remote.rb | 398 ++-
pcsd/resource.rb | 81 +-
pcsd/session.rb | 1 +
pcsd/settings.rb | 3 +
pcsd/settings.rb.debian | 3 +
pcsd/ssl.rb | 54 +-
pcsd/test/test_config.rb | 12 +-
pcsd/views/_resource.erb | 25 +-
pcsd/views/main.erb | 6 +
pcsd/views/nodes.erb | 9 +
pylintrc | 26 +-
setup.py | 5 +-
249 files changed, 40876 insertions(+), 9450 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5b3c97b..1c15a01 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,217 @@
# Change Log
+## [0.9.158] - 2017-05-23
+
+### Added
+- Support for bundle resources (CLI only) ([rhbz#1433016])
+- Commands for adding and removing guest and remote nodes including handling
+ pacemaker authkey (CLI only) ([rhbz#1176018], [rhbz#1254984], [rhbz#1386114],
+ [rhbz#1386512])
+- Command `pcs cluster node clear` to remove a node from pacemaker's
+ configuration and caches
+- Backing up and restoring cluster configuration with the `pcs config backup`
+ and `pcs config restore` commands now supports corosync and pacemaker
+ authkeys ([rhbz#1165821], [rhbz#1176018])
+
+### Deprecated
+- `pcs cluster remote-node add` and `pcs cluster remote-node remove` commands
+ have been deprecated in favor of `pcs cluster node add-guest` and `pcs
+ cluster node remove-guest` commands ([rhbz#1386512])
+
+### Fixed
+- Fixed a bug which under specific conditions caused pcsd to crash on start
+ when running under systemd ([ghissue#134])
+- `pcs resource unmanage` now sets the unmanaged flag on primitive resources
+ even if a clone or master/slave resource is specified. Thus the primitive
+ resources will not become managed just by uncloning. This also prevents some
+ discrepancies between disabled monitor operations and the unmanaged flag.
+ ([rhbz#1303969])
+- `pcs resource unmanage --monitor` now properly disables monitor operations
+ even if a clone or master/slave resource is specified. ([rhbz#1303969])
+- `--help` option now shows help just for the specified command. Previously the
+ usage for a whole group of commands was shown.
+- Fixed a crash when `pcs cluster cib-push` is called with an explicit value of
+ the `--wait` flag ([rhbz#1422667])
+- Fixed a pcsd crash when an unusable address is set in `PCSD_BIND_ADDR`
+ ([rhbz#1373614])
+- Removal of a pacemaker remote resource no longer causes the respective remote
+ node to be fenced ([rhbz#1390609])
+
+### Changed
+- Newly created clusters are set up to encrypt corosync communication
+ ([rhbz#1165821], [ghissue#98])
+
+[ghissue#98]: https://github.com/ClusterLabs/pcs/issues/98
+[ghissue#134]: https://github.com/ClusterLabs/pcs/issues/134
+[rhbz#1176018]: https://bugzilla.redhat.com/show_bug.cgi?id=1176018
+[rhbz#1254984]: https://bugzilla.redhat.com/show_bug.cgi?id=1254984
+[rhbz#1303969]: https://bugzilla.redhat.com/show_bug.cgi?id=1303969
+[rhbz#1373614]: https://bugzilla.redhat.com/show_bug.cgi?id=1373614
+[rhbz#1386114]: https://bugzilla.redhat.com/show_bug.cgi?id=1386114
+[rhbz#1386512]: https://bugzilla.redhat.com/show_bug.cgi?id=1386512
+[rhbz#1390609]: https://bugzilla.redhat.com/show_bug.cgi?id=1390609
+[rhbz#1422667]: https://bugzilla.redhat.com/show_bug.cgi?id=1422667
+[rhbz#1433016]: https://bugzilla.redhat.com/show_bug.cgi?id=1433016
+[rhbz#1165821]: https://bugzilla.redhat.com/show_bug.cgi?id=1165821
+
+
+## [0.9.157] - 2017-04-10
+
+### Added
+- Resources in location constraints now may be specified by resource name
+ patterns in addition to resource names ([rhbz#1362493])
+- Proxy settings description in pcsd configuration file ([rhbz#1315627])
+- Man page for pcsd ([rhbz#1378742])
+- Pcs now allows setting the `trace_ra` and `trace_file` options of `ocf:heartbeat`
+ and `ocf:pacemaker` resources ([rhbz#1421702])
+- `pcs resource describe` and `pcs stonith describe` commands now show all
+ information about the specified agent if the `--full` flag is used
+- `pcs resource manage | unmanage` enables or disables monitor operations,
+ respectively, when the `--monitor` flag is specified ([rhbz#1303969])
+- Support for shared storage in SBD. Currently, there is very limited support
+ in web UI ([rhbz#1413958])
+
+### Changed
+- It is now possible to specify more than one resource in the `pcs resource
+ enable` and `pcs resource disable` commands.
+
+### Fixed
+- Python 3: pcs no longer spams stderr with error messages when communicating
+ with another node
+- Stopping a cluster no longer times out too early and generally works better
+ even if the cluster is running Virtual IP resources ([rhbz#1334429])
+- `pcs booth remove` now works correctly even if the booth resource group is
+ disabled (another fix) ([rhbz#1389941])
+- Fixed Cross-site scripting (XSS) vulnerability in web UI ([CVE-2017-2661],
+ [rhbz#1434111])
+- Pcs no longer allows creating a stonith resource based on an agent whose
+ name contains a colon ([rhbz#1415080])
+- The pcs command now launches the Python interpreter with "sane" options
+ (python -Es) ([rhbz#1328882])
+- Clufter is now supported on both Python 2 and Python 3 ([rhbz#1428350])
+- Do not colorize clufter output if saved to a file
+
+[CVE-2017-2661]: https://access.redhat.com/security/cve/CVE-2017-2661
+[rhbz#1303969]: https://bugzilla.redhat.com/show_bug.cgi?id=1303969
+[rhbz#1315627]: https://bugzilla.redhat.com/show_bug.cgi?id=1315627
+[rhbz#1328882]: https://bugzilla.redhat.com/show_bug.cgi?id=1328882
+[rhbz#1334429]: https://bugzilla.redhat.com/show_bug.cgi?id=1334429
+[rhbz#1362493]: https://bugzilla.redhat.com/show_bug.cgi?id=1362493
+[rhbz#1378742]: https://bugzilla.redhat.com/show_bug.cgi?id=1378742
+[rhbz#1389941]: https://bugzilla.redhat.com/show_bug.cgi?id=1389941
+[rhbz#1413958]: https://bugzilla.redhat.com/show_bug.cgi?id=1413958
+[rhbz#1415080]: https://bugzilla.redhat.com/show_bug.cgi?id=1415080
+[rhbz#1421702]: https://bugzilla.redhat.com/show_bug.cgi?id=1421702
+[rhbz#1428350]: https://bugzilla.redhat.com/show_bug.cgi?id=1428350
+[rhbz#1434111]: https://bugzilla.redhat.com/show_bug.cgi?id=1434111
+
+
+## [0.9.156] - 2017-02-10
+
+### Added
+- Fencing levels now may be targeted in CLI by a node name pattern or a node
+ attribute in addition to a node name ([rhbz#1261116])
+- `pcs cluster cib-push` allows pushing a diff obtained internally by comparing
+ CIBs in specified files ([rhbz#1404233], [rhbz#1419903])
+- Added flags `--wait`, `--disabled`, `--group`, `--after`, `--before` to
+ the command `pcs stonith create`
+- Added commands `pcs stonith enable` and `pcs stonith disable`
+- Command line option --request-timeout ([rhbz#1292858])
+- Check whether a proxy is set when unable to connect to a node ([rhbz#1315627])
+
+### Changed
+- `pcs node [un]standby` and `pcs node [un]maintenance` are now atomic even if
+ more than one node is specified ([rhbz#1315992])
+- Restarting pcsd initiated from pcs is now a synchronous operation
+ ([rhbz#1284404])
+- Stopped bundling fonts used in pcsd GUI ([ghissue#125])
+- In `pcs resource create`, the flags `--master` and `--clone` were changed to
+ the keywords `master` and `clone`
+- libcurl is now used for node to node communication
+
+### Fixed
+- When upgrading CIB to the latest schema version, check for minimal common
+ version across the cluster ([rhbz#1389443])
+- `pcs booth remove` now works correctly even if the booth resource group is
+ disabled ([rhbz#1389941])
+- Adding a node in a CMAN cluster does not cause the new node to be fenced
+ immediately ([rhbz#1394846])
+- Show proper error message when there is an HTTP communication failure
+ ([rhbz#1394273])
+- Fixed searching for files to remove in the `/var/lib` directory ([ghpull#119],
+ [ghpull#120])
+- Fixed messages when managing services (start, stop, enable, disable...)
+- Fixed disabling services on systemd systems when using instances
+ ([rhbz#1389501])
+- Fixed parsing of command line options ([rhbz#1404229])
+- Pcs no longer exits with a false error message when pcsd-cli.rb outputs to
+ stderr ([ghissue#124])
+- Pcs now exits with an error when both `--all` and a list of nodes are specified
+ in the `pcs cluster start | stop | enable | disable` commands ([rhbz#1339355])
+- Built-in help and man page fixes and improvements ([rhbz#1347335])
+- In `pcs resource create` the flag `--clone` no longer steals arguments from
+ the keywords `meta` and `op` ([rhbz#1395226])
+- `pcs resource create` no longer produces an invalid CIB when the group id is
+ already occupied by a non-resource element ([rhbz#1382004])
+- Fixed misbehavior of the flag `--master` in the `pcs resource create` command
+ ([rhbz#1378107])
+- Fixed tacit acceptance of an invalid resource operation in `pcs resource create`
+ ([rhbz#1398562])
+- Fixed misplaced metadata for disabling a resource when running
+ `pcs resource create` with the flags `--clone` and `--disabled` ([rhbz#1402475])
+- Fixed incorrect acceptance of an invalid attribute of a resource operation in
+ `pcs resource create` ([rhbz#1382597])
+- Fixed validation of options of resource operations in `pcs resource create`
+ ([rhbz#1390071])
+- Fixed silent omission of duplicate options ([rhbz#1390066])
+- Added more validation for resource agent names ([rhbz#1387670])
+- Fixed network communication issues in pcsd when a node was specified by an
+ IPv6 address
+- Fixed JS error in web UI when empty cluster status is received
+ ([rhbz#1396462])
+- Fixed sending user group in cookies from Python 3
+- Fixed pcsd restart in Python 3
+- Fixed parsing XML in Python 3 (caused crashes when reading resource agents
+ metadata) ([rhbz#1419639])
+- Fixed the recognition of the structure of a resource agent name that contains
+ a systemd instance ([rhbz#1419661])
+
+### Removed
+- Ruby 1.8 and 1.9 are no longer supported due to bad libcurl support
+
+[ghissue#124]: https://github.com/ClusterLabs/pcs/issues/124
+[ghissue#125]: https://github.com/ClusterLabs/pcs/issues/125
+[ghpull#119]: https://github.com/ClusterLabs/pcs/pull/119
+[ghpull#120]: https://github.com/ClusterLabs/pcs/pull/120
+[rhbz#1261116]: https://bugzilla.redhat.com/show_bug.cgi?id=1261116
+[rhbz#1284404]: https://bugzilla.redhat.com/show_bug.cgi?id=1284404
+[rhbz#1292858]: https://bugzilla.redhat.com/show_bug.cgi?id=1292858
+[rhbz#1315627]: https://bugzilla.redhat.com/show_bug.cgi?id=1315627
+[rhbz#1315992]: https://bugzilla.redhat.com/show_bug.cgi?id=1315992
+[rhbz#1339355]: https://bugzilla.redhat.com/show_bug.cgi?id=1339355
+[rhbz#1347335]: https://bugzilla.redhat.com/show_bug.cgi?id=1347335
+[rhbz#1378107]: https://bugzilla.redhat.com/show_bug.cgi?id=1378107
+[rhbz#1382004]: https://bugzilla.redhat.com/show_bug.cgi?id=1382004
+[rhbz#1382597]: https://bugzilla.redhat.com/show_bug.cgi?id=1382597
+[rhbz#1387670]: https://bugzilla.redhat.com/show_bug.cgi?id=1387670
+[rhbz#1389443]: https://bugzilla.redhat.com/show_bug.cgi?id=1389443
+[rhbz#1389501]: https://bugzilla.redhat.com/show_bug.cgi?id=1389501
+[rhbz#1389941]: https://bugzilla.redhat.com/show_bug.cgi?id=1389941
+[rhbz#1390066]: https://bugzilla.redhat.com/show_bug.cgi?id=1390066
+[rhbz#1390071]: https://bugzilla.redhat.com/show_bug.cgi?id=1390071
+[rhbz#1394273]: https://bugzilla.redhat.com/show_bug.cgi?id=1394273
+[rhbz#1394846]: https://bugzilla.redhat.com/show_bug.cgi?id=1394846
+[rhbz#1395226]: https://bugzilla.redhat.com/show_bug.cgi?id=1395226
+[rhbz#1396462]: https://bugzilla.redhat.com/show_bug.cgi?id=1396462
+[rhbz#1398562]: https://bugzilla.redhat.com/show_bug.cgi?id=1398562
+[rhbz#1402475]: https://bugzilla.redhat.com/show_bug.cgi?id=1402475
+[rhbz#1404229]: https://bugzilla.redhat.com/show_bug.cgi?id=1404229
+[rhbz#1404233]: https://bugzilla.redhat.com/show_bug.cgi?id=1404233
+[rhbz#1419639]: https://bugzilla.redhat.com/show_bug.cgi?id=1419639
+[rhbz#1419661]: https://bugzilla.redhat.com/show_bug.cgi?id=1419661
+[rhbz#1419903]: https://bugzilla.redhat.com/show_bug.cgi?id=1419903
+
+
## [0.9.155] - 2016-11-03
### Added
@@ -20,7 +232,7 @@
- When stopping a cluster with some of the nodes unreachable, stop the cluster
completely on all reachable nodes ([rhbz#1380372])
- Fixed pcsd crash when rpam rubygem is installed ([ghissue#109])
-- Fixed occasional crashes / failures when using locale other than en_US.UTF8
+- Fixed occasional crashes / failures when using locale other than en\_US.UTF8
([rhbz#1387106])
- Fixed starting and stopping cluster services on systemd machines without
the `service` executable ([ghissue#115])
diff --git a/MANIFEST.in b/MANIFEST.in
index e232624..68ab64c 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,7 +1,7 @@
include Makefile
include COPYING
include pcs/pcs.8
-include pcs/bash_completion.sh
+include pcs/bash_completion
include pcsd/.bundle/config
graft pcsd
graft pcsd/vendor/cache
diff --git a/Makefile b/Makefile
index 25fb87d..0e4dff4 100644
--- a/Makefile
+++ b/Makefile
@@ -5,7 +5,7 @@ IS_DEBIAN=false
DISTRO_DEBIAN_VER_8=false
ifndef PYTHON
- PYTHON=python
+ PYTHON := $(shell which python)
endif
ifeq ($(UNAME_OS_GNU),true)
@@ -81,11 +81,35 @@ ifndef BASH_COMPLETION_DIR
BASH_COMPLETION_DIR=${DESTDIR}/etc/bash_completion.d
endif
+ifndef PCSD_PARENT_DIR
+ ifeq ($(IS_DEBIAN),true)
+ PCSD_PARENT_DIR = /usr/share
+ else
+ PCSD_PARENT_DIR = ${PREFIX}/lib
+ endif
+endif
+
+pcsd_fonts = \
+ LiberationSans-Regular.ttf;LiberationSans:style=Regular \
+ LiberationSans-Bold.ttf;LiberationSans:style=Bold \
+ LiberationSans-BoldItalic.ttf;LiberationSans:style=BoldItalic \
+ LiberationSans-Italic.ttf;LiberationSans:style=Italic \
+ Overpass-Regular.ttf;Overpass:style=Regular \
+ Overpass-Bold.ttf;Overpass:style=Bold
+
+
install:
+ # make Python interpreter execution sane (via -Es flags)
+ echo -e "[build]\nexecutable = $(PYTHON) -Es\n" > setup.cfg
$(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS}
+ # fix excessive script interpreting "executable" quoting with old setuptools:
+ # https://github.com/pypa/setuptools/issues/188
+ # https://bugzilla.redhat.com/1353934
+ sed -i '1s|^\(#!\)"\(.*\)"$$|\1\2|' ${DESTDIR}${PREFIX}/bin/pcs
+ rm setup.cfg
mkdir -p ${DESTDIR}${PREFIX}/sbin/
mv ${DESTDIR}${PREFIX}/bin/pcs ${DESTDIR}${PREFIX}/sbin/pcs
- install -D -m644 pcs/bash_completion.sh ${BASH_COMPLETION_DIR}/pcs
+ install -D -m644 pcs/bash_completion ${BASH_COMPLETION_DIR}/pcs
install -m644 -D pcs/pcs.8 ${DESTDIR}/${MANDIR}/man8/pcs.8
ifeq ($(IS_DEBIAN),true)
ifeq ($(install_settings),true)
@@ -123,23 +147,30 @@ ifeq ($(IS_DEBIAN),true)
install -m 755 -D pcsd/pcsd.debian ${DESTDIR}/${initdir}/pcsd
endif
else
- mkdir -p ${DESTDIR}${PREFIX}/lib/
- cp -r pcsd ${DESTDIR}${PREFIX}/lib/
+ mkdir -p ${DESTDIR}${PCSD_PARENT_DIR}/
+ cp -r pcsd ${DESTDIR}${PCSD_PARENT_DIR}/
install -m 644 -D pcsd/pcsd.conf ${DESTDIR}/etc/sysconfig/pcsd
install -d ${DESTDIR}/etc/pam.d
install pcsd/pcsd.pam ${DESTDIR}/etc/pam.d/pcsd
ifeq ($(IS_SYSTEMCTL),true)
install -d ${DESTDIR}/${systemddir}/system/
install -m 644 pcsd/pcsd.service ${DESTDIR}/${systemddir}/system/
-# ${DESTDIR}${PREFIX}/lib/pcsd/pcsd holds the selinux context
- install -m 755 pcsd/pcsd.service-runner ${DESTDIR}${PREFIX}/lib/pcsd/pcsd
- rm ${DESTDIR}${PREFIX}/lib/pcsd/pcsd.service-runner
+# ${DESTDIR}${PCSD_PARENT_DIR}/pcsd/pcsd holds the selinux context
+ install -m 755 pcsd/pcsd.service-runner ${DESTDIR}${PCSD_PARENT_DIR}/pcsd/pcsd
+ rm ${DESTDIR}${PCSD_PARENT_DIR}/pcsd/pcsd.service-runner
else
install -m 755 -D pcsd/pcsd ${DESTDIR}/${initdir}/pcsd
endif
endif
install -m 700 -d ${DESTDIR}/var/lib/pcsd
install -m 644 -D pcsd/pcsd.logrotate ${DESTDIR}/etc/logrotate.d/pcsd
+ install -m644 -D pcsd/pcsd.8 ${DESTDIR}/${MANDIR}/man8/pcsd.8
+ $(foreach font,$(pcsd_fonts),\
+ $(eval font_file = $(word 1,$(subst ;, ,$(font)))) \
+ $(eval font_def = $(word 2,$(subst ;, ,$(font)))) \
+ $(eval font_path = $(shell fc-match '--format=%{file}' '$(font_def)')) \
+ $(if $(font_path),ln -s -f $(font_path) ${DESTDIR}${PCSD_PARENT_DIR}/pcsd/public/css/$(font_file);,$(error Font $(font_def) not found)) \
+ )
uninstall:
rm -f ${DESTDIR}${PREFIX}/sbin/pcs
diff --git a/newversion.py b/newversion.py
index 300a445..1dba780 100644
--- a/newversion.py
+++ b/newversion.py
@@ -29,11 +29,14 @@ print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version+"/' pcs/settin
print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version+"/' pcsd/bootstrap.rb"))
print(os.system("sed -i 's/\#\# \[Unreleased\]/\#\# ["+new_version+"] - "+datetime.date.today().strftime('%Y-%m-%d')+"/' CHANGELOG.md"))
-manpage_head = '.TH PCS "8" "{date}" "pcs {version}" "System Administration Utilities"'.format(
- date=datetime.date.today().strftime('%B %Y'),
- version=new_version
-)
-print(os.system("sed -i '1c " + manpage_head + "' pcs/pcs.8"))
+def manpage_head(component):
+ return '.TH {component} "8" "{date}" "pcs {version}" "System Administration Utilities"'.format(
+ component=component.upper(),
+ date=datetime.date.today().strftime('%B %Y'),
+ version=new_version
+ )
+print(os.system("sed -i '1c " + manpage_head("pcs") + "' pcs/pcs.8"))
+print(os.system("sed -i '1c " + manpage_head("pcsd") + "' pcsd/pcsd.8"))
print(os.system("git diff"))
print("Look good? (y/n)")
diff --git a/pcs/acl.py b/pcs/acl.py
index b526ae9..ffa53f6 100644
--- a/pcs/acl.py
+++ b/pcs/acl.py
@@ -12,10 +12,10 @@ from pcs import (
usage,
utils,
)
-from pcs.lib.pacemaker_values import is_true
from pcs.cli.common.console_report import indent
from pcs.cli.common.errors import CmdLineInputError
from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker.values import is_true
def acl_cmd(lib, argv, modifiers):
if len(argv) < 1:
diff --git a/pcs/alert.py b/pcs/alert.py
index 116fde1..e8b7146 100644
--- a/pcs/alert.py
+++ b/pcs/alert.py
@@ -18,7 +18,7 @@ from pcs.cli.common.parse_args import prepare_options, group_by_keywords
from pcs.cli.common.console_report import indent
from pcs.lib.errors import LibraryError
-parse_cmd_sections = partial(group_by_keywords, implicit_first_keyword="main")
+parse_cmd_sections = partial(group_by_keywords, implicit_first_group_key="main")
def alert_cmd(*args):
argv = args[1]
diff --git a/pcs/app.py b/pcs/app.py
index 23bd24c..a53f7eb 100644
--- a/pcs/app.py
+++ b/pcs/app.py
@@ -9,7 +9,6 @@ import getopt
import os
import sys
import logging
-logging.basicConfig()
from pcs import (
acl,
@@ -31,9 +30,10 @@ from pcs import (
alert,
)
-from pcs.cli.common import completion
+from pcs.cli.common import completion, parse_args
+logging.basicConfig()
usefile = False
filename = ""
def main(argv=None):
@@ -49,90 +49,41 @@ def main(argv=None):
global filename, usefile
orig_argv = argv[:]
utils.pcs_options = {}
- modified_argv = []
- real_argv = []
- try:
- # we change --cloneopt to "clone" for backwards compatibility
- new_argv = []
- for arg in argv:
- if arg == "--cloneopt" or arg == "--clone":
- new_argv.append("clone")
- elif arg.startswith("--cloneopt="):
- new_argv.append("clone")
- new_argv.append(arg.split('=',1)[1])
- else:
- new_argv.append(arg)
- argv = new_argv
- # we want to support optional arguments for --wait, so if an argument
- # is specified with --wait (ie. --wait=30) then we use them
- waitsecs = None
- new_argv = []
- for arg in argv:
- if arg.startswith("--wait="):
- tempsecs = arg.replace("--wait=","")
- if len(tempsecs) > 0:
- waitsecs = tempsecs
- arg = "--wait"
- new_argv.append(arg)
- argv = new_argv
+ argv = parse_args.upgrade_args(argv)
- # h = help, f = file,
- # p = password (cluster auth), u = user (cluster auth),
- # V = verbose (cluster verify)
- pcs_short_options = "hf:p:u:V"
- pcs_long_options = [
- "debug", "version", "help", "fullhelp",
- "force", "skip-offline", "autocorrect", "interactive", "autodelete",
- "all", "full", "groups", "local", "wait", "config",
- "start", "enable", "disabled", "off",
- "pacemaker", "corosync",
- "no-default-ops", "defaults", "nodesc",
- "clone", "master", "name=", "group=", "node=",
- "from=", "to=", "after=", "before=",
- "transport=", "rrpmode=", "ipv6",
- "addr0=", "bcast0=", "mcast0=", "mcastport0=", "ttl0=", "broadcast0",
- "addr1=", "bcast1=", "mcast1=", "mcastport1=", "ttl1=", "broadcast1",
- "wait_for_all=", "auto_tie_breaker=", "last_man_standing=",
- "last_man_standing_window=",
- "token=", "token_coefficient=", "consensus=", "join=",
- "miss_count_const=", "fail_recv_const=",
- "corosync_conf=", "cluster_conf=",
- "booth-conf=", "booth-key=",
- "remote", "watchdog=",
- #in pcs status - do not display resorce status on inactive node
- "hide-inactive",
- ]
- # pull out negative number arguments and add them back after getopt
- prev_arg = ""
- for arg in argv:
- if len(arg) > 0 and arg[0] == "-":
- if arg[1:].isdigit() or arg[1:].startswith("INFINITY"):
- real_argv.append(arg)
- else:
- modified_argv.append(arg)
- else:
- # If previous argument required an argument, then this arg
- # should not be added back in
- if not prev_arg or (not (prev_arg[0] == "-" and prev_arg[1:] in pcs_short_options) and not (prev_arg[0:2] == "--" and (prev_arg[2:] + "=") in pcs_long_options)):
- real_argv.append(arg)
- modified_argv.append(arg)
- prev_arg = arg
+ # we want to support optional arguments for --wait, so if an argument
+ # is specified with --wait (ie. --wait=30) then we use them
+ waitsecs = None
+ new_argv = []
+ for arg in argv:
+ if arg.startswith("--wait="):
+ tempsecs = arg.replace("--wait=","")
+ if len(tempsecs) > 0:
+ waitsecs = tempsecs
+ arg = "--wait"
+ new_argv.append(arg)
+ argv = new_argv
- pcs_options, argv = getopt.gnu_getopt(modified_argv, pcs_short_options, pcs_long_options)
+ try:
+ pcs_options, dummy_argv = getopt.gnu_getopt(
+ parse_args.filter_out_non_option_negative_numbers(argv),
+ parse_args.PCS_SHORT_OPTIONS,
+ parse_args.PCS_LONG_OPTIONS,
+ )
except getopt.GetoptError as err:
print(err)
usage.main()
sys.exit(1)
- argv = real_argv
+ argv = parse_args.filter_out_options(argv)
for o, a in pcs_options:
if not o in utils.pcs_options:
- if o == "--watchdog":
+ if o in ["--watchdog", "--device"]:
a = [a]
utils.pcs_options[o] = a
else:
# If any options are a list then they've been entered twice which isn't valid
- if o != "--watchdog":
+ if o not in ["--watchdog", "--device"]:
utils.err("%s can only be used once" % o)
else:
utils.pcs_options[o].append(a)
@@ -160,6 +111,22 @@ def main(argv=None):
sys.exit()
elif o == "--wait":
utils.pcs_options[o] = waitsecs
+ elif o == "--request-timeout":
+ request_timeout_valid = False
+ try:
+ timeout = int(a)
+ if timeout > 0:
+ utils.pcs_options[o] = timeout
+ request_timeout_valid = True
+ except ValueError:
+ pass
+ if not request_timeout_valid:
+ utils.err(
+ (
+ "'{0}' is not a valid --request-timeout value, use "
+ "a positive integer"
+ ).format(a)
+ )
if len(argv) == 0:
usage.main()
@@ -189,7 +156,11 @@ def main(argv=None):
"status": status.status_cmd,
"config": config.config_cmd,
"pcsd": pcsd.pcsd_cmd,
- "node": node.node_cmd,
+ "node": lambda argv: node.node_cmd(
+ utils.get_library_wrapper(),
+ argv,
+ utils.get_modificators()
+ ),
"quorum": lambda argv: quorum.quorum_cmd(
utils.get_library_wrapper(),
argv,
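
The `--request-timeout` handling added above (the option itself is listed in the 0.9.156 changelog, [rhbz#1292858]) accepts only a positive integer number of seconds. Below is a rough standalone sketch of the same check; the real code reports the error via utils.err() rather than raising.

    def parse_request_timeout(value):
        # accept only a positive integer, mirroring the check in pcs/app.py
        try:
            timeout = int(value)
        except ValueError:
            timeout = 0
        if timeout <= 0:
            raise ValueError(
                "'{0}' is not a valid --request-timeout value, use "
                "a positive integer".format(value)
            )
        return timeout

    assert parse_request_timeout("60") == 60
    # parse_request_timeout("0") or parse_request_timeout("abc") raise ValueError
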
diff --git a/pcs/bash_completion.sh b/pcs/bash_completion
similarity index 100%
rename from pcs/bash_completion.sh
rename to pcs/bash_completion
diff --git a/pcs/booth.py b/pcs/booth.py
index 5ec41bf..5f41115 100644
--- a/pcs/booth.py
+++ b/pcs/booth.py
@@ -12,7 +12,7 @@ from pcs import utils
from pcs.cli.booth import command
from pcs.cli.common.errors import CmdLineInputError
from pcs.lib.errors import LibraryError
-from pcs.resource import resource_create, resource_remove, resource_restart
+from pcs.resource import resource_remove, resource_restart
def booth_cmd(lib, argv, modifiers):
@@ -26,7 +26,7 @@ def booth_cmd(lib, argv, modifiers):
sub_cmd, argv_next = argv[0], argv[1:]
try:
if sub_cmd == "help":
- usage.booth(argv)
+ usage.booth([" ".join(argv_next)] if argv_next else [])
elif sub_cmd == "config":
command.config_show(lib, argv_next, modifiers)
elif sub_cmd == "setup":
@@ -47,9 +47,7 @@ def booth_cmd(lib, argv, modifiers):
else:
raise CmdLineInputError()
elif sub_cmd == "create":
- command.get_create_in_cluster(resource_create, resource_remove)(
- lib, argv_next, modifiers
- )
+ command.create_in_cluster(lib, argv_next, modifiers)
elif sub_cmd == "remove":
command.get_remove_from_cluster(resource_remove)(
lib, argv_next, modifiers
diff --git a/pcs/cli/booth/command.py b/pcs/cli/booth/command.py
index 72b2c73..b56266f 100644
--- a/pcs/cli/booth/command.py
+++ b/pcs/cli/booth/command.py
@@ -94,21 +94,14 @@ def ticket_revoke(lib, arg_list, modifiers):
def ticket_grant(lib, arg_list, modifiers):
ticket_operation(lib.booth.ticket_grant, arg_list, modifiers)
-def get_create_in_cluster(resource_create, resource_remove):
- #TODO resource_remove is provisional hack until resources are not moved to
- #lib
- def create_in_cluster(lib, arg_list, modifiers):
- if len(arg_list) != 2 or arg_list[0] != "ip":
- raise CmdLineInputError()
- ip = arg_list[1]
-
- lib.booth.create_in_cluster(
- __get_name(modifiers),
- ip,
- resource_create,
- resource_remove,
- )
- return create_in_cluster
+def create_in_cluster(lib, arg_list, modifiers):
+ if len(arg_list) != 2 or arg_list[0] != "ip":
+ raise CmdLineInputError()
+ lib.booth.create_in_cluster(
+ __get_name(modifiers),
+ ip=arg_list[1],
+ allow_absent_resource_agent=modifiers["force"]
+ )
def get_remove_from_cluster(resource_remove):
#TODO resource_remove is provisional hack until resources are not moved to
diff --git a/pcs/cli/booth/console_report.py b/pcs/cli/booth/console_report.py
index 5dd0397..9acd76f 100644
--- a/pcs/cli/booth/console_report.py
+++ b/pcs/cli/booth/console_report.py
@@ -10,9 +10,10 @@ from pcs.common import report_codes as codes
def format_booth_default(value, template):
return "" if value in ("booth", "", None) else template.format(value)
-#Each value (callable taking report_item.info) returns string template.
-#Optionaly the template can contain placehodler {force} for next processing.
-#Placeholder {force} will be appended if is necessary and if is not presset
+#Each value (a callable taking report_item.info) returns a message.
+#Force text will be appended if necessary.
+#If it is necessary to put the force text inside the string then the callable
+#must take the force_text parameter.
CODE_TO_MESSAGE_BUILDER_MAP = {
codes.BOOTH_LACK_OF_SITES: lambda info:
"lack of sites for booth configuration (need 2 at least): sites {0}"
diff --git a/pcs/cli/booth/env.py b/pcs/cli/booth/env.py
index 918e487..908b9dc 100644
--- a/pcs/cli/booth/env.py
+++ b/pcs/cli/booth/env.py
@@ -5,58 +5,12 @@ from __future__ import (
unicode_literals,
)
-import os.path
-
from pcs.cli.common import console_report
-from pcs.common import report_codes, env_file_role_codes as file_role_codes
+from pcs.common.env_file_role_codes import BOOTH_CONFIG, BOOTH_KEY
from pcs.lib.errors import LibraryEnvError
+from pcs.cli.common import env_file
-def read_env_file(path):
- try:
- return {
- "content": open(path).read() if os.path.isfile(path) else None
- }
- except EnvironmentError as e:
- raise console_report.error(
- "Unable to read {0}: {1}".format(path, e.strerror)
- )
-
-def write_env_file(env_file, file_path):
- try:
- f = open(file_path, "wb" if env_file.get("is_binary", False) else "w")
- f.write(env_file["content"])
- f.close()
- except EnvironmentError as e:
- raise console_report.error(
- "Unable to write {0}: {1}".format(file_path, e.strerror)
- )
-
-def process_no_existing_file_expectation(file_role, env_file, file_path):
- if(
- env_file["no_existing_file_expected"]
- and
- os.path.exists(file_path)
- ):
- msg = "{0} {1} already exists".format(file_role, file_path)
- if not env_file["can_overwrite_existing_file"]:
- raise console_report.error(
- "{0}, use --force to override".format(msg)
- )
- console_report.warn(msg)
-
-def is_missing_file_report(report, file_role_code):
- return (
- report.code == report_codes.FILE_DOES_NOT_EXIST
- and
- report.info["file_role"] == file_role_code
- )
-
-def report_missing_file(file_role, file_path):
- console_report.error(
- "{0} '{1}' does not exist".format(file_role, file_path)
- )
-
def middleware_config(name, config_path, key_path):
if config_path and not key_path:
raise console_report.error(
@@ -75,8 +29,8 @@ def middleware_config(name, config_path, key_path):
return {"name": name}
return {
"name": name,
- "config_file": read_env_file(config_path),
- "key_file": read_env_file(key_path),
+ "config_file": env_file.read(config_path),
+ "key_file": env_file.read(key_path, is_binary=True),
"key_path": key_path,
}
@@ -89,31 +43,30 @@ def middleware_config(name, config_path, key_path):
#pcs.cli.common.lib_wrapper.lib_env_to_cli_env
raise console_report.error("Error during library communication")
- process_no_existing_file_expectation(
+ env_file.process_no_existing_file_expectation(
"booth config file",
modified_env["config_file"],
config_path
)
- process_no_existing_file_expectation(
+ env_file.process_no_existing_file_expectation(
"booth key file",
modified_env["key_file"],
key_path
)
- write_env_file(modified_env["key_file"], key_path)
- write_env_file(modified_env["config_file"], config_path)
+ env_file.write(modified_env["key_file"], key_path)
+ env_file.write(modified_env["config_file"], config_path)
def apply(next_in_line, env, *args, **kwargs):
env.booth = create_booth_env()
try:
result_of_next = next_in_line(env, *args, **kwargs)
except LibraryEnvError as e:
- for report in e.args:
- if is_missing_file_report(report, file_role_codes.BOOTH_CONFIG):
- report_missing_file("Booth config file", config_path)
- e.sign_processed(report)
- if is_missing_file_report(report, file_role_codes.BOOTH_KEY):
- report_missing_file("Booth key file", key_path)
- e.sign_processed(report)
+ missing_file = env_file.MissingFileCandidateInfo
+
+ env_file.evaluate_for_missing_files(e, [
+ missing_file(BOOTH_CONFIG, "Booth config file", config_path),
+ missing_file(BOOTH_KEY, "Booth key file", key_path),
+ ])
raise e
flush(env.booth["modified_env"])
return result_of_next
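
The middleware above now delegates local file handling to the shared pcs.cli.common.env_file helpers introduced in this version. The following is only a rough usage sketch of the write-back step mirrored from flush(); the paths are made up and the dict shape is assumed from the test fixtures shown below.

    from pcs.cli.common import env_file

    # hypothetical environment returned by a library command (shape assumed
    # from the fixtures in pcs/cli/booth/test/test_env.py)
    modified_env = {
        "config_file": {"content": "file content", "no_existing_file_expected": False},
        "key_file": {"content": "key file content", "no_existing_file_expected": False},
    }

    # warn or refuse if a file that was not expected to exist already exists,
    # then write both files back, as flush() does above
    env_file.process_no_existing_file_expectation(
        "booth config file", modified_env["config_file"], "/tmp/example.conf"
    )
    env_file.process_no_existing_file_expectation(
        "booth key file", modified_env["key_file"], "/tmp/example.key"
    )
    env_file.write(modified_env["key_file"], "/tmp/example.key")
    env_file.write(modified_env["config_file"], "/tmp/example.conf")
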
diff --git a/pcs/cli/booth/test/test_env.py b/pcs/cli/booth/test/test_env.py
index e1e59e2..6c2cfb4 100644
--- a/pcs/cli/booth/test/test_env.py
+++ b/pcs/cli/booth/test/test_env.py
@@ -7,16 +7,23 @@ from __future__ import (
from pcs.test.tools.pcs_unittest import TestCase
-from pcs.cli.booth.env import middleware_config
+from pcs.cli.booth import env
from pcs.common import report_codes, env_file_role_codes
from pcs.lib.errors import LibraryEnvError, ReportItem
from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.misc import create_setup_patch_mixin
+SetupPatchMixin = create_setup_patch_mixin(env)
-class BoothConfTest(TestCase):
- @mock.patch("pcs.cli.booth.env.os.path.isfile")
- def test_sucessfully_care_about_local_file(self, mock_is_file):
- #setup, fixtures
+class BoothConfTest(TestCase, SetupPatchMixin):
+ def setUp(self):
+ self.write = self.setup_patch("env_file.write")
+ self.read = self.setup_patch("env_file.read")
+ self.process_no_existing_file_expectation = self.setup_patch(
+ "env_file.process_no_existing_file_expectation"
+ )
+
+ def test_sucessfully_care_about_local_file(self):
def next_in_line(env):
env.booth["modified_env"] = {
"config_file": {
@@ -29,55 +36,62 @@ class BoothConfTest(TestCase):
}
}
return "call result"
- mock_is_file.return_value = True
- mock_env = mock.MagicMock()
- mock_open = mock.mock_open()
- with mock.patch(
- "pcs.cli.booth.env.open",
- mock_open,
- create=True
- ):
- #run tested code
- booth_conf_middleware = middleware_config(
- "booth-name",
- "/local/file/path.conf",
- "/local/file/path.key",
- )
+ mock_env = mock.MagicMock()
+ booth_conf_middleware = env.middleware_config(
+ "booth-name",
+ "/local/file/path.conf",
+ "/local/file/path.key",
+ )
- self.assertEqual(
- "call result",
- booth_conf_middleware(next_in_line, mock_env)
- )
+ self.assertEqual(
+ "call result",
+ booth_conf_middleware(next_in_line, mock_env)
+ )
- #assertions
- self.assertEqual(mock_is_file.mock_calls,[
- mock.call("/local/file/path.conf"),
- mock.call("/local/file/path.key"),
+ self.assertEqual(self.read.mock_calls, [
+ mock.call('/local/file/path.conf'),
+ mock.call('/local/file/path.key', is_binary=True),
])
- self.assertEqual(mock_env.booth["name"], "booth-name")
- self.assertEqual(mock_env.booth["config_file"], {"content": ""})
- self.assertEqual(mock_env.booth["key_file"], {"content": ""})
+ self.assertEqual(self.process_no_existing_file_expectation.mock_calls, [
+ mock.call(
+ 'booth config file',
+ {
+ 'content': 'file content',
+ 'no_existing_file_expected': False
+ },
+ '/local/file/path.conf'
+ ),
+ mock.call(
+ 'booth key file',
+ {
+ 'content': 'key file content',
+ 'no_existing_file_expected': False
+ },
+ '/local/file/path.key'
+ ),
+ ])
- self.assertEqual(mock_open.mock_calls, [
- mock.call(u'/local/file/path.conf'),
- mock.call().read(),
- mock.call(u'/local/file/path.key'),
- mock.call().read(),
- mock.call(u'/local/file/path.key', u'w'),
- mock.call().write(u'key file content'),
- mock.call().close(),
- mock.call(u'/local/file/path.conf', u'w'),
- mock.call().write(u'file content'),
- mock.call().close(),
+ self.assertEqual(self.write.mock_calls, [
+ mock.call(
+ {
+ 'content': 'key file content',
+ 'no_existing_file_expected': False
+ },
+ '/local/file/path.key'
+ ),
+ mock.call(
+ {
+ 'content': 'file content',
+ 'no_existing_file_expected': False
+ },
+ '/local/file/path.conf'
+ )
])
- @mock.patch("pcs.cli.booth.env.console_report")
- @mock.patch("pcs.cli.booth.env.os.path.isfile")
- def test_catch_exactly_his_exception(
- self, mock_is_file, mock_console_report
- ):
+ def test_catch_exactly_his_exception(self):
+ report_missing = self.setup_patch("env_file.report_missing")
next_in_line = mock.Mock(side_effect=LibraryEnvError(
ReportItem.error(report_codes.FILE_DOES_NOT_EXIST, info={
"file_role": env_file_role_codes.BOOTH_CONFIG,
@@ -87,11 +101,10 @@ class BoothConfTest(TestCase):
}),
ReportItem.error("OTHER ERROR", info={}),
))
- mock_is_file.return_value = False
mock_env = mock.MagicMock()
+ self.read.return_value = {"content": None}
- #run tested code
- booth_conf_middleware = middleware_config(
+ booth_conf_middleware = env.middleware_config(
"booth-name",
"/local/file/path.conf",
"/local/file/path.key",
@@ -103,16 +116,10 @@ class BoothConfTest(TestCase):
except Exception as e:
raised_exception.append(e)
raise e
-
self.assertRaises(LibraryEnvError, run_middleware)
self.assertEqual(1, len(raised_exception[0].unprocessed))
self.assertEqual("OTHER ERROR", raised_exception[0].unprocessed[0].code)
-
- self.assertEqual(mock_console_report.error.mock_calls, [
- mock.call(
- "Booth config file '/local/file/path.conf' does not exist"
- ),
- mock.call(
- "Booth key file '/local/file/path.key' does not exist"
- ),
+ self.assertEqual(report_missing.mock_calls, [
+ mock.call('Booth config file', '/local/file/path.conf'),
+ mock.call('Booth key file', '/local/file/path.key'),
])
diff --git a/pcs/test/tools/test/__init__.py b/pcs/cli/cluster/__init__.py
similarity index 100%
copy from pcs/test/tools/test/__init__.py
copy to pcs/cli/cluster/__init__.py
diff --git a/pcs/cli/cluster/command.py b/pcs/cli/cluster/command.py
new file mode 100644
index 0000000..f725326
--- /dev/null
+++ b/pcs/cli/cluster/command.py
@@ -0,0 +1,103 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.cli.resource.parse_args import(
+ parse_create_simple as parse_resource_create_args
+)
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.cli.common.parse_args import prepare_options
+
+def _node_add_remote_separate_host_and_name(arg_list):
+ node_host = arg_list[0]
+ if len(arg_list) == 1:
+ node_name = node_host
+ rest_args = []
+ elif "=" in arg_list[1] or arg_list[1] in ["op", "meta"]:
+ node_name = node_host
+ rest_args = arg_list[1:]
+ else:
+ node_name = arg_list[1]
+ rest_args = arg_list[2:]
+
+ return node_host, node_name, rest_args
+
+def node_add_remote(lib, arg_list, modifiers):
+ if not arg_list:
+ raise CmdLineInputError()
+
+ node_host, node_name, rest_args = _node_add_remote_separate_host_and_name(
+ arg_list
+ )
+
+ parts = parse_resource_create_args(rest_args)
+ force = modifiers["force"]
+
+ lib.cluster.node_add_remote(
+ node_host,
+ node_name,
+ parts["op"],
+ parts["meta"],
+ parts["options"],
+ allow_incomplete_distribution=force,
+ allow_pacemaker_remote_service_fail=force,
+ allow_invalid_operation=force,
+ allow_invalid_instance_attributes=force,
+ use_default_operations=not modifiers["no-default-ops"],
+ wait=modifiers["wait"],
+ )
+
+def create_node_remove_remote(remove_resource):
+ def node_remove_remote(lib, arg_list, modifiers):
+ if not arg_list:
+ raise CmdLineInputError()
+ lib.cluster.node_remove_remote(
+ arg_list[0],
+ remove_resource,
+ allow_remove_multiple_nodes=modifiers["force"],
+ allow_pacemaker_remote_service_fail=modifiers["force"],
+ )
+ return node_remove_remote
+
+def node_add_guest(lib, arg_list, modifiers):
+ if len(arg_list) < 2:
+ raise CmdLineInputError()
+
+
+ node_name = arg_list[0]
+ resource_id = arg_list[1]
+ meta_options = prepare_options(arg_list[2:])
+
+ force = modifiers["force"]
+
+ lib.cluster.node_add_guest(
+ node_name,
+ resource_id,
+ meta_options,
+ allow_incomplete_distribution=force,
+ allow_pacemaker_remote_service_fail=force,
+ wait=modifiers["wait"],
+ )
+
+def node_remove_guest(lib, arg_list, modifiers):
+ if not arg_list:
+ raise CmdLineInputError()
+
+ lib.cluster.node_remove_guest(
+ arg_list[0],
+ allow_remove_multiple_nodes=modifiers["force"],
+ allow_pacemaker_remote_service_fail=modifiers["force"],
+ wait=modifiers["wait"],
+ )
+
+def node_clear(lib, arg_list, modifiers):
+ if len(arg_list) != 1:
+ raise CmdLineInputError()
+
+ lib.cluster.node_clear(
+ arg_list[0],
+ allow_clear_cluster_node=modifiers["force"]
+ )
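
In _node_add_remote_separate_host_and_name() above, the second argument is treated as an explicit node name only when it is neither a key=value option nor the op/meta keyword. An illustrative call follows (argument values are made up; the unit test below covers the explicit and implicit name cases).

    from pcs.cli.cluster.command import _node_add_remote_separate_host_and_name

    # explicit node name followed by resource arguments
    _node_add_remote_separate_host_and_name(
        ["node-host", "node-name", "op", "monitor", "interval=30s"]
    )
    # -> ("node-host", "node-name", ["op", "monitor", "interval=30s"])

    # no explicit name: the second argument is the "op" keyword, so the
    # host address doubles as the node name
    _node_add_remote_separate_host_and_name(["node-host", "op", "monitor", "interval=30s"])
    # -> ("node-host", "node-host", ["op", "monitor", "interval=30s"])
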
diff --git a/pcs/test/tools/test/__init__.py b/pcs/cli/cluster/test/__init__.py
similarity index 100%
copy from pcs/test/tools/test/__init__.py
copy to pcs/cli/cluster/test/__init__.py
diff --git a/pcs/cli/cluster/test/test_command.py b/pcs/cli/cluster/test/test_command.py
new file mode 100644
index 0000000..9a8e76b
--- /dev/null
+++ b/pcs/cli/cluster/test/test_command.py
@@ -0,0 +1,24 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.cli.cluster import command
+
+class ParseNodeAddRemote(TestCase):
+ def test_deal_with_explicit_name(self):
+ self.assertEqual(
+ command._node_add_remote_separate_host_and_name(
+ ["host", "name", "a=b"]
+ ),
+ ("host", "name", ["a=b"])
+ )
+
+ def test_deal_with_implicit_name(self):
+ self.assertEqual(
+ command._node_add_remote_separate_host_and_name(["host", "a=b"]),
+ ("host", "host", ["a=b"])
+ )
diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
index 643550f..793ff8d 100644
--- a/pcs/cli/common/console_report.py
+++ b/pcs/cli/common/console_report.py
@@ -5,11 +5,13 @@ from __future__ import (
unicode_literals,
)
-import sys
+from collections import Iterable
from functools import partial
+import sys
from pcs.common import report_codes as codes
-from collections import Iterable
+from pcs.common.fencing_topology import TARGET_TYPE_ATTRIBUTE
+from pcs.common.tools import is_string
INSTANCE_SUFFIX = "@{0}"
NODE_PREFIX = "{0}: "
@@ -35,8 +37,13 @@ def indent(line_list, indent_step=2):
for line in line_list
]
-def format_optional(value, template):
- return "" if not value else template.format(value)
+def format_optional(value, template, empty_case=""):
+ return empty_case if not value else template.format(value)
+
+def format_fencing_level_target(target_type, target_value):
+ if target_type == TARGET_TYPE_ATTRIBUTE:
+ return "{0}={1}".format(target_value[0], target_value[1])
+ return target_value
def service_operation_started(operation, info):
return "{operation}{service}{instance_suffix}...".format(
@@ -74,10 +81,84 @@ def service_operation_skipped(operation, info):
**info
)
+def id_belongs_to_unexpected_type(info):
+ translate_expected = {
+ "acl_group": "an acl group",
+ "acl_target": "an acl user",
+ "group": "a group",
+ }
+ return "'{id}' is not {expected_type}".format(
+ id=info["id"],
+ expected_type="/".join([
+ translate_expected.get(tag, "{0}".format(tag))
+ for tag in info["expected_types"]
+ ]),
+ )
+
+def id_not_found(info):
+ desc = format_optional(info["id_description"], "{0} ")
+ if not info["context_type"] or not info["context_id"]:
+ return "{desc}'{id}' does not exist".format(desc=desc, id=info["id"])
+
+ return (
+ "there is no {desc}'{id}' in the {context_type} '{context_id}'".format(
+ desc=desc,
+ id=info["id"],
+ context_type=info["context_type"],
+ context_id=info["context_id"],
+ )
+ )
+
+def resource_running_on_nodes(info):
+ role_label_map = {
+ "Started": "running",
+ }
+ state_info = {}
+ for state, node_list in info["roles_with_nodes"].items():
+ state_info.setdefault(
+ role_label_map.get(state, state.lower()),
+ []
+ ).extend(node_list)
+
+ return "resource '{resource_id}' is {detail_list}".format(
+ resource_id=info["resource_id"],
+ detail_list="; ".join(sorted([
+ "{run_type} on node{s} {node_list}".format(
+ run_type=run_type,
+ s="s" if len(node_list) > 1 else "",
+ node_list=joined_list(node_list)
+ )
+ for run_type, node_list in state_info.items()
+ ]))
+ )
+
+def build_node_description(node_types):
+ if not node_types:
+ return "Node"
+
+ label = "{0} node".format
+
+ if is_string(node_types):
+ return label(node_types)
-#Each value (callable taking report_item.info) returns string template.
-#Optionaly the template can contain placehodler {force} for next processing.
-#Placeholder {force} will be appended if is necessary and if is not presset
+ if len(node_types) == 1:
+ return label(node_types[0])
+
+ return "nor " + " or ".join([label(ntype) for ntype in node_types])
+
+def joined_list(item_list, optional_transformations=None):
+ if not optional_transformations:
+ optional_transformations={}
+
+ return ", ".join(sorted([
+ "'{0}'".format(optional_transformations.get(item, item))
+ for item in item_list
+ ]))
+
+#Each value (a callable taking report_item.info) returns a message.
+#Force text will be appended if necessary.
+#If it is necessary to put the force text inside the string then the callable
+#must take the force_text parameter.
CODE_TO_MESSAGE_BUILDER_MAP = {
codes.COMMON_ERROR: lambda info: info["text"],
@@ -87,35 +168,100 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
codes.EMPTY_RESOURCE_SET_LIST: "Resource set list is empty",
codes.REQUIRED_OPTION_IS_MISSING: lambda info:
- "required option '{option_name}' is missing"
- .format(**info)
+ "required {desc}option{s} {option_names_list} {are} missing"
+ .format(
+ desc=format_optional(info["option_type"], "{0} "),
+ option_names_list=joined_list(info["option_names"]),
+ s=("s" if len(info["option_names"]) > 1 else ""),
+ are=(
+ "are" if len(info["option_names"]) > 1
+ else "is"
+ )
+ )
,
- codes.INVALID_OPTION: lambda info:
- "invalid {desc}option '{option_name}', allowed options are: {allowed_values}"
+ codes.PREREQUISITE_OPTION_IS_MISSING: lambda info:
+ (
+ "If {opt_desc}option '{option_name}' is specified, "
+ "{pre_desc}option '{prerequisite_name}' must be specified as well"
+ ).format(
+ opt_desc=format_optional(info.get("option_type"), "{0} "),
+ pre_desc=format_optional(info.get("prerequisite_type"), "{0} "),
+ **info
+ )
+ ,
+
+ codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING: lambda info:
+ "{desc}option {option_names_list} has to be specified"
.format(
+ desc=format_optional(info.get("option_type"), "{0} "),
+ option_names_list=" or ".join(sorted([
+ "'{0}'".format(name)
+ for name in info["option_names"]
+ ])),
+ )
+ ,
+
+ codes.INVALID_OPTION: lambda info:
+ (
+ "invalid {desc}option{s} {option_names_list},"
+ " allowed option{are} {allowed_values}"
+ ).format(
desc=format_optional(info["option_type"], "{0} "),
- allowed_values=", ".join(info["allowed"]),
+ allowed_values=", ".join(sorted(info["allowed"])),
+ option_names_list=joined_list(info["option_names"]),
+ s=("s:" if len(info["option_names"]) > 1 else ""),
+ are=("s are:" if len(info["allowed"]) > 1 else " is"),
**info
)
,
codes.INVALID_OPTION_VALUE: lambda info:
+ #value on key "allowed_values" is overloaded:
+ # * it can be a list - then it express possible option values
+ # * it can be a string - then it is verbal description of value
"'{option_value}' is not a valid {option_name} value, use {hint}"
.format(
hint=(
- ", ".join(info["allowed_values"])
- if (
+ ", ".join(sorted(info["allowed_values"])) if (
isinstance(info["allowed_values"], Iterable)
and
- not isinstance(info["allowed_values"], "".__class__)
- )
- else info["allowed_values"]
+ not is_string(info["allowed_values"])
+ ) else info["allowed_values"]
+ ),
+ **info
+ )
+ ,
+
+ codes.INVALID_OPTION_TYPE: lambda info:
+ #value on key "allowed_types" is overloaded:
+ # * it can be a list - then it express possible option types
+ # * it can be a string - then it is verbal description of the type
+ "specified {option_name} is not valid, use {hint}"
+ .format(
+ hint=(
+ ", ".join(sorted(info["allowed_types"])) if (
+ isinstance(info["allowed_types"], Iterable)
+ and
+ not is_string(info["allowed_types"])
+ ) else info["allowed_types"]
),
**info
)
,
+ codes.MUTUALLY_EXCLUSIVE_OPTIONS: lambda info:
+ # "{desc}options {option_names} are muttually exclusive".format(
+ "Only one of {desc}options {option_names} can be used".format(
+ desc=format_optional(info["option_type"], "{0} "),
+ option_names=(
+ joined_list(sorted(info["option_names"])[:-1])
+ +
+ " and '{0}'".format(sorted(info["option_names"])[-1])
+ )
+ )
+ ,
+
codes.EMPTY_ID: lambda info:
"{id_description} cannot be empty"
.format(**info)
@@ -147,11 +293,17 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
codes.RUN_EXTERNAL_PROCESS_STARTED: lambda info:
- "Running: {command}\n{stdin_part}".format(
+ "Running: {command}\nEnvironment:{env_part}\n{stdin_part}".format(
stdin_part=format_optional(
info["stdin"],
"--Debug Input Start--\n{0}\n--Debug Input End--\n"
),
+ env_part=(
+ "" if not info["environment"] else "\n" + "\n".join([
+ " {0}={1}".format(key, val)
+ for key, val in sorted(info["environment"].items())
+ ])
+ ),
**info
)
,
@@ -174,6 +326,15 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
.format(**info)
,
+ codes.NODE_COMMUNICATION_DEBUG_INFO: lambda info:
+ (
+ "Communication debug info for calling: {target}\n"
+ "--Debug Communication Info Start--\n"
+ "{data}\n"
+ "--Debug Communication Info End--\n"
+ ).format(**info)
+ ,
+
codes.NODE_COMMUNICATION_STARTED: lambda info:
"Sending HTTP Request to: {target}\n{data_part}".format(
data_part=format_optional(
@@ -232,6 +393,33 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
.format(**info)
,
+ codes.NODE_COMMUNICATION_ERROR_TIMED_OUT: lambda info:
+ "{node}: Connection timeout ({reason})"
+ .format(**info)
+ ,
+
+ codes.NODE_COMMUNICATION_PROXY_IS_SET:
+ "Proxy is set in environment variables, try disabling it"
+ ,
+
+ codes.CANNOT_ADD_NODE_IS_IN_CLUSTER: lambda info:
+ "cannot add the node '{node}' because it is in a cluster"
+ .format(**info)
+ ,
+
+ codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE: lambda info:
+ (
+ "cannot add the node '{node}' because it is running service"
+ " '{service}'{guess}"
+ ).format(
+ guess=(
+ "" if info["service"] not in ["pacemaker", "pacemaker_remote"]
+ else " (is not the node already in a cluster?)"
+ ),
+ **info
+ )
+ ,
+
codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED:
"Sending updated corosync.conf to nodes..."
,
@@ -403,19 +591,18 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
.format(**info)
,
- codes.ID_NOT_FOUND: lambda info:
- "{desc}'{id}' does not exist"
+ codes.ID_BELONGS_TO_UNEXPECTED_TYPE: id_belongs_to_unexpected_type,
+
+ codes.ID_NOT_FOUND: id_not_found,
+
+ codes.STONITH_RESOURCES_DO_NOT_EXIST: lambda info:
+ "Stonith resource(s) '{stonith_id_list}' do not exist"
.format(
- desc=format_optional(info["id_description"], "{0} "),
+ stonith_id_list="', '".join(info["stonith_ids"]),
**info
)
,
- codes.RESOURCE_DOES_NOT_EXIST: lambda info:
- "Resource '{resource_id}' does not exist"
- .format(**info)
- ,
-
codes.CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET: lambda info:
"Role '{role_id}' is already asigned to '{target_id}'"
.format(**info)
@@ -431,6 +618,37 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
.format(**info)
,
+ codes.CIB_FENCING_LEVEL_ALREADY_EXISTS: lambda info:
+ (
+ "Fencing level for '{target}' at level '{level}' "
+ "with device(s) '{device_list}' already exists"
+ ).format(
+ device_list=",".join(info["devices"]),
+ target=format_fencing_level_target(
+ info["target_type"], info["target_value"]
+ ),
+ **info
+ )
+ ,
+
+ codes.CIB_FENCING_LEVEL_DOES_NOT_EXIST: lambda info:
+ "Fencing level {part_target}{part_level}{part_devices}does not exist"
+ .format(
+ part_target=(
+ "for '{0}' ".format(format_fencing_level_target(
+ info["target_type"], info["target_value"]
+ ))
+ if info["target_type"] and info["target_value"]
+ else ""
+ ),
+ part_level=format_optional(info["level"], "at level '{0}' "),
+ part_devices=format_optional(
+ ",".join(info["devices"]) if info["devices"] else "",
+ "with device(s) '{0}' "
+ )
+ )
+ ,
+
codes.CIB_LOAD_ERROR: "unable to get cib",
codes.CIB_LOAD_ERROR_SCOPE_MISSING: lambda info:
@@ -452,6 +670,11 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
.format(**info)
,
+ codes.CIB_SAVE_TMP_ERROR: lambda info:
+ "Unable to save CIB to a temporary file: {reason}"
+ .format(**info)
+ ,
+
codes.CRM_MON_ERROR:
"error running crm_mon, is pacemaker running?"
,
@@ -460,20 +683,31 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
"cannot load cluster status, xml does not conform to the schema"
,
- codes.RESOURCE_WAIT_NOT_SUPPORTED:
+ codes.WAIT_FOR_IDLE_NOT_SUPPORTED:
"crm_resource does not support --wait, please upgrade pacemaker"
,
- codes.RESOURCE_WAIT_TIMED_OUT: lambda info:
+ codes.WAIT_FOR_IDLE_NOT_LIVE_CLUSTER:
+ "Cannot use '-f' together with '--wait'"
+ ,
+
+ codes.WAIT_FOR_IDLE_TIMED_OUT: lambda info:
"waiting timeout\n\n{reason}"
.format(**info)
,
- codes.RESOURCE_WAIT_ERROR: lambda info:
+ codes.WAIT_FOR_IDLE_ERROR: lambda info:
"{reason}"
.format(**info)
,
+ codes.RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE: lambda info:
+ (
+ "bundle '{bundle_id}' already contains resource '{resource_id}'"
+ ", a bundle may contain at most one resource"
+ ).format(**info)
+ ,
+
codes.RESOURCE_CLEANUP_ERROR: lambda info:
(
"Unable to cleanup resource: {resource}\n{reason}"
@@ -491,11 +725,84 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
).format(**info)
,
+ codes.RESOURCE_OPERATION_INTERVAL_DUPLICATION: lambda info: (
+ "multiple specification of the same operation with the same interval:\n"
+ +"\n".join([
+ "{0} with intervals {1}".format(name, ", ".join(intervals))
+ for name, intervals_list in info["duplications"].items()
+ for intervals in intervals_list
+ ])
+ ),
+
+ codes.RESOURCE_OPERATION_INTERVAL_ADAPTED: lambda info:
+ (
+ "changing a {operation_name} operation interval"
+ " from {original_interval}"
+ " to {adapted_interval} to make the operation unique"
+ ).format(**info)
+ ,
+
+ codes.RESOURCE_RUNNING_ON_NODES: resource_running_on_nodes,
+
+ codes.RESOURCE_DOES_NOT_RUN: lambda info:
+ "resource '{resource_id}' is not running on any node"
+ .format(**info)
+ ,
+
+ codes.RESOURCE_IS_UNMANAGED: lambda info:
+ "'{resource_id}' is unmanaged"
+ .format(**info)
+ ,
+
+ codes.RESOURCE_IS_GUEST_NODE_ALREADY: lambda info:
+ "the resource '{resource_id}' is already a guest node"
+ .format(**info)
+ ,
+
+ codes.RESOURCE_MANAGED_NO_MONITOR_ENABLED: lambda info:
+ (
+ "Resource '{resource_id}' has no enabled monitor operations."
+ " Re-run with '--monitor' to enable them."
+ )
+ .format(**info)
+ ,
+
codes.NODE_NOT_FOUND: lambda info:
- "node '{node}' does not appear to exist in configuration"
+ "{desc} '{node}' does not appear to exist in configuration".format(
+ desc=build_node_description(info["searched_types"]),
+ node=info["node"]
+ )
+ ,
+
+ codes.NODE_REMOVE_IN_PACEMAKER_FAILED: lambda info:
+ "unable to remove node '{node_name}' from pacemaker{reason_part}"
+ .format(
+ reason_part=format_optional(info["reason"], ": {0}"),
+ **info
+
+ )
+ ,
+
+ codes.NODE_TO_CLEAR_IS_STILL_IN_CLUSTER: lambda info:
+ (
+ "node '{node}' seems to be still in the cluster"
+ "; this command should be used only with nodes that have been"
+ " removed from the cluster"
+ )
.format(**info)
,
+ codes.MULTIPLE_RESULTS_FOUND: lambda info:
+ "multiple {result_type} {search_description} found: {what_found}"
+ .format(
+ what_found=joined_list(info["result_identifier_list"]),
+ search_description="" if not info["search_description"]
+ else "for '{0}'".format(info["search_description"])
+ ,
+ result_type=info["result_type"]
+ )
+ ,
+
codes.PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND: lambda info:
"unable to get local node name from pacemaker: {reason}"
.format(**info)
@@ -574,13 +881,24 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
codes.INVALID_RESOURCE_AGENT_NAME: lambda info:
(
"Invalid resource agent name '{name}'."
- " Use standard:provider:type or standard:type."
+ " Use standard:provider:type when standard is 'ocf' or"
+ " standard:type otherwise."
" List of standards and providers can be obtained by using commands"
" 'pcs resource standards' and 'pcs resource providers'"
)
.format(**info)
,
+ codes.INVALID_STONITH_AGENT_NAME: lambda info:
+ (
+ "Invalid stonith agent name '{name}'."
+ " List of agents can be obtained by using command"
+ " 'pcs stonith list'. Do not use the 'stonith:' prefix. Agent name"
+ " cannot contain the ':' character."
+ )
+ .format(**info)
+ ,
+
codes.AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE: lambda info:
(
"Multiple agents match '{agent}'"
@@ -632,11 +950,153 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
codes.SBD_DISABLING_STARTED: "Disabling SBD service...",
+ codes.SBD_DEVICE_INITIALIZATION_STARTED: lambda info:
+ "Initializing device(s) {devices}..."
+ .format(devices=", ".join(info["device_list"]))
+ ,
+
+ codes.SBD_DEVICE_INITIALIZATION_SUCCESS:
+ "Device(s) initialized successfully",
+
+ codes.SBD_DEVICE_INITIALIZATION_ERROR: lambda info:
+ "Initialization of device(s) failed: {reason}"
+ .format(**info)
+ ,
+
+ codes.SBD_DEVICE_LIST_ERROR: lambda info:
+ "Unable to get list of messages from device '{device}': {reason}"
+ .format(**info)
+ ,
+
+ codes.SBD_DEVICE_MESSAGE_ERROR: lambda info:
+ "Unable to set message '{message}' for node '{node}' on device "
+ "'{device}'"
+ .format(**info)
+ ,
+
+ codes.SBD_DEVICE_DUMP_ERROR: lambda info:
+ "Unable to get SBD headers from device '{device}': {reason}"
+ .format(**info)
+ ,
+
+ codes.FILES_DISTRIBUTION_STARTED: lambda info:
+ "Sending {description}{where}".format(
+ where=(
+ "" if not info["node_list"]
+ else " to " + joined_list(info["node_list"])
+ ),
+ description=info["description"] if info["description"]
+ else joined_list(info["file_list"])
+ )
+ ,
+
+ codes.FILE_DISTRIBUTION_SUCCESS: lambda info:
+ "{node}: successful distribution of the file '{file_description}'"
+ .format(
+ **info
+ )
+ ,
+
+
+ codes.FILE_DISTRIBUTION_ERROR: lambda info:
+ "{node}: unable to distribute file '{file_description}': {reason}"
+ .format(
+ **info
+ )
+ ,
+
+ codes.FILES_REMOVE_FROM_NODE_STARTED: lambda info:
+ "Requesting remove {description}{where}".format(
+ where=(
+ "" if not info["node_list"]
+ else " from " + joined_list(info["node_list"])
+ ),
+ description=info["description"] if info["description"]
+ else joined_list(info["file_list"])
+ )
+ ,
+
+ codes.FILE_REMOVE_FROM_NODE_SUCCESS: lambda info:
+ "{node}: successful removal of the file '{file_description}'"
+ .format(
+ **info
+ )
+ ,
+
+
+ codes.FILE_REMOVE_FROM_NODE_ERROR: lambda info:
+ "{node}: unable to remove file '{file_description}': {reason}"
+ .format(
+ **info
+ )
+ ,
+
+ codes.SERVICE_COMMANDS_ON_NODES_STARTED: lambda info:
+ "Requesting {description}{where}".format(
+ where=(
+ "" if not info["node_list"]
+ else " on " + joined_list(info["node_list"])
+ ),
+ description=info["description"] if info["description"]
+ else joined_list(info["action_list"])
+ )
+ ,
+
+ codes.SERVICE_COMMAND_ON_NODE_SUCCESS: lambda info:
+ "{node}: successful run of '{service_command_description}'"
+ .format(
+ **info
+ )
+ ,
+
+ codes.SERVICE_COMMAND_ON_NODE_ERROR: lambda info:
+ (
+ "{node}: service command failed:"
+ " {service_command_description}: {reason}"
+ )
+ .format(
+ **info
+ )
+ ,
+
+ codes.SBD_DEVICE_PATH_NOT_ABSOLUTE: lambda info:
+ "Device path '{device}'{on_node} is not absolute"
+ .format(
+ on_node=format_optional(
+ info["node"], " on node '{0}'".format(info["node"])
+ ),
+ **info
+ )
+ ,
+
+ codes.SBD_DEVICE_DOES_NOT_EXIST: lambda info:
+ "{node}: device '{device}' not found"
+ .format(**info)
+ ,
+
+ codes.SBD_DEVICE_IS_NOT_BLOCK_DEVICE: lambda info:
+ "{node}: device '{device}' is not a block device"
+ .format(**info)
+ ,
+
codes.INVALID_RESPONSE_FORMAT: lambda info:
"{node}: Invalid format of response"
.format(**info)
,
+ codes.SBD_NO_DEVICE_FOR_NODE: lambda info:
+ "No device defined for node '{node}'"
+ .format(**info)
+ ,
+
+ codes.SBD_TOO_MANY_DEVICES_FOR_NODE: lambda info:
+ (
+ "More than {max_devices} devices defined for node '{node}' "
+ "(devices: {devices})"
+ )
+ .format(devices=", ".join(info["device_list"]), **info)
+ ,
+
codes.SBD_NOT_INSTALLED: lambda info:
"SBD is not installed on node '{node}'"
.format(**info)
@@ -674,14 +1134,8 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
.format(**info)
,
- codes.CIB_ALERT_NOT_FOUND: lambda info:
- "Alert '{alert}' not found."
- .format(**info)
- ,
-
- codes.CIB_UPGRADE_SUCCESSFUL: lambda info:
+ codes.CIB_UPGRADE_SUCCESSFUL:
"CIB has been upgraded to the latest schema version."
- .format(**info)
,
codes.CIB_UPGRADE_FAILED: lambda info:
@@ -701,7 +1155,7 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
codes.FILE_ALREADY_EXISTS: lambda info:
"{node_prefix}{role_prefix}file {file_path} already exists"
.format(
- node_prefix=format_optional(info["node"], NODE_PREFIX),
+ node_prefix=format_optional(info["node"], NODE_PREFIX),
role_prefix=format_optional(info["file_role"], "{0} "),
**info
)
@@ -736,7 +1190,47 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
codes.LIVE_ENVIRONMENT_REQUIRED: lambda info:
"This command does not support {forbidden_options}"
- .format(forbidden_options=", ".join(info["forbidden_options"]))
+ .format(
+ forbidden_options=joined_list(info["forbidden_options"], {
+ "CIB": "-f",
+ "COROSYNC_CONF": "--corosync_conf",
+ })
+ )
+ ,
+
+ codes.LIVE_ENVIRONMENT_REQUIRED_FOR_LOCAL_NODE:
+ "Node(s) must be specified if -f is used"
+ ,
+
+ codes.NOLIVE_SKIP_FILES_DISTRIBUTION: lambda info:
+ (
+ "the distribution of {files} to {nodes} was skipped because command"
+ " does not run on live cluster (e.g. -f was used)."
+ " You will have to do it manually."
+ ).format(
+ files=joined_list(info["files_description"]),
+ nodes=joined_list(info["nodes"]),
+ )
+ ,
+ codes.NOLIVE_SKIP_FILES_REMOVE: lambda info:
+ (
+ "{files} remove from {nodes} was skipped because command"
+ " does not run on live cluster (e.g. -f was used)."
+ " You will have to do it manually."
+ ).format(
+ files=joined_list(info["files_description"]),
+ nodes=joined_list(info["nodes"]),
+ )
+ ,
+ codes.NOLIVE_SKIP_SERVICE_COMMAND_ON_NODES: lambda info:
+ (
+ "running '{command}' on {nodes} was skipped"
+ " because command does not run on live cluster (e.g. -f was"
+ " used). You will have to run it manually."
+ ).format(
+ command="{0} {1}".format(info["service"], info["command"]),
+ nodes=joined_list(info["nodes"]),
+ )
,
codes.COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD: lambda info:
@@ -758,4 +1252,22 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
"Unable to read {path}: {reason}"
.format(**info)
,
+ codes.USE_COMMAND_NODE_ADD_REMOTE: lambda info:
+ (
+ "this command is not sufficient for creating a remote connection,"
+ " use 'pcs cluster node add-remote'"
+ )
+ ,
+ codes.USE_COMMAND_NODE_ADD_GUEST: lambda info:
+ (
+ "this command is not sufficient for creating a guest node, use"
+ " 'pcs cluster node add-guest'"
+ )
+ ,
+ codes.USE_COMMAND_NODE_REMOVE_GUEST: lambda info:
+ (
+ "this command is not sufficient for removing a guest node, use"
+ " 'pcs cluster node remove-guest'"
+ )
+ ,
}
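
As an aside for readers new to this map: each entry turns a report item's info dict into text. The following standalone sketch (not part of the patch; the toy map below is made up for illustration, and it only distinguishes strings where the real builder checks for any Iterable) mimics how the INVALID_OPTION_VALUE builder copes with the overloaded "allowed_values" key:

    # Toy stand-in for one CODE_TO_MESSAGE_BUILDER_MAP entry.
    toy_builders = {
        "INVALID_OPTION_VALUE": lambda info:
            "'{option_value}' is not a valid {option_name} value, use {hint}"
            .format(
                hint=(
                    info["allowed_values"]
                    if isinstance(info["allowed_values"], str)
                    else ", ".join(sorted(info["allowed_values"]))
                ),
                **info
            )
        ,
    }

    # a list of allowed values is sorted and joined...
    print(toy_builders["INVALID_OPTION_VALUE"]({
        "option_name": "role", "option_value": "boss",
        "allowed_values": ["write", "read", "deny"],
    }))
    # ...while a plain string is used verbatim as a verbal description
    print(toy_builders["INVALID_OPTION_VALUE"]({
        "option_name": "timeout", "option_value": "x",
        "allowed_values": "a positive number of seconds",
    }))
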
diff --git a/pcs/cli/common/env.py b/pcs/cli/common/env_cli.py
similarity index 87%
rename from pcs/cli/common/env.py
rename to pcs/cli/common/env_cli.py
index 60f66a4..cfe08fc 100644
--- a/pcs/cli/common/env.py
+++ b/pcs/cli/common/env_cli.py
@@ -14,6 +14,8 @@ class Env(object):
self.groups = None
self.corosync_conf_data = None
self.booth = None
+ self.pacemaker = None
self.auth_tokens_getter = None
self.debug = False
self.cluster_conf_data = None
+ self.request_timeout = None
diff --git a/pcs/cli/common/env_file.py b/pcs/cli/common/env_file.py
new file mode 100644
index 0000000..56e6065
--- /dev/null
+++ b/pcs/cli/common/env_file.py
@@ -0,0 +1,75 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+import os.path
+from collections import namedtuple
+
+from pcs.cli.common import console_report
+from pcs.common import report_codes
+
+
+def report_missing(file_role, file_path):
+ console_report.error(
+ "{0} '{1}' does not exist".format(file_role, file_path)
+ )
+
+def is_missing_report(report, file_role_code):
+ return (
+ report.code == report_codes.FILE_DOES_NOT_EXIST
+ and
+ report.info["file_role"] == file_role_code
+ )
+
+def process_no_existing_file_expectation(file_role, env_file, file_path):
+ if(
+ env_file["no_existing_file_expected"]
+ and
+ os.path.exists(file_path)
+ ):
+ msg = "{0} {1} already exists".format(file_role, file_path)
+ if not env_file["can_overwrite_existing_file"]:
+ raise console_report.error(
+ "{0}, use --force to override".format(msg)
+ )
+ console_report.warn(msg)
+
+def write(env_file, file_path):
+ try:
+ f = open(file_path, "wb" if env_file.get("is_binary", False) else "w")
+ f.write(env_file["content"])
+ f.close()
+ except EnvironmentError as e:
+ raise console_report.error(
+ "Unable to write {0}: {1}".format(file_path, e.strerror)
+ )
+
+def read(path, is_binary=False):
+ try:
+ mode = "rb" if is_binary else "r"
+ return {
+ "content": open(path, mode).read() if os.path.isfile(path) else None
+ }
+ except EnvironmentError as e:
+ raise console_report.error(
+ "Unable to read {0}: {1}".format(path, e.strerror)
+ )
+
+MissingFileCandidateInfo = namedtuple(
+ "MissingFileCandidateInfo",
+ "code desc path"
+)
+
+def evaluate_for_missing_files(exception, file_info_list):
+ """
+ file_info_list is a list of MissingFileCandidateInfo describing the files
+ that may be missing
+ """
+ for report in exception.args:
+ for file_info in file_info_list:
+ if is_missing_report(report, file_info.code):
+ report_missing(file_info.desc, file_info.path)
+ exception.sign_processed(report)
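
For orientation, a small usage sketch of the helpers above (assuming pcs is importable; the file paths and content are made up):

    from pcs.cli.common import env_file

    # read() returns a dict: {"content": <text, or None when the file is missing>}
    conf = env_file.read("/tmp/example-booth.conf")

    # write() takes the same dict shape; "is_binary" switches to "wb" mode
    env_file.write(
        {"content": "site = 192.168.122.31\n"},
        "/tmp/example-booth.conf.copy",
    )
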
diff --git a/pcs/cli/common/errors.py b/pcs/cli/common/errors.py
index 19ca734..47eca00 100644
--- a/pcs/cli/common/errors.py
+++ b/pcs/cli/common/errors.py
@@ -5,6 +5,12 @@ from __future__ import (
unicode_literals,
)
+
+ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE = (
+ "Cannot specify both --all and a list of nodes."
+)
+
+
class CmdLineInputError(Exception):
"""
    Exception expressing that the user entered an incorrect command on the command line.
diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
index fb5a904..683ba4d 100644
--- a/pcs/cli/common/lib_wrapper.py
+++ b/pcs/cli/common/lib_wrapper.py
@@ -18,9 +18,14 @@ from pcs.lib.commands import (
acl,
alert,
booth,
+ cluster,
+ fencing_topology,
+ node,
qdevice,
quorum,
resource_agent,
+ resource,
+ stonith,
sbd,
stonith_agent,
)
@@ -49,6 +54,7 @@ def cli_env_to_lib_env(cli_env):
booth=cli_env.booth,
auth_tokens_getter=cli_env.auth_tokens_getter,
cluster_conf_data=cli_env.cluster_conf_data,
+ request_timeout=cli_env.request_timeout,
)
def lib_env_to_cli_env(lib_env, cli_env):
@@ -176,6 +182,22 @@ def load_module(env, middleware_factory, name):
}
)
+ if name == "cluster":
+ return bind_all(
+ env,
+ middleware.build(
+ middleware_factory.cib,
+ middleware_factory.corosync_conf_existing,
+ ),
+ {
+ "node_add_remote": cluster.node_add_remote,
+ "node_add_guest": cluster.node_add_guest,
+ "node_remove_remote": cluster.node_remove_remote,
+ "node_remove_guest": cluster.node_remove_guest,
+ "node_clear": cluster.node_clear,
+ }
+ )
+
if name == 'constraint_colocation':
return bind_all(
env,
@@ -208,6 +230,37 @@ def load_module(env, middleware_factory, name):
}
)
+ if name == "fencing_topology":
+ return bind_all(
+ env,
+ middleware.build(middleware_factory.cib),
+ {
+ "add_level": fencing_topology.add_level,
+ "get_config": fencing_topology.get_config,
+ "remove_all_levels": fencing_topology.remove_all_levels,
+ "remove_levels_by_params":
+ fencing_topology.remove_levels_by_params,
+ "verify": fencing_topology.verify,
+ }
+ )
+
+ if name == "node":
+ return bind_all(
+ env,
+ middleware.build(middleware_factory.cib),
+ {
+ "maintenance_unmaintenance_all":
+ node.maintenance_unmaintenance_all,
+ "maintenance_unmaintenance_list":
+ node.maintenance_unmaintenance_list,
+ "maintenance_unmaintenance_local":
+ node.maintenance_unmaintenance_local,
+ "standby_unstandby_all": node.standby_unstandby_all,
+ "standby_unstandby_list": node.standby_unstandby_list,
+ "standby_unstandby_local": node.standby_unstandby_local,
+ }
+ )
+
if name == "qdevice":
return bind_all(
env,
@@ -261,6 +314,39 @@ def load_module(env, middleware_factory, name):
}
)
+ if name == "resource":
+ return bind_all(
+ env,
+ middleware.build(
+ middleware_factory.cib
+ ),
+ {
+ "bundle_create": resource.bundle_create,
+ "bundle_update": resource.bundle_update,
+ "create": resource.create,
+ "create_as_master": resource.create_as_master,
+ "create_as_clone": resource.create_as_clone,
+ "create_in_group": resource.create_in_group,
+ "create_into_bundle": resource.create_into_bundle,
+ "disable": resource.disable,
+ "enable": resource.enable,
+ "manage": resource.manage,
+ "unmanage": resource.unmanage,
+ }
+ )
+ if name == "stonith":
+ return bind_all(
+ env,
+ middleware.build(
+ middleware_factory.cib
+ ),
+ {
+ "create": stonith.create,
+ "create_in_group": stonith.create_in_group,
+ }
+ )
+
+
if name == "sbd":
return bind_all(
env,
@@ -271,6 +357,9 @@ def load_module(env, middleware_factory, name):
"get_cluster_sbd_status": sbd.get_cluster_sbd_status,
"get_cluster_sbd_config": sbd.get_cluster_sbd_config,
"get_local_sbd_config": sbd.get_local_sbd_config,
+ "initialize_block_devices": sbd.initialize_block_devices,
+ "get_local_devices_info": sbd.get_local_devices_info,
+ "set_message": sbd.set_message,
}
)
diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py
index d17c5da..465cb96 100644
--- a/pcs/cli/common/parse_args.py
+++ b/pcs/cli/common/parse_args.py
@@ -7,48 +7,300 @@ from __future__ import (
from pcs.cli.common.errors import CmdLineInputError
+
+ARG_TYPE_DELIMITER = "%"
+
+# h = help, f = file,
+# p = password (cluster auth), u = user (cluster auth),
+# V = verbose (cluster verify)
+PCS_SHORT_OPTIONS = "hf:p:u:V"
+PCS_LONG_OPTIONS = [
+ "debug", "version", "help", "fullhelp",
+ "force", "skip-offline", "autocorrect", "interactive", "autodelete",
+ "all", "full", "groups", "local", "wait", "config",
+ "start", "enable", "disabled", "off", "request-timeout=",
+ "pacemaker", "corosync",
+ "no-default-ops", "defaults", "nodesc",
+ "clone", "master", "name=", "group=", "node=",
+ "from=", "to=", "after=", "before=",
+ "transport=", "rrpmode=", "ipv6",
+ "addr0=", "bcast0=", "mcast0=", "mcastport0=", "ttl0=", "broadcast0",
+ "addr1=", "bcast1=", "mcast1=", "mcastport1=", "ttl1=", "broadcast1",
+ "wait_for_all=", "auto_tie_breaker=", "last_man_standing=",
+ "last_man_standing_window=",
+ "token=", "token_coefficient=", "consensus=", "join=",
+ "miss_count_const=", "fail_recv_const=",
+ "corosync_conf=", "cluster_conf=",
+ "booth-conf=", "booth-key=",
+ "remote", "watchdog=", "device=",
+ #in pcs status - do not display resource status on inactive node
+ "hide-inactive",
+ # pcs resource (un)manage - enable or disable monitor operations
+ "monitor",
+]
+
def split_list(arg_list, separator):
"""return list of list of arg_list using separator as delimiter"""
separator_indexes = [i for i, x in enumerate(arg_list) if x == separator]
bounds = zip([0]+[i+1 for i in separator_indexes], separator_indexes+[None])
return [arg_list[i:j] for i, j in bounds]
+def split_option(arg):
+ """
+ Get (key, value) from a key=value commandline argument.
+
+ Split the argument at the first "=" and return the resulting parts. Raise
+ CmdLineInputError if the argument cannot be split.
+
+ string arg -- commandline argument
+ """
+ if "=" not in arg:
+ raise CmdLineInputError("missing value of '{0}' option".format(arg))
+ if arg.startswith("="):
+ raise CmdLineInputError("missing key in '{0}' option".format(arg))
+ return arg.split("=", 1)
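
A minimal sketch of this contract (assuming pcs is importable), since only the first "=" is significant:

    from pcs.cli.common.parse_args import split_option

    # only the first "=" separates key from value
    assert split_option("name=value") == ["name", "value"]
    assert split_option("url=http://host?a=b") == ["url", "http://host?a=b"]
    # "name" (no "=") and "=value" (empty key) raise CmdLineInputError
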
+
def prepare_options(cmdline_args):
- """return dictionary of options from comandline key=value args"""
+ """return dictionary of options from commandline key=value args"""
options = dict()
for arg in cmdline_args:
- if "=" not in arg:
- raise CmdLineInputError("missing value of '{0}' option".format(arg))
- if arg.startswith("="):
- raise CmdLineInputError("missing key in '{0}' option".format(arg))
-
- name, value = arg.split("=", 1)
- options[name] = value
+ name, value = split_option(arg)
+ if name not in options:
+ options[name] = value
+ elif options[name] != value:
+ raise CmdLineInputError(
+ "duplicate option '{0}' with different values '{1}' and '{2}'"
+ .format(name, options[name], value)
+ )
return options
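
The duplicate handling added here can be illustrated with a short sketch (assuming pcs is importable; the option names are made up):

    from pcs.cli.common.parse_args import prepare_options
    from pcs.cli.common.errors import CmdLineInputError

    # repeating an option with the same value is tolerated
    assert prepare_options(["a=1", "a=1", "b=2"]) == {"a": "1", "b": "2"}

    # repeating it with a different value is rejected
    try:
        prepare_options(["a=1", "a=2"])
    except CmdLineInputError as e:
        print(e)  # duplicate option 'a' with different values '1' and '2'
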
def group_by_keywords(
arg_list, keyword_set,
- implicit_first_keyword=None, keyword_repeat_allowed=True,
+ implicit_first_group_key=None, keyword_repeat_allowed=True,
+ group_repeated_keywords=None, only_found_keywords=False
):
- groups = dict([(keyword, []) for keyword in keyword_set])
- if implicit_first_keyword:
- groups[implicit_first_keyword] = []
+ """
+ Return a dictionary with keywords as keys and the following arguments as
+ values. For example, when the keywords are "first" and "second", then for
+ arg_list ["first", 1, 2, "second", 3] it returns
+ {"first": [1, 2], "second": [3]}
- if not arg_list:
- return groups
+ list arg_list is the list of commandline arguments containing keywords
+ set keyword_set contains all expected keywords
+ string implicit_first_group_key is the key for capturing arguments before
+ the occurrence of the first keyword. implicit_first_group_key is not
+ a keyword => its occurrence in args is considered an ordinary argument.
+ bool keyword_repeat_allowed is a flag to turn on/off checking the
+ uniqueness of each keyword in arg_list.
+ list group_repeated_keywords contains keywords for which each occurrence
+ is packed separately. For example, when the keywords are "first" and
+ "second" and group_repeated_keywords is ["first"], then for arg_list
+ ["first", 1, 2, "second", 3, "first", 4] it returns
+ {"first": [[1, 2], [4]], "second": [3]}.
+ Repeating is always allowed for these keywords.
+ bool only_found_keywords is a flag deciding whether to include keywords
+ that did not appear in arg_list.
+ """
- used_keywords = []
- if implicit_first_keyword:
- used_keywords.append(implicit_first_keyword)
- elif arg_list[0] not in keyword_set:
- raise CmdLineInputError()
+ def get_keywords_for_grouping():
+ if not group_repeated_keywords:
+ return []
+ #implicit_first_group_key is not a keyword: when it is in
+ #group_repeated_keywords but not in keyword_set, it is considered
+ #unknown.
+ unknown_keywords = set(group_repeated_keywords) - set(keyword_set)
+ if unknown_keywords:
+ #to avoid developer mistake
+ raise AssertionError(
+ "Keywords in grouping not in keyword set: {0}"
+ .format(", ".join(unknown_keywords))
+ )
+ return group_repeated_keywords
- for arg in arg_list:
- if arg in list(groups.keys()):
- if arg in used_keywords and not keyword_repeat_allowed:
+ def get_completed_groups():
+ completed_groups = groups.copy()
+ if not only_found_keywords:
+ for keyword in keyword_set:
+ if keyword not in completed_groups:
+ completed_groups[keyword] = []
+ if(
+ implicit_first_group_key
+ and
+ implicit_first_group_key not in completed_groups
+ ):
+ completed_groups[implicit_first_group_key] = []
+ return completed_groups
+
+ def is_acceptable_keyword_occurence(keyword):
+ return (
+ keyword not in groups.keys()
+ or
+ keyword_repeat_allowed
+ or
+ keyword in keywords_for_grouping
+ )
+
+ def process_keyword(keyword):
+ if not is_acceptable_keyword_occurence(keyword):
+ raise CmdLineInputError(
+ "'{0}' cannot be used more than once".format(keyword)
+ )
+ groups.setdefault(keyword, [])
+ if keyword in keywords_for_grouping:
+ groups[keyword].append([])
+
+ def process_non_keyword(keyword, arg):
+ place = groups[keyword]
+ if keyword in keywords_for_grouping:
+ place = place[-1]
+ place.append(arg)
+
+ groups = {}
+ keywords_for_grouping = get_keywords_for_grouping()
+
+ if arg_list:
+ current_keyword = None
+ if arg_list[0] not in keyword_set:
+ if not implicit_first_group_key:
raise CmdLineInputError()
- used_keywords.append(arg)
- else:
- groups[used_keywords[-1]].append(arg)
+ process_keyword(implicit_first_group_key)
+ current_keyword = implicit_first_group_key
+
+ for arg in arg_list:
+ if arg in keyword_set:
+ process_keyword(arg)
+ current_keyword = arg
+ else:
+ process_non_keyword(current_keyword, arg)
+
+ return get_completed_groups()
- return groups
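
A runnable restatement of the docstring examples above (assuming pcs is importable):

    from pcs.cli.common.parse_args import group_by_keywords

    # plain grouping: arguments are attached to the preceding keyword
    assert group_by_keywords(
        ["first", "1", "2", "second", "3"],
        set(["first", "second"]),
    ) == {"first": ["1", "2"], "second": ["3"]}

    # group_repeated_keywords packs each occurrence of "first" separately
    assert group_by_keywords(
        ["first", "1", "2", "second", "3", "first", "4"],
        set(["first", "second"]),
        group_repeated_keywords=["first"],
    ) == {"first": [["1", "2"], ["4"]], "second": ["3"]}
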
+def parse_typed_arg(arg, allowed_types, default_type):
+ """
+ Get (type, value) from a typed commandline argument.
+
+ Split the argument by the type separator and return the type and the value.
+ Raise CmdLineInputError if the argument format or type is not valid.
+ string arg -- commandline argument
+ Iterable allowed_types -- list of allowed argument types
+ string default_type -- type to return if the argument doesn't specify a type
+ """
+ if ARG_TYPE_DELIMITER not in arg:
+ return default_type, arg
+ arg_type, arg_value = arg.split(ARG_TYPE_DELIMITER, 1)
+ if not arg_type:
+ return default_type, arg_value
+ if arg_type not in allowed_types:
+ raise CmdLineInputError(
+ "'{arg_type}' is not an allowed type for '{arg_full}', use {hint}"
+ .format(
+ arg_type=arg_type,
+ arg_full=arg,
+ hint=", ".join(sorted(allowed_types))
+ )
+ )
+ return arg_type, arg_value
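
For example (assuming pcs is importable; the type names below are made up and merely resemble fencing topology target types):

    from pcs.cli.common.parse_args import parse_typed_arg

    allowed = ["node", "regexp", "attrib"]
    # no delimiter -> the default type is used
    assert parse_typed_arg("node1", allowed, "node") == ("node", "node1")
    # explicit type before the "%" delimiter
    assert parse_typed_arg("regexp%node.*", allowed, "node") == ("regexp", "node.*")
    # "%value" (empty type) also falls back to the default type
    assert parse_typed_arg("%node1", allowed, "node") == ("node", "node1")
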
+
+def is_num(arg):
+ return arg.isdigit() or arg.lower() == "infinity"
+
+def is_negative_num(arg):
+ return arg.startswith("-") and is_num(arg[1:])
+
+def is_short_option_expecting_value(arg):
+ return (
+ len(arg) == 2
+ and
+ arg[0] == "-"
+ and
+ "{0}:".format(arg[1]) in PCS_SHORT_OPTIONS
+ )
+
+def is_long_option_expecting_value(arg):
+ return (
+ len(arg) > 2
+ and
+ arg[0:2] == "--"
+ and
+ "{0}=".format(arg[2:]) in PCS_LONG_OPTIONS
+ )
+
+def is_option_expecting_value(arg):
+ return (
+ is_short_option_expecting_value(arg)
+ or
+ is_long_option_expecting_value(arg)
+ )
+
+def filter_out_non_option_negative_numbers(arg_list):
+ """
+ Return arg_list without non-option negative numbers.
+ Negative numbers following an option expecting a value are kept.
+
+ There is a problematic legacy here.
+ The argument "--" has a special meaning: it can be used to signal that no
+ more options will follow. This would solve the problem with negative
+ numbers in a standard way: there would be no special approach to negative
+ numbers, everything would be left in the hands of users. But now it would
+ be a backward incompatible change.
+
+ list arg_list contains command line arguments
+ """
+ args_without_negative_nums = []
+ for i, arg in enumerate(arg_list):
+ prev_arg = arg_list[i-1] if i > 0 else ""
+ if not is_negative_num(arg) or is_option_expecting_value(prev_arg):
+ args_without_negative_nums.append(arg)
+
+ return args_without_negative_nums
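
A short sketch of the special-casing described above (assuming pcs is importable; the arguments are made up):

    from pcs.cli.common.parse_args import (
        filter_out_non_option_negative_numbers,
    )

    # bare negative numbers are dropped...
    assert filter_out_non_option_negative_numbers(
        ["resource", "create", "-1"]
    ) == ["resource", "create"]
    # ...but a negative number that is the value of an option is kept
    assert filter_out_non_option_negative_numbers(
        ["-f", "-1", "score=-100"]
    ) == ["-f", "-1", "score=-100"]
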
+
+def filter_out_options(arg_list):
+ Return arg_list without options and their values. Plain arguments, "-"
+ and negative numbers are kept.
+ Return arg_list without options and its negative numbers.
+
+ list arg_list contains command line arguments
+ """
+ args_without_options = []
+ for i, arg in enumerate(arg_list):
+ prev_arg = arg_list[i-1] if i > 0 else ""
+ if(
+ not is_option_expecting_value(prev_arg)
+ and (
+ not arg.startswith("-")
+ or
+ arg == "-"
+ or
+ is_negative_num(arg)
+ )
+ ):
+ args_without_options.append(arg)
+ return args_without_options
+
+def upgrade_args(arg_list):
+ """
+ Return modified copy of arg_list.
+ This function transforms some old syntax to new syntax to keep backward
+ compatibility.
+
+ list arg_list contains command line arguments
+ """
+ upgraded_args = []
+ args_without_options = filter_out_options(arg_list)
+ for arg in arg_list:
+ if arg in ["--cloneopt", "--clone"]:
+ #for every command - kept as it was previously
+ upgraded_args.append("clone")
+ elif arg.startswith("--cloneopt="):
+ #for every command - kept as it was previously
+ upgraded_args.append("clone")
+ upgraded_args.append(arg.split('=', 1)[1])
+ elif(
+ #only for resource create - currently the only known problematic
+ #place
+ arg == "--master"
+ and
+ args_without_options[:2] == ["resource", "create"]
+ ):
+ upgraded_args.append("master")
+ else:
+ upgraded_args.append(arg)
+ return upgraded_args
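
The backward-compatibility rewrites can be sketched as follows (assuming pcs is importable):

    from pcs.cli.common.parse_args import upgrade_args

    # --clone / --cloneopt become the "clone" keyword for every command
    assert upgrade_args(["resource", "create", "R", "--clone"]) == \
        ["resource", "create", "R", "clone"]
    assert upgrade_args(["--cloneopt=2"]) == ["clone", "2"]

    # --master becomes "master", but only for "resource create"
    assert upgrade_args(["resource", "create", "R", "--master"]) == \
        ["resource", "create", "R", "master"]
    assert upgrade_args(["resource", "meta", "R", "--master"]) == \
        ["resource", "meta", "R", "--master"]
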
diff --git a/pcs/cli/common/reports.py b/pcs/cli/common/reports.py
index 3178532..064be11 100644
--- a/pcs/cli/common/reports.py
+++ b/pcs/cli/common/reports.py
@@ -6,6 +6,7 @@ from __future__ import (
)
import sys
+import inspect
from functools import partial
from pcs.cli.booth.console_report import (
@@ -36,45 +37,74 @@ def build_message_from_report(code_builder_map, report_item, force_text=""):
if report_item.code not in code_builder_map:
return build_default_message_from_report(report_item, force_text)
- template = code_builder_map[report_item.code]
+ message = code_builder_map[report_item.code]
#Sometimes report item info is not needed for message building.
- #In this case template is string. Otherwise, template is callable.
- if callable(template):
- try:
- template = template(report_item.info)
- except(TypeError, KeyError):
- return build_default_message_from_report(report_item, force_text)
-
+ #In that case the message is a string. Otherwise the message is a callable.
+ if not callable(message):
+ return message + force_text
+
+ try:
+ # A functools.partial object cannot be used with inspect because it is
+ # not a regular python function. We have to use the original function.
+ if isinstance(message, partial):
+ keywords = message.keywords if message.keywords is not None else {}
+ args = inspect.getargspec(message.func).args
+ del args[:len(message.args)]
+ args = [arg for arg in args if arg not in keywords]
+ else:
+ args = inspect.getargspec(message).args
+ if "force_text" in args:
+ return message(report_item.info, force_text)
+ return message(report_item.info) + force_text
+ except(TypeError, KeyError):
+ return build_default_message_from_report(report_item, force_text)
- #Message can contain {force} placeholder if there is need to have it on
- #specific position. Otherwise is appended to the end (if necessary). This
- #removes the need to explicitly specify placeholder for each message.
- if force_text and "{force}" not in template:
- template += "{force}"
- return template.format(force=force_text)
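
To make the dispatch above easier to follow, here is a standalone sketch (not part of the patch; the toy map and build() helper are made up) of the three builder shapes it supports: a plain string, a callable taking info, and a callable that also takes force_text:

    import inspect

    toy_map = {
        "PLAIN": "a fixed message",
        "INFO_ONLY": lambda info: "node {node} failed".format(**info),
        "WITH_FORCE": lambda info, force_text:
            "unable to stop {resource}{force}".format(force=force_text, **info),
    }

    def build(code, info, force_text=""):
        message = toy_map[code]
        if not callable(message):
            return message + force_text
        # getargspec mirrors the patch; newer Python would use getfullargspec
        if "force_text" in inspect.getargspec(message).args:
            return message(info, force_text)
        return message(info) + force_text

    print(build("PLAIN", {}))
    print(build("INFO_ONLY", {"node": "node1"}, ", use --force to override"))
    print(build("WITH_FORCE", {"resource": "R"}, ", use --force to override"))
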
build_report_message = partial(build_message_from_report, __CODE_BUILDER_MAP)
class LibraryReportProcessorToConsole(object):
def __init__(self, debug=False):
self.debug = debug
+ self.items = []
+
+ def append(self, report_item):
+ self.items.append(report_item)
+ return self
+
+ def extend(self, report_item_list):
+ self.items.extend(report_item_list)
+ return self
+
+ @property
+ def errors_count(self):
+ return len([
+ item for item in self.items
+ if item.severity == ReportItemSeverity.ERROR
+ ])
def process(self, report_item):
- self.process_list([report_item])
+ self.append(report_item)
+ self.send()
def process_list(self, report_item_list):
+ self.extend(report_item_list)
+ self.send()
+
+ def send(self):
errors = []
- for report_item in report_item_list:
+ for report_item in self.items:
if report_item.severity == ReportItemSeverity.ERROR:
errors.append(report_item)
elif report_item.severity == ReportItemSeverity.WARNING:
print("Warning: " + build_report_message(report_item))
elif self.debug or report_item.severity != ReportItemSeverity.DEBUG:
print(build_report_message(report_item))
+ self.items = []
if errors:
raise LibraryError(*errors)
+
def _prepare_force_text(report_item):
if report_item.forceable == codes.SKIP_OFFLINE_NODES:
return ", use --skip-offline to override"
diff --git a/pcs/cli/common/test/test_console_report.py b/pcs/cli/common/test/test_console_report.py
index 746dfe0..d80aee1 100644
--- a/pcs/cli/common/test/test_console_report.py
+++ b/pcs/cli/common/test/test_console_report.py
@@ -12,6 +12,11 @@ from pcs.cli.common.console_report import(
format_optional,
)
from pcs.common import report_codes as codes
+from pcs.common.fencing_topology import (
+ TARGET_TYPE_NODE,
+ TARGET_TYPE_REGEXP,
+ TARGET_TYPE_ATTRIBUTE,
+)
class IndentTest(TestCase):
def test_indent_list_of_lines(self):
@@ -43,7 +48,7 @@ class BuildInvalidOptionMessageTest(NameBuildTest):
self.assert_message_from_info(
"invalid TYPE option 'NAME', allowed options are: FIRST, SECOND",
{
- "option_name": "NAME",
+ "option_names": ["NAME"],
"option_type": "TYPE",
"allowed": sorted(["FIRST", "SECOND"]),
}
@@ -53,12 +58,51 @@ class BuildInvalidOptionMessageTest(NameBuildTest):
self.assert_message_from_info(
"invalid option 'NAME', allowed options are: FIRST, SECOND",
{
- "option_name": "NAME",
+ "option_names": ["NAME"],
"option_type": "",
"allowed": sorted(["FIRST", "SECOND"]),
}
)
+ def test_build_message_with_multiple_names(self):
+ self.assert_message_from_info(
+ "invalid options: 'ANOTHER', 'NAME', allowed option is FIRST",
+ {
+ "option_names": ["NAME", "ANOTHER"],
+ "option_type": "",
+ "allowed": ["FIRST"],
+ }
+ )
+
+class RequiredOptionIsMissing(NameBuildTest):
+ code = codes.REQUIRED_OPTION_IS_MISSING
+ def test_build_message_with_type(self):
+ self.assert_message_from_info(
+ "required TYPE option 'NAME' is missing",
+ {
+ "option_names": ["NAME"],
+ "option_type": "TYPE",
+ }
+ )
+
+ def test_build_message_without_type(self):
+ self.assert_message_from_info(
+ "required option 'NAME' is missing",
+ {
+ "option_names": ["NAME"],
+ "option_type": "",
+ }
+ )
+
+ def test_build_message_with_multiple_names(self):
+ self.assert_message_from_info(
+ "required options 'ANOTHER', 'NAME' are missing",
+ {
+ "option_names": ["NAME", "ANOTHER"],
+ "option_type": "",
+ }
+ )
+
class BuildInvalidOptionValueMessageTest(NameBuildTest):
code = codes.INVALID_OPTION_VALUE
def test_build_message_with_multiple_allowed_values(self):
@@ -155,13 +199,23 @@ class BuildInvalidIdTest(NameBuildTest):
}
)
-class BuildRunExternalaStartedTest(NameBuildTest):
+class BuildRunExternalStartedTest(NameBuildTest):
code = codes.RUN_EXTERNAL_PROCESS_STARTED
+ def test_build_message_minimal(self):
+ self.assert_message_from_info(
+ "Running: COMMAND\nEnvironment:\n",
+ {
+ "command": "COMMAND",
+ "stdin": "",
+ "environment": dict(),
+ }
+ )
+
def test_build_message_with_stdin(self):
self.assert_message_from_info(
(
- "Running: COMMAND\n"
+ "Running: COMMAND\nEnvironment:\n"
"--Debug Input Start--\n"
"STDIN\n"
"--Debug Input End--\n"
@@ -169,18 +223,58 @@ class BuildRunExternalaStartedTest(NameBuildTest):
{
"command": "COMMAND",
"stdin": "STDIN",
+ "environment": dict(),
}
)
- def test_build_message_without_stdin(self):
+ def test_build_message_with_env(self):
self.assert_message_from_info(
- "Running: COMMAND\n",
+ (
+ "Running: COMMAND\nEnvironment:\n"
+ " env_a=A\n"
+ " env_b=B\n"
+ ),
{
"command": "COMMAND",
"stdin": "",
+ "environment": {"env_a": "A", "env_b": "B",},
+ }
+ )
+
+ def test_build_message_maximal(self):
+ self.assert_message_from_info(
+ (
+ "Running: COMMAND\nEnvironment:\n"
+ " env_a=A\n"
+ " env_b=B\n"
+ "--Debug Input Start--\n"
+ "STDIN\n"
+ "--Debug Input End--\n"
+ ),
+ {
+ "command": "COMMAND",
+ "stdin": "STDIN",
+ "environment": {"env_a": "A", "env_b": "B",},
+ }
+ )
+
+ def test_insidious_environment(self):
+ self.assert_message_from_info(
+ (
+ "Running: COMMAND\nEnvironment:\n"
+ " test=a:{green},b:{red}\n"
+ "--Debug Input Start--\n"
+ "STDIN\n"
+ "--Debug Input End--\n"
+ ),
+ {
+ "command": "COMMAND",
+ "stdin": "STDIN",
+ "environment": {"test": "a:{green},b:{red}",},
}
)
+
class BuildNodeCommunicationStartedTest(NameBuildTest):
code = codes.NODE_COMMUNICATION_STARTED
@@ -214,6 +308,10 @@ class FormatOptionalTest(TestCase):
def test_info_key_is_not_falsy(self):
self.assertEqual("A: ", format_optional("A", "{0}: "))
+ def test_default_value(self):
+ self.assertEqual("DEFAULT", format_optional("", "{0}: ", "DEFAULT"))
+
+
class AgentNameGuessedTest(NameBuildTest):
code = codes.AGENT_NAME_GUESSED
def test_build_message_with_data(self):
@@ -230,12 +328,834 @@ class InvalidResourceAgentNameTest(NameBuildTest):
def test_build_message_with_data(self):
self.assert_message_from_info(
"Invalid resource agent name ':name'."
- " Use standard:provider:type or standard:type."
- " List of standards and providers can be obtained by using"
- " commands 'pcs resource standards' and"
+ " Use standard:provider:type when standard is 'ocf' or"
+ " standard:type otherwise. List of standards and providers can"
+ " be obtained by using commands 'pcs resource standards' and"
" 'pcs resource providers'"
,
{
"name": ":name",
}
)
+
+class InvalidiStonithAgentNameTest(NameBuildTest):
+ code = codes.INVALID_STONITH_AGENT_NAME
+ def test_build_message_with_data(self):
+ self.assert_message_from_info(
+ "Invalid stonith agent name 'fence:name'. List of agents can be"
+ " obtained by using command 'pcs stonith list'. Do not use the"
+ " 'stonith:' prefix. Agent name cannot contain the ':'"
+ " character."
+ ,
+ {
+ "name": "fence:name",
+ }
+ )
+
+class InvalidOptionType(NameBuildTest):
+ code = codes.INVALID_OPTION_TYPE
+ def test_allowed_string(self):
+ self.assert_message_from_info(
+ "specified option name is not valid, use allowed types",
+ {
+ "option_name": "option name",
+ "allowed_types": "allowed types",
+ }
+ )
+
+ def test_allowed_list(self):
+ self.assert_message_from_info(
+ "specified option name is not valid, use allowed, types",
+ {
+ "option_name": "option name",
+ "allowed_types": ["allowed", "types"],
+ }
+ )
+
+class StonithResourcesDoNotExist(NameBuildTest):
+ code = codes.STONITH_RESOURCES_DO_NOT_EXIST
+ def test_success(self):
+ self.assert_message_from_info(
+ "Stonith resource(s) 'device1', 'device2' do not exist",
+ {
+ "stonith_ids": ["device1", "device2"],
+ }
+ )
+
+class FencingLevelAlreadyExists(NameBuildTest):
+ code = codes.CIB_FENCING_LEVEL_ALREADY_EXISTS
+ def test_target_node(self):
+ self.assert_message_from_info(
+ "Fencing level for 'nodeA' at level '1' with device(s) "
+ "'device1,device2' already exists",
+ {
+ "level": "1",
+ "target_type": TARGET_TYPE_NODE,
+ "target_value": "nodeA",
+ "devices": ["device1", "device2"],
+ }
+ )
+
+ def test_target_pattern(self):
+ self.assert_message_from_info(
+ "Fencing level for 'node-\d+' at level '1' with device(s) "
+ "'device1,device2' already exists",
+ {
+ "level": "1",
+ "target_type": TARGET_TYPE_REGEXP,
+ "target_value": "node-\d+",
+ "devices": ["device1", "device2"],
+ }
+ )
+
+ def test_target_attribute(self):
+ self.assert_message_from_info(
+ "Fencing level for 'name=value' at level '1' with device(s) "
+ "'device1,device2' already exists",
+ {
+ "level": "1",
+ "target_type": TARGET_TYPE_ATTRIBUTE,
+ "target_value": ("name", "value"),
+ "devices": ["device1", "device2"],
+ }
+ )
+
+class FencingLevelDoesNotExist(NameBuildTest):
+ code = codes.CIB_FENCING_LEVEL_DOES_NOT_EXIST
+ def test_full_info(self):
+ self.assert_message_from_info(
+ "Fencing level for 'nodeA' at level '1' with device(s) "
+ "'device1,device2' does not exist",
+ {
+ "level": "1",
+ "target_type": TARGET_TYPE_NODE,
+ "target_value": "nodeA",
+ "devices": ["device1", "device2"],
+ }
+ )
+
+ def test_only_level(self):
+ self.assert_message_from_info(
+ "Fencing level at level '1' does not exist",
+ {
+ "level": "1",
+ "target_type": None,
+ "target_value": None,
+ "devices": None,
+ }
+ )
+
+ def test_only_target(self):
+ self.assert_message_from_info(
+ "Fencing level for 'name=value' does not exist",
+ {
+ "level": None,
+ "target_type": TARGET_TYPE_ATTRIBUTE,
+ "target_value": ("name", "value"),
+ "devices": None,
+ }
+ )
+
+ def test_only_devices(self):
+ self.assert_message_from_info(
+ "Fencing level with device(s) 'device1,device2' does not exist",
+ {
+ "level": None,
+ "target_type": None,
+ "target_value": None,
+ "devices": ["device1", "device2"],
+ }
+ )
+
+ def test_no_info(self):
+ self.assert_message_from_info(
+ "Fencing level does not exist",
+ {
+ "level": None,
+ "target_type": None,
+ "target_value": None,
+ "devices": None,
+ }
+ )
+
+
+class ResourceBundleAlreadyContainsAResource(NameBuildTest):
+ code = codes.RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE
+ def test_build_message_with_data(self):
+ self.assert_message_from_info(
+ (
+ "bundle 'test_bundle' already contains resource "
+ "'test_resource', a bundle may contain at most one resource"
+ ),
+ {
+ "resource_id": "test_resource",
+ "bundle_id": "test_bundle",
+ }
+ )
+
+
+class ResourceOperationIntevalDuplicationTest(NameBuildTest):
+ code = codes.RESOURCE_OPERATION_INTERVAL_DUPLICATION
+ def test_build_message_with_data(self):
+ self.assert_message_from_info(
+ "multiple specification of the same operation with the same"
+ " interval:"
+ "\nmonitor with intervals 3600s, 60m, 1h"
+ "\nmonitor with intervals 60s, 1m"
+ ,
+ {
+ "duplications": {
+ "monitor": [
+ ["3600s", "60m", "1h"],
+ ["60s", "1m"],
+ ],
+ },
+ }
+ )
+
+class ResourceOperationIntevalAdaptedTest(NameBuildTest):
+ code = codes.RESOURCE_OPERATION_INTERVAL_ADAPTED
+ def test_build_message_with_data(self):
+ self.assert_message_from_info(
+ "changing a monitor operation interval from 10 to 11 to make the"
+ " operation unique"
+ ,
+ {
+ "operation_name": "monitor",
+ "original_interval": "10",
+ "adapted_interval": "11",
+ }
+ )
+
+class IdBelongsToUnexpectedType(NameBuildTest):
+ code = codes.ID_BELONGS_TO_UNEXPECTED_TYPE
+ def test_build_message_with_data(self):
+ self.assert_message_from_info("'ID' is not primitive/master/clone", {
+ "id": "ID",
+ "expected_types": ["primitive", "master", "clone"],
+ "current_type": "op",
+ })
+
+ def test_build_message_with_transformation(self):
+ self.assert_message_from_info("'ID' is not a group", {
+ "id": "ID",
+ "expected_types": ["group"],
+ "current_type": "op",
+ })
+
+class ResourceRunOnNodes(NameBuildTest):
+ code = codes.RESOURCE_RUNNING_ON_NODES
+ def test_one_node(self):
+ self.assert_message_from_info(
+ "resource 'R' is running on node 'node1'",
+ {
+ "resource_id": "R",
+ "roles_with_nodes": {"Started": ["node1"]},
+ }
+ )
+ def test_multiple_nodes(self):
+ self.assert_message_from_info(
+ "resource 'R' is running on nodes 'node1', 'node2'",
+ {
+ "resource_id": "R",
+ "roles_with_nodes": {"Started": ["node1","node2"]},
+ }
+ )
+ def test_multiple_role_multiple_nodes(self):
+ self.assert_message_from_info(
+ "resource 'R' is master on node 'node3'"
+ "; running on nodes 'node1', 'node2'"
+ ,
+ {
+ "resource_id": "R",
+ "roles_with_nodes": {
+ "Started": ["node1","node2"],
+ "Master": ["node3"],
+ },
+ }
+ )
+
+class ResourceDoesNotRun(NameBuildTest):
+ code = codes.RESOURCE_DOES_NOT_RUN
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "resource 'R' is not running on any node",
+ {
+ "resource_id": "R",
+ }
+ )
+
+class MutuallyExclusiveOptions(NameBuildTest):
+ code = codes.MUTUALLY_EXCLUSIVE_OPTIONS
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "Only one of some options 'a' and 'b' can be used",
+ {
+ "option_type": "some",
+ "option_names": ["a", "b"],
+ }
+ )
+
+class ResourceIsUnmanaged(NameBuildTest):
+ code = codes.RESOURCE_IS_UNMANAGED
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "'R' is unmanaged",
+ {
+ "resource_id": "R",
+ }
+ )
+
+class ResourceManagedNoMonitorEnabled(NameBuildTest):
+ code = codes.RESOURCE_MANAGED_NO_MONITOR_ENABLED
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "Resource 'R' has no enabled monitor operations."
+ " Re-run with '--monitor' to enable them."
+ ,
+ {
+ "resource_id": "R",
+ }
+ )
+
+class NodeIsInCluster(NameBuildTest):
+ code = codes.CANNOT_ADD_NODE_IS_IN_CLUSTER
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "cannot add the node 'N1' because it is in a cluster",
+ {
+ "node": "N1",
+ }
+ )
+
+class NodeIsRunningPacemakerRemote(NameBuildTest):
+ code = codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "cannot add the node 'N1' because it is running service"
+ " 'pacemaker_remote' (is not the node already in a cluster?)"
+ ,
+ {
+ "node": "N1",
+ "service": "pacemaker_remote",
+ }
+ )
+ def test_build_message_with_unknown_service(self):
+ self.assert_message_from_info(
+ "cannot add the node 'N1' because it is running service 'unknown'",
+ {
+ "node": "N1",
+ "service": "unknown",
+ }
+ )
+
+
+class SbdDeviceInitializationStarted(NameBuildTest):
+ code = codes.SBD_DEVICE_INITIALIZATION_STARTED
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "Initializing device(s) /dev1, /dev2, /dev3...",
+ {
+ "device_list": ["/dev1", "/dev2", "/dev3"],
+ }
+ )
+
+
+class SbdDeviceInitializationError(NameBuildTest):
+ code = codes.SBD_DEVICE_INITIALIZATION_ERROR
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "Initialization of device(s) failed: this is reason",
+ {
+ "reason": "this is reason"
+ }
+ )
+
+
+class SbdDeviceListError(NameBuildTest):
+ code = codes.SBD_DEVICE_LIST_ERROR
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "Unable to get list of messages from device '/dev': this is reason",
+ {
+ "device": "/dev",
+ "reason": "this is reason",
+ }
+ )
+
+
+class SbdDeviceMessageError(NameBuildTest):
+ code = codes.SBD_DEVICE_MESSAGE_ERROR
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "Unable to set message 'test' for node 'node1' on device '/dev1'",
+ {
+ "message": "test",
+ "node": "node1",
+ "device": "/dev1",
+ }
+ )
+
+
+class SbdDeviceDumpError(NameBuildTest):
+ code = codes.SBD_DEVICE_DUMP_ERROR
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "Unable to get SBD headers from device '/dev1': this is reason",
+ {
+ "device": "/dev1",
+ "reason": "this is reason",
+ }
+ )
+
+
+class SbdDevcePathNotAbsolute(NameBuildTest):
+ code = codes.SBD_DEVICE_PATH_NOT_ABSOLUTE
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "Device path '/dev' on node 'node1' is not absolute",
+ {
+ "device": "/dev",
+ "node": "node1",
+ }
+ )
+
+ def test_build_message_without_node(self):
+ self.assert_message_from_info(
+ "Device path '/dev' is not absolute",
+ {
+ "device": "/dev",
+ "node": None,
+ }
+ )
+
+
+class SbdDeviceDoesNotExist(NameBuildTest):
+ code = codes.SBD_DEVICE_DOES_NOT_EXIST
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "node1: device '/dev' not found",
+ {
+ "node": "node1",
+ "device": "/dev",
+ }
+ )
+
+
+class SbdDeviceISNotBlockDevice(NameBuildTest):
+ code = codes.SBD_DEVICE_IS_NOT_BLOCK_DEVICE
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "node1: device '/dev' is not a block device",
+ {
+ "node": "node1",
+ "device": "/dev",
+ }
+ )
+
+
+class SbdNoDEviceForNode(NameBuildTest):
+ code = codes.SBD_NO_DEVICE_FOR_NODE
+ def test_build_message(self):
+ self.assert_message_from_info(
+ "No device defined for node 'node1'",
+ {
+ "node": "node1",
+ }
+ )
+
+
+class SbdTooManyDevicesForNode(NameBuildTest):
+ code = codes.SBD_TOO_MANY_DEVICES_FOR_NODE
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "More than 3 devices defined for node 'node1' (devices: /dev1, "
+ "/dev2, /dev3)",
+ {
+ "max_devices": 3,
+ "node": "node1",
+ "device_list": ["/dev1", "/dev2", "/dev3"]
+ }
+ )
+
+class RequiredOptionOfAlternativesIsMissing(NameBuildTest):
+ code = codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING
+ def test_without_type(self):
+ self.assert_message_from_info(
+ "option 'aAa' or 'bBb' or 'cCc' has to be specified",
+ {
+ "option_names": ["aAa", "bBb", "cCc"],
+ }
+ )
+
+ def test_with_type(self):
+ self.assert_message_from_info(
+ "test option 'aAa' or 'bBb' or 'cCc' has to be specified",
+ {
+ "option_type": "test",
+ "option_names": ["aAa", "bBb", "cCc"],
+ }
+ )
+
+class PrerequisiteOptionIsMissing(NameBuildTest):
+ code = codes.PREREQUISITE_OPTION_IS_MISSING
+ def test_without_type(self):
+ self.assert_message_from_info(
+ "If option 'a' is specified, option 'b' must be specified as well",
+ {
+ "option_name": "a",
+ "prerequisite_name": "b",
+ }
+ )
+
+ def test_with_type(self):
+ self.assert_message_from_info(
+ "If some option 'a' is specified, "
+ "other option 'b' must be specified as well"
+ ,
+ {
+ "option_name": "a",
+ "option_type": "some",
+ "prerequisite_name": "b",
+ "prerequisite_type": "other",
+ }
+ )
+
+class FileDistributionStarted(NameBuildTest):
+ code = codes.FILES_DISTRIBUTION_STARTED
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "Sending 'first', 'second'",
+ {
+ "file_list": ["first", "second"],
+ "node_list": None,
+ "description": None,
+ }
+ )
+
+ def test_build_messages_with_nodes(self):
+ self.assert_message_from_info(
+ "Sending 'first', 'second' to 'node1', 'node2'",
+ {
+ "file_list": ["first", "second"],
+ "node_list": ["node1", "node2"],
+ "description": None,
+ }
+ )
+
+ def test_build_messages_with_description(self):
+ self.assert_message_from_info(
+ "Sending configuration files to 'node1', 'node2'",
+ {
+ "file_list": ["first", "second"],
+ "node_list": ["node1", "node2"],
+ "description": "configuration files",
+ }
+ )
+
+class FileDistributionSucess(NameBuildTest):
+ code = codes.FILE_DISTRIBUTION_SUCCESS
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "node1: successful distribution of the file 'some authfile'",
+ {
+ "nodes_success_files": None,
+ "node": "node1",
+ "file_description": "some authfile",
+ }
+ )
+
+class FileDistributionError(NameBuildTest):
+ code = codes.FILE_DISTRIBUTION_ERROR
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "node1: unable to distribute file 'file1': permission denied",
+ {
+ "node_file_errors": None,
+ "node": "node1",
+ "file_description": "file1",
+ "reason": "permission denied",
+ }
+ )
+
+class FileRemoveFromNodeStarted(NameBuildTest):
+ code = codes.FILES_REMOVE_FROM_NODE_STARTED
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "Requesting remove 'first', 'second' from 'node1', 'node2'",
+ {
+ "file_list": ["first", "second"],
+ "node_list": ["node1", "node2"],
+ "description": None,
+ }
+ )
+
+ def test_build_messages_with_description(self):
+ self.assert_message_from_info(
+ "Requesting remove remote configuration files from 'node1',"
+ " 'node2'"
+ ,
+ {
+ "file_list": ["first", "second"],
+ "node_list": ["node1", "node2"],
+ "description": "remote configuration files",
+ }
+ )
+
+class FileRemoveFromNodeSucess(NameBuildTest):
+ code = codes.FILE_REMOVE_FROM_NODE_SUCCESS
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "node1: successful removal of the file 'some authfile'",
+ {
+ "nodes_success_files": None,
+ "node": "node1",
+ "file_description": "some authfile",
+ }
+ )
+
+class FileRemoveFromNodeError(NameBuildTest):
+ code = codes.FILE_REMOVE_FROM_NODE_ERROR
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "node1: unable to remove file 'file1': permission denied",
+ {
+ "node_file_errors": None,
+ "node": "node1",
+ "file_description": "file1",
+ "reason": "permission denied",
+ }
+ )
+
+
+class ActionsOnNodesStarted(NameBuildTest):
+ code = codes.SERVICE_COMMANDS_ON_NODES_STARTED
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "Requesting 'first', 'second'",
+ {
+ "action_list": ["first", "second"],
+ "node_list": None,
+ "description": None,
+ }
+ )
+
+ def test_build_messages_with_nodes(self):
+ self.assert_message_from_info(
+ "Requesting 'first', 'second' on 'node1', 'node2'",
+ {
+ "action_list": ["first", "second"],
+ "node_list": ["node1", "node2"],
+ "description": None,
+ }
+ )
+
+ def test_build_messages_with_description(self):
+ self.assert_message_from_info(
+ "Requesting running pacemaker_remote on 'node1', 'node2'",
+ {
+ "action_list": ["first", "second"],
+ "node_list": ["node1", "node2"],
+ "description": "running pacemaker_remote",
+ }
+ )
+
+class ActionsOnNodesSuccess(NameBuildTest):
+ code = codes.SERVICE_COMMAND_ON_NODE_SUCCESS
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "node1: successful run of 'service enable'",
+ {
+ "nodes_success_actions": None,
+ "node": "node1",
+ "service_command_description": "service enable",
+ }
+ )
+
+class ActionOnNodesError(NameBuildTest):
+ code = codes.SERVICE_COMMAND_ON_NODE_ERROR
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "node1: service command failed: service1 start: permission denied",
+ {
+ "node_action_errors": None,
+ "node": "node1",
+ "service_command_description": "service1 start",
+ "reason": "permission denied",
+ }
+ )
+
+class resource_is_guest_node_already(NameBuildTest):
+ code = codes.RESOURCE_IS_GUEST_NODE_ALREADY
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "the resource 'some-resource' is already a guest node",
+ {"resource_id": "some-resource"}
+ )
+
+class live_environment_required(NameBuildTest):
+ code = codes.LIVE_ENVIRONMENT_REQUIRED
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "This command does not support '--corosync_conf'",
+ {
+ "forbidden_options": ["--corosync_conf"]
+ }
+ )
+
+ def test_build_messages_transformable_codes(self):
+ self.assert_message_from_info(
+ "This command does not support '--corosync_conf', '-f'",
+ {
+ "forbidden_options": ["COROSYNC_CONF", "CIB"]
+ }
+ )
+
+class nolive_skip_files_distribution(NameBuildTest):
+ code = codes.NOLIVE_SKIP_FILES_DISTRIBUTION
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "the distribution of 'file1', 'file2' to 'node1', 'node2' was"
+ " skipped because command"
+ " does not run on live cluster (e.g. -f was used)."
+ " You will have to do it manually."
+ ,
+ {
+ "files_description": ["file1", 'file2'],
+ "nodes": ["node1", "node2"],
+ }
+ )
+
+class nolive_skip_files_remove(NameBuildTest):
+ code = codes.NOLIVE_SKIP_FILES_REMOVE
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "'file1', 'file2' remove from 'node1', 'node2'"
+ " was skipped because command"
+ " does not run on live cluster (e.g. -f was used)."
+ " You will have to do it manually."
+ ,
+ {
+ "files_description": ["file1", 'file2'],
+ "nodes": ["node1", "node2"],
+ }
+ )
+
+class nolive_skip_service_command_on_nodes(NameBuildTest):
+ code = codes.NOLIVE_SKIP_SERVICE_COMMAND_ON_NODES
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "running 'pacemaker_remote start' on 'node1', 'node2' was skipped"
+ " because command does not run on live cluster (e.g. -f was"
+ " used). You will have to run it manually."
+ ,
+ {
+ "service": "pacemaker_remote",
+ "command": "start",
+ "nodes": ["node1", "node2"]
+ }
+ )
+
+class NodeNotFound(NameBuildTest):
+ code = codes.NODE_NOT_FOUND
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "Node 'SOME_NODE' does not appear to exist in configuration",
+ {
+ "node": "SOME_NODE",
+ "searched_types": []
+ }
+ )
+
+ def test_build_messages_with_one_search_types(self):
+ self.assert_message_from_info(
+ "remote node 'SOME_NODE' does not appear to exist in configuration",
+ {
+ "node": "SOME_NODE",
+ "searched_types": ["remote"]
+ }
+ )
+
+ def test_build_messages_with_string_search_types(self):
+ self.assert_message_from_info(
+ "remote node 'SOME_NODE' does not appear to exist in configuration",
+ {
+ "node": "SOME_NODE",
+ "searched_types": "remote"
+ }
+ )
+
+ def test_build_messages_with_multiple_search_types(self):
+ self.assert_message_from_info(
+ "nor remote node or guest node 'SOME_NODE' does not appear to exist"
+ " in configuration"
+ ,
+ {
+ "node": "SOME_NODE",
+ "searched_types": ["remote", "guest"]
+ }
+ )
+
+class MultipleResultFound(NameBuildTest):
+ code = codes.MULTIPLE_RESULTS_FOUND
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "multiple resource for 'NODE-NAME' found: 'ID1', 'ID2'",
+ {
+ "result_type": "resource",
+ "result_identifier_list": ["ID1", "ID2"],
+ "search_description": "NODE-NAME",
+ }
+ )
+
+class UseCommandNodeAddRemote(NameBuildTest):
+ code = codes.USE_COMMAND_NODE_ADD_REMOTE
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "this command is not sufficient for creating a remote connection,"
+ " use 'pcs cluster node add-remote'"
+ ,
+ {}
+ )
+
+class UseCommandNodeAddGuest(NameBuildTest):
+ code = codes.USE_COMMAND_NODE_ADD_GUEST
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "this command is not sufficient for creating a guest node, use "
+ "'pcs cluster node add-guest'",
+ {}
+ )
+
+class UseCommandNodeRemoveGuest(NameBuildTest):
+ code = codes.USE_COMMAND_NODE_REMOVE_GUEST
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "this command is not sufficient for removing a guest node, use "
+ "'pcs cluster node remove-guest'",
+ {}
+ )
+
+class NodeRemoveInPacemakerFailed(NameBuildTest):
+ code = codes.NODE_REMOVE_IN_PACEMAKER_FAILED
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "unable to remove node 'NODE' from pacemaker: reason",
+ {
+ "node_name": "NODE",
+ "reason": "reason"
+ }
+ )
+
+class NodeToClearIsStillInCluster(NameBuildTest):
+ code = codes.NODE_TO_CLEAR_IS_STILL_IN_CLUSTER
+ def test_build_messages(self):
+ self.assert_message_from_info(
+ "node 'node1' seems to be still in the cluster"
+ "; this command should be used only with nodes that have been"
+ " removed from the cluster"
+ ,
+ {
+ "node": "node1"
+ }
+ )
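
[Editor's note] All of the classes above follow one pattern: name a report code, feed an info dict to assert_message_from_info, and compare against the expected message. The helper itself is defined earlier in this test module, outside this excerpt; a plausible minimal shape of it is sketched below. The map name and import location are assumptions, not something this hunk shows.

    from pcs.test.tools.pcs_unittest import TestCase
    # assumed location of the CLI code -> message-builder map
    from pcs.cli.common.console_report import CODE_TO_MESSAGE_BUILDER_MAP

    class NameBuildTestSketch(TestCase):
        # each subclass sets the report code it exercises
        code = None

        def assert_message_from_info(self, expected_message, info):
            # look up the builder registered for self.code and compare its output
            build = CODE_TO_MESSAGE_BUILDER_MAP[self.code]
            self.assertEqual(expected_message, build(info))
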
diff --git a/pcs/cli/common/test/test_env_file.py b/pcs/cli/common/test/test_env_file.py
new file mode 100644
index 0000000..e0104bd
--- /dev/null
+++ b/pcs/cli/common/test/test_env_file.py
@@ -0,0 +1,138 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.pcs_unittest import mock
+from pcs.cli.common import env_file
+from pcs.test.tools.misc import create_patcher, create_setup_patch_mixin
+
+from pcs.lib.errors import ReportItem
+from pcs.common import report_codes
+
+patch_env_file = create_patcher(env_file)
+SetupPatchMixin = create_setup_patch_mixin(patch_env_file)
+
+FILE_PATH = "/path/to/local/file"
+
+class Write(TestCase, SetupPatchMixin):
+ def setUp(self):
+ self.mock_open = mock.mock_open()
+ self.mock_error = self.setup_patch("console_report.error")
+
+ def assert_params_causes_calls(self, env_file_dict, calls, path=FILE_PATH):
+ with patch_env_file("open", self.mock_open, create=True):
+ env_file.write(env_file_dict, path)
+ self.assertEqual(self.mock_open.mock_calls, calls)
+
+ def test_successfully_write(self):
+ self.assert_params_causes_calls(
+ {"content": "filecontent"},
+ [
+ mock.call(FILE_PATH, "w"),
+ mock.call().write("filecontent"),
+ mock.call().close(),
+ ]
+ )
+
+ def test_successfully_write_binary(self):
+ self.assert_params_causes_calls(
+ {"content": "filecontent", "is_binary": True},
+ [
+ mock.call(FILE_PATH, "wb"),
+ mock.call().write("filecontent"),
+ mock.call().close(),
+ ]
+ )
+
+ def test_exit_when_cannot_open_file(self):
+ self.mock_open.side_effect = EnvironmentError()
+ self.mock_error.side_effect = SystemExit()
+ self.assertRaises(
+ SystemExit,
+ lambda: env_file.write({"content": "filecontent"}, FILE_PATH)
+ )
+
+class Read(TestCase, SetupPatchMixin):
+ def setUp(self):
+ self.is_file = self.setup_patch('os.path.isfile')
+ self.mock_open = mock.mock_open(read_data='filecontent')
+ self.mock_error = self.setup_patch("console_report.error")
+
+ def assert_returns_content(self, content, is_file):
+ self.is_file.return_value = is_file
+ with patch_env_file("open", self.mock_open, create=True):
+ self.assertEqual(
+ content,
+ env_file.read(FILE_PATH)
+ )
+
+ def test_successfully_read(self):
+ self.assert_returns_content({"content": "filecontent"}, is_file=True)
+
+ def test_successfully_return_empty_content(self):
+ self.assert_returns_content({"content": None}, is_file=False)
+
+ def test_exit_when_cannot_open_file(self):
+ self.mock_open.side_effect = EnvironmentError()
+ self.mock_error.side_effect = SystemExit()
+ self.assertRaises(SystemExit, lambda: env_file.read(FILE_PATH))
+
+class ProcessNoExistingFileExpectation(TestCase, SetupPatchMixin):
+ def setUp(self):
+ self.exists = self.setup_patch('os.path.exists')
+ self.mock_error = self.setup_patch("console_report.error")
+
+ def run_process(
+ self, no_existing_file_expected, file_exists, overwrite=False
+ ):
+ self.exists.return_value = file_exists
+ env_file.process_no_existing_file_expectation(
+ "role",
+ {
+ "no_existing_file_expected": no_existing_file_expected,
+ "can_overwrite_existing_file": overwrite,
+ },
+ FILE_PATH
+ )
+
+ def test_do_nothing_when_expectation_does_not_conflict(self):
+ self.run_process(no_existing_file_expected=False, file_exists=True)
+ self.run_process(no_existing_file_expected=False, file_exists=False)
+ self.run_process(no_existing_file_expected=True, file_exists=False)
+
+ def test_overwrite_permission_produce_console_warning(self):
+ warn = self.setup_patch("console_report.warn")
+ self.run_process(
+ no_existing_file_expected=True,
+ file_exists=True,
+ overwrite=True
+ )
+ warn.assert_called_once_with("role /path/to/local/file already exists")
+
+ def test_non_overwritable_conflict_exits(self):
+ self.mock_error.side_effect = SystemExit()
+ self.assertRaises(
+ SystemExit,
+ lambda:
+ self.run_process(no_existing_file_expected=True, file_exists=True)
+ )
+
+class ReportMissing(TestCase):
+ @patch_env_file("console_report.error")
+ def test_report_to_console(self, error):
+ env_file.report_missing("role", "path")
+ error.assert_called_once_with("role 'path' does not exist")
+
+class IsMissingReport(TestCase):
+ def test_recognize_missing_report(self):
+ self.assertTrue(env_file.is_missing_report(
+ ReportItem.error(
+ report_codes.FILE_DOES_NOT_EXIST,
+ info={"file_role": "role"}
+ ),
+ "role"
+ ))
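
[Editor's note] The Write tests above pin down the expected call sequence: open the path in "w" or "wb" mode, write the content, close, and turn any EnvironmentError into a console error. A minimal sketch consistent with those tests follows; the error message wording and overall structure are assumptions, the real module is pcs/cli/common/env_file.py added by this commit.

    from pcs.cli.common import console_report

    def write_sketch(env_file_dict, file_path):
        try:
            # binary content selects "wb", plain text selects "w"
            mode = "wb" if env_file_dict.get("is_binary") else "w"
            target = open(file_path, mode)
            target.write(env_file_dict["content"])
            target.close()
        except EnvironmentError as e:
            # console_report.error is expected to terminate; the tests above
            # emulate that with a SystemExit side effect
            console_report.error(
                "Unable to write {0}: {1}".format(file_path, e)
            )
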
diff --git a/pcs/cli/common/test/test_parse_args.py b/pcs/cli/common/test/test_parse_args.py
index 23704b9..5b79b85 100644
--- a/pcs/cli/common/test/test_parse_args.py
+++ b/pcs/cli/common/test/test_parse_args.py
@@ -7,9 +7,18 @@ from __future__ import (
from pcs.test.tools.pcs_unittest import TestCase
from pcs.cli.common.parse_args import(
- split_list,
- prepare_options,
group_by_keywords,
+ parse_typed_arg,
+ prepare_options,
+ split_list,
+ filter_out_non_option_negative_numbers,
+ filter_out_options,
+ is_num,
+ is_negative_num,
+ is_short_option_expecting_value,
+ is_long_option_expecting_value,
+ is_option_expecting_value,
+ upgrade_args,
)
from pcs.cli.common.errors import CmdLineInputError
@@ -31,6 +40,15 @@ class PrepareOptionsTest(TestCase):
CmdLineInputError, lambda: prepare_options(['=a'])
)
+ def test_refuse_options_with_same_key_and_different_value(self):
+ self.assertRaises(
+ CmdLineInputError, lambda: prepare_options(['a=a', "a=b"])
+ )
+
+ def test_accept_options_with_same_key_and_same_value(self):
+ self.assertEqual({'a': '1'}, prepare_options(["a=1", "a=1"]))
+
+
class SplitListTest(TestCase):
def test_returns_list_with_original_when_separator_not_in_original(self):
self.assertEqual([['a', 'b']], split_list(['a', 'b'], 'c'))
@@ -53,7 +71,7 @@ class SplitByKeywords(TestCase):
group_by_keywords(
[0, "first", 1, 2, "second", 3],
set(["first", "second"]),
- implicit_first_keyword="zero"
+ implicit_first_group_key="zero"
),
{
"zero": [0],
@@ -97,7 +115,7 @@ class SplitByKeywords(TestCase):
group_by_keywords(
[],
set(["first", "second"]),
- implicit_first_keyword="zero",
+ implicit_first_group_key="zero",
),
{
"zero": [],
@@ -106,6 +124,50 @@ class SplitByKeywords(TestCase):
}
)
+ def test_returns_dict_with_empty_lists_for_no_opts_and_only_found_kws(self):
+ self.assertEqual(
+ group_by_keywords(
+ ["first"],
+ set(["first", "second"]),
+ only_found_keywords=True,
+ ),
+ {
+ "first": [],
+ }
+ )
+
+ def test_returns_empty_lists_no_opts_and_only_found_kws_with_grouping(self):
+ self.assertEqual(
+ group_by_keywords(
+ ["second", 1, "second", "second", 2, 3],
+ set(["first", "second"]),
+ group_repeated_keywords=["second"],
+ only_found_keywords=True,
+ ),
+ {
+ "second": [
+ [1],
+ [],
+ [2, 3],
+ ],
+ }
+ )
+
+ def test_empty_repeatable(self):
+ self.assertEqual(
+ group_by_keywords(
+ ["second"],
+ set(["first", "second"]),
+ group_repeated_keywords=["second"],
+ only_found_keywords=True,
+ ),
+ {
+ "second": [
+ [],
+ ],
+ }
+ )
+
def test_allow_keywords_repeating(self):
self.assertEqual(
group_by_keywords(
@@ -124,3 +186,327 @@ class SplitByKeywords(TestCase):
set(["first", "second"]),
keyword_repeat_allowed=False,
))
+
+ def test_group_repeating_keyword_occurrences(self):
+ self.assertEqual(
+ group_by_keywords(
+ ["first", 1, 2, "second", 3, "first", 4],
+ set(["first", "second"]),
+ group_repeated_keywords=["first"]
+ ),
+ {
+ "first": [[1, 2], [4]],
+ "second": [3],
+ }
+ )
+
+ def test_raises_on_group_repeated_keywords_inconsistency(self):
+ self.assertRaises(AssertionError, lambda: group_by_keywords(
+ [],
+ set(["first", "second"]),
+ group_repeated_keywords=["first", "third"],
+ implicit_first_group_key="third"
+ ))
+
+ def test_implicit_first_kw_not_applied_in_the_middle(self):
+ self.assertEqual(
+ group_by_keywords(
+ [1, 2, "first", 3, "zero", 4],
+ set(["first"]),
+ implicit_first_group_key="zero"
+ ),
+ {
+ "zero": [1, 2],
+ "first": [3, "zero", 4],
+ }
+ )
+ def test_implicit_first_kw_applied_in_the_middle_when_is_in_kwds(self):
+ self.assertEqual(
+ group_by_keywords(
+ [1, 2, "first", 3, "zero", 4],
+ set(["first", "zero"]),
+ implicit_first_group_key="zero"
+ ),
+ {
+ "zero": [1, 2, 4],
+ "first": [3],
+ }
+ )
+
+
+class ParseTypedArg(TestCase):
+ def assert_parse(self, arg, parsed):
+ self.assertEqual(
+ parse_typed_arg(arg, ["t0", "t1", "t2"], "t0"),
+ parsed
+ )
+
+ def test_no_type(self):
+ self.assert_parse("value", ("t0", "value"))
+
+ def test_escape(self):
+ self.assert_parse("%value", ("t0", "value"))
+
+ def test_allowed_type(self):
+ self.assert_parse("t1%value", ("t1", "value"))
+
+ def test_bad_type(self):
+ self.assertRaises(
+ CmdLineInputError,
+ lambda: self.assert_parse("tX%value", "aaa")
+ )
+
+ def test_escape_delimiter(self):
+ self.assert_parse("%%value", ("t0", "%value"))
+ self.assert_parse("%val%ue", ("t0", "val%ue"))
+
+ def test_more_delimiters(self):
+ self.assert_parse("t2%va%lu%e", ("t2", "va%lu%e"))
+ self.assert_parse("t2%%va%lu%e", ("t2", "%va%lu%e"))
+
+class FilterOutNonOptionNegativeNumbers(TestCase):
+ def test_does_not_remove_anything_when_no_negative_numbers(self):
+ args = ["first", "second"]
+ self.assertEqual(args, filter_out_non_option_negative_numbers(args))
+
+ def test_remove_negative_number(self):
+ self.assertEqual(
+ ["first"],
+ filter_out_non_option_negative_numbers(["first", "-1"])
+ )
+
+ def test_remove_negative_infinity(self):
+ self.assertEqual(
+ ["first"],
+ filter_out_non_option_negative_numbers(["first", "-INFINITY"])
+ )
+ self.assertEqual(
+ ["first"],
+ filter_out_non_option_negative_numbers(["first", "-infinity"])
+ )
+
+ def test_not_remove_follower_of_short_signed_option(self):
+ self.assertEqual(
+ ["first", "-f", "-1"],
+ filter_out_non_option_negative_numbers(["first", "-f", "-1"])
+ )
+
+ def test_remove_follower_of_short_unsigned_option(self):
+ self.assertEqual(
+ ["first", "-h"],
+ filter_out_non_option_negative_numbers(["first", "-h", "-1"])
+ )
+
+ def test_not_remove_follower_of_long_signed_option(self):
+ self.assertEqual(
+ ["first", "--name", "-1"],
+ filter_out_non_option_negative_numbers(["first", "--name", "-1"])
+ )
+
+ def test_remove_follower_of_long_unsigned_option(self):
+ self.assertEqual(
+ ["first", "--master"],
+ filter_out_non_option_negative_numbers(["first", "--master", "-1"])
+ )
+
+ def test_does_not_remove_dash(self):
+ self.assertEqual(
+ ["first", "-"],
+ filter_out_non_option_negative_numbers(["first", "-"])
+ )
+
+ def test_does_not_remove_dash_dash(self):
+ self.assertEqual(
+ ["first", "--"],
+ filter_out_non_option_negative_numbers(["first", "--"])
+ )
+
+class FilterOutOptions(TestCase):
+ def test_does_not_remove_anything_when_no_options(self):
+ args = ["first", "second"]
+ self.assertEqual(args, filter_out_options(args))
+
+ def test_remove_unsigned_short_option(self):
+ self.assertEqual(
+ ["first", "second"],
+ filter_out_options(["first", "-h", "second"])
+ )
+
+ def test_remove_signed_short_option_with_value(self):
+ self.assertEqual(
+ ["first"],
+ filter_out_options(["first", "-f", "second"])
+ )
+
+ def test_not_remove_value_of_signed_short_option_when_value_bundled(self):
+ self.assertEqual(
+ ["first", "second"],
+ filter_out_options(["first", "-fvalue", "second"])
+ )
+
+ def test_remove_unsigned_long_option(self):
+ self.assertEqual(
+ ["first", "second"],
+ filter_out_options(["first", "--master", "second"])
+ )
+
+ def test_remove_signed_long_option_with_value(self):
+ self.assertEqual(
+ ["first"],
+ filter_out_options(["first", "--name", "second"])
+ )
+
+ def test_not_remove_value_of_signed_long_option_when_value_bundled(self):
+ self.assertEqual(
+ ["first", "second"],
+ filter_out_options(["first", "--name=value", "second"])
+ )
+
+ def test_does_not_remove_dash(self):
+ self.assertEqual(
+ ["first", "-"],
+ filter_out_options(["first", "-"])
+ )
+
+ def test_remove_dash_dash(self):
+ self.assertEqual(
+ ["first"],
+ filter_out_options(["first", "--"])
+ )
+
+class IsNum(TestCase):
+ def test_returns_true_on_number(self):
+ self.assertTrue(is_num("10"))
+
+ def test_returns_true_on_infinity(self):
+ self.assertTrue(is_num("infinity"))
+
+ def test_returns_false_on_no_number(self):
+ self.assertFalse(is_num("no-num"))
+
+class IsNegativeNum(TestCase):
+ def test_returns_true_on_negative_number(self):
+ self.assertTrue(is_negative_num("-10"))
+
+ def test_returns_true_on_infinity(self):
+ self.assertTrue(is_negative_num("-INFINITY"))
+
+ def test_returns_false_on_positive_number(self):
+ self.assertFalse(is_negative_num("10"))
+
+ def test_returns_false_on_no_number(self):
+ self.assertFalse(is_negative_num("no-num"))
+
+class IsShortOptionExpectingValue(TestCase):
+ def test_returns_true_on_short_option_with_value(self):
+ self.assertTrue(is_short_option_expecting_value("-f"))
+
+ def test_returns_false_on_short_option_without_value(self):
+ self.assertFalse(is_short_option_expecting_value("-h"))
+
+ def test_returns_false_on_unknown_short_option(self):
+ self.assertFalse(is_short_option_expecting_value("-x"))
+
+ def test_returns_false_on_dash(self):
+ self.assertFalse(is_short_option_expecting_value("-"))
+
+ def test_returns_false_on_option_without_dash(self):
+ self.assertFalse(is_short_option_expecting_value("ff"))
+
+ def test_returns_false_on_option_including_value(self):
+ self.assertFalse(is_short_option_expecting_value("-fvalue"))
+
+class IsLongOptionExpectingValue(TestCase):
+ def test_returns_true_on_long_option_with_value(self):
+ self.assertTrue(is_long_option_expecting_value("--name"))
+
+ def test_returns_false_on_long_option_without_value(self):
+ self.assertFalse(is_long_option_expecting_value("--master"))
+
+ def test_returns_false_on_unknown_long_option(self):
+ self.assertFalse(is_long_option_expecting_value("--not-specified-long-opt"))
+
+ def test_returns_false_on_dash_dash(self):
+ self.assertFalse(is_long_option_expecting_value("--"))
+
+ def test_returns_false_on_option_without_dash_dash(self):
+ self.assertFalse(is_long_option_expecting_value("-long-option"))
+
+ def test_returns_false_on_option_including_value(self):
+ self.assertFalse(is_long_option_expecting_value("--name=Name"))
+
+class IsOptionExpectingValue(TestCase):
+ def test_returns_true_on_short_option_with_value(self):
+ self.assertTrue(is_option_expecting_value("-f"))
+
+ def test_returns_true_on_long_option_with_value(self):
+ self.assertTrue(is_option_expecting_value("--name"))
+
+ def test_returns_false_on_short_option_without_value(self):
+ self.assertFalse(is_option_expecting_value("-h"))
+
+ def test_returns_false_on_long_option_without_value(self):
+ self.assertFalse(is_option_expecting_value("--master"))
+
+ def test_returns_false_on_unknown_short_option(self):
+ self.assertFalse(is_option_expecting_value("-x"))
+
+ def test_returns_false_on_unknown_long_option(self):
+ self.assertFalse(is_option_expecting_value("--not-specified-long-opt"))
+
+ def test_returns_false_on_dash(self):
+ self.assertFalse(is_option_expecting_value("-"))
+
+ def test_returns_false_on_dash_dash(self):
+ self.assertFalse(is_option_expecting_value("--"))
+
+ def test_returns_false_on_option_including_value(self):
+ self.assertFalse(is_option_expecting_value("--name=Name"))
+ self.assertFalse(is_option_expecting_value("-fvalue"))
+
+class UpgradeArgs(TestCase):
+ def test_returns_the_same_args_when_no_older_versions_detected(self):
+ args = ["first", "second"]
+ self.assertEqual(args, upgrade_args(args))
+
+ def test_upgrade_2dash_cloneopt(self):
+ self.assertEqual(
+ ["first", "clone", "second"],
+ upgrade_args(["first", "--cloneopt", "second"])
+ )
+
+ def test_upgrade_2dash_clone(self):
+ self.assertEqual(
+ ["first", "clone", "second"],
+ upgrade_args(["first", "--clone", "second"])
+ )
+
+ def test_upgrade_2dash_cloneopt_with_value(self):
+ self.assertEqual(
+ ["first", "clone", "1", "second"],
+ upgrade_args(["first", "--cloneopt=1", "second"])
+ )
+
+ def test_upgrade_2dash_master_in_resource_create(self):
+ self.assertEqual(
+ ["resource", "create", "master", "second"],
+ upgrade_args(["resource", "create", "--master", "second"])
+ )
+
+ def test_dont_upgrade_2dash_master_outside_of_resource_create(self):
+ self.assertEqual(
+ ["first", "--master", "second"],
+ upgrade_args(["first", "--master", "second"])
+ )
+
+ def test_upgrade_2dash_master_in_resource_create_with_complications(self):
+ self.assertEqual(
+ [
+ "-f", "path/to/file", "resource", "-V", "create", "master",
+ "second"
+ ],
+ upgrade_args([
+ "-f", "path/to/file", "resource", "-V", "create", "--master",
+ "second"
+ ])
+ )
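
[Editor's note] Read together, the FilterOutNonOptionNegativeNumbers tests describe a single rule: a bare negative number (or -INFINITY) is dropped unless the token right before it is an option that expects a value. A compact sketch of that rule, reusing the predicates this test module imports; the real pcs implementation may be structured differently.

    from pcs.cli.common.parse_args import (
        is_negative_num,
        is_option_expecting_value,
    )

    def filter_out_non_option_negative_numbers_sketch(arg_list):
        # keep "-1" / "-INFINITY" only when the preceding token expects a value
        kept = []
        for index, arg in enumerate(arg_list):
            previous = arg_list[index - 1] if index > 0 else ""
            if is_negative_num(arg) and not is_option_expecting_value(previous):
                continue
            kept.append(arg)
        return kept

    # filter_out_non_option_negative_numbers_sketch(["first", "-f", "-1"])
    #   -> ["first", "-f", "-1"]   (-f expects a value, so -1 is kept)
    # filter_out_non_option_negative_numbers_sketch(["first", "-h", "-1"])
    #   -> ["first", "-h"]         (-h takes no value, so -1 is dropped)
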
diff --git a/pcs/cli/common/test/test_reports.py b/pcs/cli/common/test/test_reports.py
index 3ea1dd2..04bc6a0 100644
--- a/pcs/cli/common/test/test_reports.py
+++ b/pcs/cli/common/test/test_reports.py
@@ -6,7 +6,9 @@ from __future__ import (
)
from pcs.test.tools.pcs_unittest import TestCase
+
from collections import namedtuple
+from functools import partial
from pcs.cli.common.reports import build_message_from_report
@@ -29,7 +31,9 @@ class BuildMessageFromReportTest(TestCase):
"Message force text is inside",
build_message_from_report(
{
- "SOME": "Message {force} is inside",
+ "SOME": lambda info, force_text:
+ "Message "+force_text+" is inside"
+ ,
},
ReportItem("SOME", {}),
"force text"
@@ -82,3 +86,40 @@ class BuildMessageFromReportTest(TestCase):
ReportItem("SOME", {}),
)
)
+
+ def test_callable_is_partial_object(self):
+ code_builder_map = {
+ "SOME": partial(
+ lambda title, info: "{title}: {message}".format(
+ title=title, **info
+ ),
+ "Info"
+ )
+ }
+ self.assertEqual(
+ "Info: MESSAGE",
+ build_message_from_report(
+ code_builder_map,
+ ReportItem("SOME", {"message": "MESSAGE"})
+ )
+ )
+
+ def test_callable_is_partial_object_with_force(self):
+ code_builder_map = {
+ "SOME": partial(
+ lambda title, info, force_text:
+ "{title}: {message} {force_text}".format(
+ title=title, force_text=force_text, **info
+ ),
+ "Info"
+ )
+ }
+ self.assertEqual(
+ "Info: MESSAGE force text",
+ build_message_from_report(
+ code_builder_map,
+ ReportItem("SOME", {"message": "MESSAGE"}),
+ "force text"
+ )
+ )
+
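
[Editor's note] These tests fix the contract of build_message_from_report: a builder that takes only the info dict gets the force text appended for it, while a builder (or functools.partial) that also accepts force_text places the text itself. A simplified re-implementation of that dispatch follows, for illustration only; the real helper lives in pcs/cli/common/reports.py and may differ in details, including the fallback text.

    def build_message_sketch(code_builder_map, code, info, force_text=""):
        if code not in code_builder_map:
            # hypothetical fallback; the real helper has its own default text
            return "Unknown report: {0} info: {1}{2}".format(
                code, info, force_text
            )
        builder = code_builder_map[code]
        try:
            # force-aware builders accept the force text as a second argument
            return builder(info, force_text)
        except TypeError:
            # plain builders take only the info dict; append the force text
            return builder(info) + force_text
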
diff --git a/pcs/cli/constraint_all/console_report.py b/pcs/cli/constraint_all/console_report.py
index 2288272..dac0554 100644
--- a/pcs/cli/constraint_all/console_report.py
+++ b/pcs/cli/constraint_all/console_report.py
@@ -43,12 +43,14 @@ def constraint_plain(constraint_type, options_dict, with_id=False):
return type_report_map[constraint_type](options_dict, with_id)
-#Each value (callable taking report_item.info) returns string template.
-#Optionaly the template can contain placehodler {force} for next processing.
-#Placeholder {force} will be appended if is necessary and if is not presset
+#Each value (a callable taking report_item.info) returns a message.
+#Force text will be appended if necessary.
+#If it is necessary to put the force text inside the string then the callable
+#must take the force_text parameter.
CODE_TO_MESSAGE_BUILDER_MAP = {
- codes.DUPLICATE_CONSTRAINTS_EXIST: lambda info:
- "duplicate constraint already exists{force}\n" + "\n".join([
+ codes.DUPLICATE_CONSTRAINTS_EXIST: lambda info, force_text:
+ "duplicate constraint already exists{0}\n".format(force_text)
+ + "\n".join([
" " + constraint(info["constraint_type"], constraint_info)
for constraint_info in info["constraint_info_list"]
])
@@ -59,7 +61,9 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
"{resource_id} is a {mode} resource, you should use the"
" {parent_type} id: {parent_id} when adding constraints"
).format(
- mode="master/slave" if info["parent_type"] == "master" else "clone",
+ mode="master/slave" if info["parent_type"] == "master"
+ else info["parent_type"]
+ ,
**info
)
,
diff --git a/pcs/cli/constraint_all/test/test_console_report.py b/pcs/cli/constraint_all/test/test_console_report.py
index d686ef6..ece5876 100644
--- a/pcs/cli/constraint_all/test/test_console_report.py
+++ b/pcs/cli/constraint_all/test/test_console_report.py
@@ -72,13 +72,20 @@ class DuplicateConstraintsReportTest(TestCase):
self.assertEqual(
"\n".join([
- "duplicate constraint already exists{force}",
+ "duplicate constraint already exists force text",
" constraint info"
]),
- self.build({
- "constraint_info_list": [{"options": {"a": "b"}}],
- "constraint_type": "rsc_some"
- })
+ self.build(
+ {
+ "constraint_info_list": [{"options": {"a": "b"}}],
+ "constraint_type": "rsc_some"
+ },
+ force_text=" force text"
+ )
+ )
+ mock_constraint.assert_called_once_with(
+ "rsc_some",
+ {"options": {"a": "b"}}
)
class ResourceForConstraintIsMultiinstanceTest(TestCase):
@@ -110,3 +117,15 @@ class ResourceForConstraintIsMultiinstanceTest(TestCase):
"parent_id": "RESOURCE_CLONE"
})
)
+
+ def test_build_message_for_bundle(self):
+ self.assertEqual(
+ "RESOURCE_PRIMITIVE is a bundle resource, you should use the"
+ " bundle id: RESOURCE_CLONE when adding constraints"
+ ,
+ self.build({
+ "resource_id": "RESOURCE_PRIMITIVE",
+ "parent_type": "bundle",
+ "parent_id": "RESOURCE_CLONE"
+ })
+ )
diff --git a/pcs/cli/constraint_order/console_report.py b/pcs/cli/constraint_order/console_report.py
index 8a54224..42aa81a 100644
--- a/pcs/cli/constraint_order/console_report.py
+++ b/pcs/cli/constraint_order/console_report.py
@@ -4,7 +4,7 @@ from __future__ import (
print_function,
unicode_literals,
)
-from pcs.lib.pacemaker_values import is_true
+from pcs.lib.pacemaker.values import is_true
def constraint_plain(constraint_info, with_id=False):
"""
diff --git a/pcs/cli/fencing_topology.py b/pcs/cli/fencing_topology.py
new file mode 100644
index 0000000..52367f2
--- /dev/null
+++ b/pcs/cli/fencing_topology.py
@@ -0,0 +1,24 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.common.fencing_topology import (
+ TARGET_TYPE_NODE,
+ TARGET_TYPE_REGEXP,
+ TARGET_TYPE_ATTRIBUTE,
+)
+
+__target_type_map = {
+ "attrib": TARGET_TYPE_ATTRIBUTE,
+ "node": TARGET_TYPE_NODE,
+ "regexp": TARGET_TYPE_REGEXP,
+}
+
+target_type_map_cli_to_lib = __target_type_map
+
+target_type_map_lib_to_cli = dict([
+ (value, key) for key, value in __target_type_map.items()
+])
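
[Editor's note] A brief illustration of how the two mappings above are meant to be used in both directions; the surrounding command code is not part of this file, so the snippet is only an example.

    from pcs.cli.fencing_topology import (
        target_type_map_cli_to_lib,
        target_type_map_lib_to_cli,
    )

    # CLI word -> library constant (e.g. when parsing a stonith level command)
    lib_type = target_type_map_cli_to_lib["regexp"]
    # library constant -> CLI word (e.g. when printing the fencing topology)
    assert target_type_map_lib_to_cli[lib_type] == "regexp"
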
diff --git a/pcs/test/tools/test/__init__.py b/pcs/cli/resource/__init__.py
similarity index 100%
copy from pcs/test/tools/test/__init__.py
copy to pcs/cli/resource/__init__.py
diff --git a/pcs/cli/resource/parse_args.py b/pcs/cli/resource/parse_args.py
new file mode 100644
index 0000000..19ee8f9
--- /dev/null
+++ b/pcs/cli/resource/parse_args.py
@@ -0,0 +1,191 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+from pcs.cli.common.parse_args import group_by_keywords, prepare_options
+from pcs.cli.common.errors import CmdLineInputError
+
+
+def parse_create_simple(arg_list):
+ groups = group_by_keywords(
+ arg_list,
+ set(["op", "meta"]),
+ implicit_first_group_key="options",
+ group_repeated_keywords=["op"],
+ )
+
+ parts = {
+ "meta": prepare_options(groups.get("meta", [])),
+ "options": prepare_options(groups.get("options", [])),
+ "op": [
+ prepare_options(op)
+ for op in build_operations(groups.get("op", []))
+ ],
+ }
+
+ return parts
+
+def parse_create(arg_list):
+ groups = group_by_keywords(
+ arg_list,
+ set(["op", "meta", "clone", "master", "bundle"]),
+ implicit_first_group_key="options",
+ group_repeated_keywords=["op"],
+ only_found_keywords=True,
+ )
+
+ parts = {
+ "meta": prepare_options(groups.get("meta", [])),
+ "options": prepare_options(groups.get("options", [])),
+ "op": [
+ prepare_options(op)
+ for op in build_operations(groups.get("op", []))
+ ],
+ }
+
+ if "clone" in groups:
+ parts["clone"] = prepare_options(groups["clone"])
+
+ if "master" in groups:
+ parts["master"] = prepare_options(groups["master"])
+
+ if "bundle" in groups:
+ parts["bundle"] = groups["bundle"]
+
+ return parts
+
+def _parse_bundle_groups(arg_list):
+ repeatable_keyword_list = ["port-map", "storage-map"]
+ keyword_list = ["container", "network"] + repeatable_keyword_list
+ groups = group_by_keywords(
+ arg_list,
+ set(keyword_list),
+ group_repeated_keywords=repeatable_keyword_list,
+ only_found_keywords=True,
+ )
+ for keyword in keyword_list:
+ if keyword not in groups:
+ continue
+ if keyword in repeatable_keyword_list:
+ for repeated_section in groups[keyword]:
+ if len(repeated_section) == 0:
+ raise CmdLineInputError(
+ "No {0} options specified".format(keyword)
+ )
+ else:
+ if len(groups[keyword]) == 0:
+ raise CmdLineInputError(
+ "No {0} options specified".format(keyword)
+ )
+ return groups
+
+def parse_bundle_create_options(arg_list):
+ groups = _parse_bundle_groups(arg_list)
+ container_options = groups.get("container", [])
+ container_type = None
+ if container_options and "=" not in container_options[0]:
+ container_type = container_options.pop(0)
+ parts = {
+ "container_type": container_type,
+ "container": prepare_options(container_options),
+ "network": prepare_options(groups.get("network", [])),
+ "port_map": [
+ prepare_options(port_map)
+ for port_map in groups.get("port-map", [])
+ ],
+ "storage_map": [
+ prepare_options(storage_map)
+ for storage_map in groups.get("storage-map", [])
+ ],
+ }
+ if not parts["container_type"]:
+ parts["container_type"] = "docker"
+ return parts
+
+def _split_bundle_map_update_op_and_options(
+ map_arg_list, result_parts, map_name
+):
+ if len(map_arg_list) < 2:
+ raise _bundle_map_update_not_valid(map_name)
+ op, options = map_arg_list[0], map_arg_list[1:]
+ if op == "add":
+ result_parts[op].append(prepare_options(options))
+ elif op == "remove":
+ result_parts[op].extend(options)
+ else:
+ raise _bundle_map_update_not_valid(map_name)
+
+def _bundle_map_update_not_valid(map_name):
+ return CmdLineInputError(
+ (
+ "When using '{map}' you must specify either 'add' and options or "
+ "'remove' and id(s)"
+ ).format(map=map_name)
+ )
+
+def parse_bundle_update_options(arg_list):
+ groups = _parse_bundle_groups(arg_list)
+ port_map = {"add": [], "remove": []}
+ for map_group in groups.get("port-map", []):
+ _split_bundle_map_update_op_and_options(
+ map_group, port_map, "port-map"
+ )
+ storage_map = {"add": [], "remove": []}
+ for map_group in groups.get("storage-map", []):
+ _split_bundle_map_update_op_and_options(
+ map_group, storage_map, "storage-map"
+ )
+ parts = {
+ "container": prepare_options(groups.get("container", [])),
+ "network": prepare_options(groups.get("network", [])),
+ "port_map_add": port_map["add"],
+ "port_map_remove": port_map["remove"],
+ "storage_map_add": storage_map["add"],
+ "storage_map_remove": storage_map["remove"],
+ }
+ return parts
+
+def build_operations(op_group_list):
+ """
+ Return a list of dicts. Each dict represents one operation.
+ op_group_list is a list of lists; each inner list contains the arguments that
+ followed one "op" keyword, so a single item can describe several operations,
+ for example: [
+ [monitor timeout=1 start timeout=2],
+ [monitor timeout=3 interval=10],
+ ]
+ """
+ operation_list = []
+ for op_group in op_group_list:
+ #empty operation is not allowed
+ if not op_group:
+ raise __not_enough_parts_in_operation()
+
+ #every operation group needs to start with operation name
+ if "=" in op_group[0]:
+ raise __every_operation_needs_name()
+
+ for arg in op_group:
+ if "=" not in arg:
+ operation_list.append(["name={0}".format(arg)])
+ else:
+ operation_list[-1].append(arg)
+
+ #every operation needs at least a name and one option;
+ #an op_group can contain several operations, so the check runs after processing
+ if any([len(operation) < 2 for operation in operation_list]):
+ raise __not_enough_parts_in_operation()
+
+ return operation_list
+
+def __not_enough_parts_in_operation():
+ return CmdLineInputError(
+ "When using 'op' you must specify an operation name"
+ " and at least one option"
+ )
+
+def __every_operation_needs_name():
+ return CmdLineInputError(
+ "When using 'op' you must specify an operation name after 'op'"
+ )
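
[Editor's note] As a quick orientation for the parser above, here is how a typical argument list maps onto the parts dict returned by parse_create(). The resource id and agent name are assumed to be stripped off by the calling command, which is outside this module.

    from pcs.cli.resource.parse_args import parse_create

    args = [
        "ip=192.168.0.10",
        "op", "monitor", "interval=10s",
        "meta", "target-role=Stopped",
        "clone", "clone-max=2",
    ]
    parts = parse_create(args)
    # parts == {
    #     "options": {"ip": "192.168.0.10"},
    #     "op": [{"name": "monitor", "interval": "10s"}],
    #     "meta": {"target-role": "Stopped"},
    #     "clone": {"clone-max": "2"},
    # }
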
diff --git a/pcs/test/tools/test/__init__.py b/pcs/cli/resource/test/__init__.py
similarity index 100%
copy from pcs/test/tools/test/__init__.py
copy to pcs/cli/resource/test/__init__.py
diff --git a/pcs/cli/resource/test/test_parse_args.py b/pcs/cli/resource/test/test_parse_args.py
new file mode 100644
index 0000000..5033ec7
--- /dev/null
+++ b/pcs/cli/resource/test/test_parse_args.py
@@ -0,0 +1,672 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.cli.resource import parse_args
+from pcs.cli.common.errors import CmdLineInputError
+
+class ParseCreateArgs(TestCase):
+ def assert_produce(self, arg_list, result):
+ self.assertEqual(parse_args.parse_create(arg_list), result)
+
+ def test_no_args(self):
+ self.assert_produce([], {
+ "meta": {},
+ "options": {},
+ "op": [],
+ })
+
+ def test_only_instance_attributes(self):
+ self.assert_produce(["a=b", "c=d"], {
+ "meta": {},
+ "options": {
+ "a": "b",
+ "c": "d",
+ },
+ "op": [],
+ })
+
+ def test_only_meta(self):
+ self.assert_produce(["meta", "a=b", "c=d"], {
+ "options": {},
+ "op": [],
+ "meta": {
+ "a": "b",
+ "c": "d",
+ },
+ })
+
+ def test_only_clone(self):
+ self.assert_produce(["clone", "a=b", "c=d"], {
+ "meta": {},
+ "options": {},
+ "op": [],
+ "clone": {
+ "a": "b",
+ "c": "d",
+ },
+ })
+
+ def test_only_operations(self):
+ self.assert_produce([
+ "op", "monitor", "a=b", "c=d", "start", "e=f",
+ ], {
+ "meta": {},
+ "options": {},
+ "op": [
+ {"name": "monitor", "a": "b", "c": "d"},
+ {"name": "start", "e": "f"},
+ ],
+ })
+
+ def test_args_op_clone_meta(self):
+ self.assert_produce([
+ "a=b", "c=d",
+ "meta", "e=f", "g=h",
+ "op", "monitor", "i=j", "k=l", "start", "m=n",
+ "clone", "o=p", "q=r",
+ ], {
+ "options": {
+ "a": "b",
+ "c": "d",
+ },
+ "op": [
+ {"name": "monitor", "i": "j", "k": "l"},
+ {"name": "start", "m": "n"},
+ ],
+ "meta": {
+ "e": "f",
+ "g": "h",
+ },
+ "clone": {
+ "o": "p",
+ "q": "r",
+ },
+ })
+
+ def assert_raises_cmdline(self, args):
+ self.assertRaises(
+ CmdLineInputError,
+ lambda: parse_args.parse_create(args)
+ )
+
+ def test_raises_when_operation_name_does_not_follow_op_keyword(self):
+ self.assert_raises_cmdline(["op", "a=b"])
+ self.assert_raises_cmdline(["op", "monitor", "a=b", "op", "c=d"])
+
+ def test_raises_when_operation_has_no_option(self):
+ self.assert_raises_cmdline(
+ ["op", "monitor", "a=b", "start", "stop", "c=d"]
+ )
+ self.assert_raises_cmdline(
+ ["op", "monitor", "a=b", "stop", "c=d", "op", "start"]
+ )
+
+ def test_allow_to_repeat_op(self):
+ self.assert_produce([
+ "op", "monitor", "a=b", "c=d",
+ "op", "start", "e=f",
+ ], {
+ "meta": {},
+ "options": {},
+ "op": [
+ {"name": "monitor", "a": "b", "c": "d"},
+ {"name": "start", "e": "f"},
+ ],
+ })
+
+ def test_deal_with_empty_operations(self):
+ self.assert_raises_cmdline(["op", "monitoring", "a=b", "op"])
+
+
+class ParseCreateSimple(TestCase):
+ def assert_produce(self, arg_list, result):
+ self.assertEqual(parse_args.parse_create_simple(arg_list), result)
+
+ def test_without_args(self):
+ self.assert_produce([], {
+ "meta": {},
+ "options": {},
+ "op": [],
+ })
+
+ def test_only_instance_attributes(self):
+ self.assert_produce(["a=b", "c=d"], {
+ "meta": {},
+ "options": {
+ "a": "b",
+ "c": "d",
+ },
+ "op": [],
+ })
+
+ def test_only_meta(self):
+ self.assert_produce(["meta", "a=b", "c=d"], {
+ "options": {},
+ "op": [],
+ "meta": {
+ "a": "b",
+ "c": "d",
+ },
+ })
+
+ def test_only_operations(self):
+ self.assert_produce([
+ "op", "monitor", "a=b", "c=d", "start", "e=f",
+ ], {
+ "meta": {},
+ "options": {},
+ "op": [
+ {"name": "monitor", "a": "b", "c": "d"},
+ {"name": "start", "e": "f"},
+ ],
+ })
+
+ def assert_raises_cmdline(self, args):
+ self.assertRaises(
+ CmdLineInputError,
+ lambda: parse_args.parse_create_simple(args)
+ )
+
+ def test_raises_when_operation_name_does_not_follow_op_keyword(self):
+ self.assert_raises_cmdline(["op", "a=b"])
+ self.assert_raises_cmdline(["op", "monitor", "a=b", "op", "c=d"])
+
+ def test_raises_when_operation_has_no_option(self):
+ self.assert_raises_cmdline(
+ ["op", "monitor", "a=b", "start", "stop", "c=d"]
+ )
+ self.assert_raises_cmdline(
+ ["op", "monitor", "a=b", "stop", "c=d", "op", "start"]
+ )
+
+ def test_allow_to_repeat_op(self):
+ self.assert_produce([
+ "op", "monitor", "a=b", "c=d",
+ "op", "start", "e=f",
+ ], {
+ "meta": {},
+ "options": {},
+ "op": [
+ {"name": "monitor", "a": "b", "c": "d"},
+ {"name": "start", "e": "f"},
+ ],
+ })
+
+
+class ParseBundleCreateOptions(TestCase):
+ def assert_produce(self, arg_list, result):
+ self.assertEqual(
+ result,
+ parse_args.parse_bundle_create_options(arg_list)
+ )
+
+ def assert_raises_cmdline(self, arg_list):
+ self.assertRaises(
+ CmdLineInputError,
+ lambda: parse_args.parse_bundle_create_options(arg_list)
+ )
+
+ def test_no_args(self):
+ self.assert_produce(
+ [],
+ {
+ "container_type": "docker",
+ "container": {},
+ "network": {},
+ "port_map": [],
+ "storage_map": [],
+ }
+ )
+
+ def test_container_empty(self):
+ self.assert_raises_cmdline(["container"])
+
+ def test_container_type(self):
+ self.assert_produce(
+ ["container", "lxc"],
+ {
+ "container_type": "lxc",
+ "container": {},
+ "network": {},
+ "port_map": [],
+ "storage_map": [],
+ }
+ )
+
+ def test_container_options(self):
+ self.assert_produce(
+ ["container", "a=b", "c=d"],
+ {
+ "container_type": "docker",
+ "container": {"a": "b", "c": "d"},
+ "network": {},
+ "port_map": [],
+ "storage_map": [],
+ }
+ )
+
+ def test_container_type_and_options(self):
+ self.assert_produce(
+ ["container", "lxc", "a=b", "c=d"],
+ {
+ "container_type": "lxc",
+ "container": {"a": "b", "c": "d"},
+ "network": {},
+ "port_map": [],
+ "storage_map": [],
+ }
+ )
+
+ def test_container_type_must_be_first(self):
+ self.assert_raises_cmdline(["container", "a=b", "docker", "c=d"])
+
+ def test_container_missing_value(self):
+ self.assert_raises_cmdline(["container", "docker", "a", "c=d"])
+
+ def test_container_missing_key(self):
+ self.assert_raises_cmdline(["container", "docker", "=b", "c=d"])
+
+ def test_network(self):
+ self.assert_produce(
+ ["network", "a=b", "c=d"],
+ {
+ "container_type": "docker",
+ "container": {},
+ "network": {"a": "b", "c": "d"},
+ "port_map": [],
+ "storage_map": [],
+ }
+ )
+
+ def test_network_empty(self):
+ self.assert_raises_cmdline(["network"])
+
+ def test_network_missing_value(self):
+ self.assert_raises_cmdline(["network", "a", "c=d"])
+
+ def test_network_missing_key(self):
+ self.assert_raises_cmdline(["network", "=b", "c=d"])
+
+ def test_port_map_empty(self):
+ self.assert_raises_cmdline(["port-map"])
+
+ def test_one_of_port_map_empty(self):
+ self.assert_raises_cmdline(
+ ["port-map", "a=b", "port-map", "network", "c=d"]
+ )
+
+ def test_port_map_one(self):
+ self.assert_produce(
+ ["port-map", "a=b", "c=d"],
+ {
+ "container_type": "docker",
+ "container": {},
+ "network": {},
+ "port_map": [{"a": "b", "c": "d"}],
+ "storage_map": [],
+ }
+ )
+
+ def test_port_map_more(self):
+ self.assert_produce(
+ ["port-map", "a=b", "c=d", "port-map", "e=f"],
+ {
+ "container_type": "docker",
+ "container": {},
+ "network": {},
+ "port_map": [{"a": "b", "c": "d"}, {"e": "f"}],
+ "storage_map": [],
+ }
+ )
+
+ def test_port_map_missing_value(self):
+ self.assert_raises_cmdline(["port-map", "a", "c=d"])
+
+ def test_port_map_missing_key(self):
+ self.assert_raises_cmdline(["port-map", "=b", "c=d"])
+
+ def test_storage_map_empty(self):
+ self.assert_raises_cmdline(["storage-map"])
+
+ def test_one_of_storage_map_empty(self):
+ self.assert_raises_cmdline(
+ ["storage-map", "port-map", "a=b", "storage-map", "c=d"]
+ )
+
+ def test_storage_map_one(self):
+ self.assert_produce(
+ ["storage-map", "a=b", "c=d"],
+ {
+ "container_type": "docker",
+ "container": {},
+ "network": {},
+ "port_map": [],
+ "storage_map": [{"a": "b", "c": "d"}],
+ }
+ )
+
+ def test_storage_map_more(self):
+ self.assert_produce(
+ ["storage-map", "a=b", "c=d", "storage-map", "e=f"],
+ {
+ "container_type": "docker",
+ "container": {},
+ "network": {},
+ "port_map": [],
+ "storage_map": [{"a": "b", "c": "d"}, {"e": "f"}],
+ }
+ )
+
+ def test_storage_map_missing_value(self):
+ self.assert_raises_cmdline(["storage-map", "a", "c=d"])
+
+ def test_storage_map_missing_key(self):
+ self.assert_raises_cmdline(["storage-map", "=b", "c=d"])
+
+ def test_all(self):
+ self.assert_produce(
+ [
+ "container", "lxc", "a=b", "c=d",
+ "network", "e=f", "g=h",
+ "port-map", "i=j", "k=l",
+ "port-map", "m=n", "o=p",
+ "storage-map", "q=r", "s=t",
+ "storage-map", "u=v", "w=x",
+ ],
+ {
+ "container_type": "lxc",
+ "container": {"a": "b", "c": "d"},
+ "network": {"e": "f", "g": "h"},
+ "port_map": [{"i": "j", "k": "l"}, {"m": "n", "o": "p"}],
+ "storage_map": [{"q": "r", "s": "t"}, {"u": "v", "w": "x"}],
+ }
+ )
+
+ def test_all_mixed(self):
+ self.assert_produce(
+ [
+ "storage-map", "q=r", "s=t",
+ "port-map", "i=j", "k=l",
+ "network", "e=f",
+ "container", "lxc", "a=b",
+ "storage-map", "u=v", "w=x",
+ "port-map", "m=n", "o=p",
+ "network", "g=h",
+ "container", "c=d",
+ ],
+ {
+ "container_type": "lxc",
+ "container": {"a": "b", "c": "d"},
+ "network": {"e": "f", "g": "h"},
+ "port_map": [{"i": "j", "k": "l"}, {"m": "n", "o": "p"}],
+ "storage_map": [{"q": "r", "s": "t"}, {"u": "v", "w": "x"}],
+ }
+ )
+
+
+class ParseBundleUpdateOptions(TestCase):
+ def assert_produce(self, arg_list, result):
+ self.assertEqual(
+ result,
+ parse_args.parse_bundle_update_options(arg_list)
+ )
+
+ def assert_raises_cmdline(self, arg_list):
+ self.assertRaises(
+ CmdLineInputError,
+ lambda: parse_args.parse_bundle_update_options(arg_list)
+ )
+
+ def test_no_args(self):
+ self.assert_produce(
+ [],
+ {
+ "container": {},
+ "network": {},
+ "port_map_add": [],
+ "port_map_remove": [],
+ "storage_map_add": [],
+ "storage_map_remove": [],
+ }
+ )
+
+ def test_container_options(self):
+ self.assert_produce(
+ ["container", "a=b", "c=d"],
+ {
+ "container": {"a": "b", "c": "d"},
+ "network": {},
+ "port_map_add": [],
+ "port_map_remove": [],
+ "storage_map_add": [],
+ "storage_map_remove": [],
+ }
+ )
+
+ def test_container_empty(self):
+ self.assert_raises_cmdline(["container"])
+
+ def test_container_missing_value(self):
+ self.assert_raises_cmdline(["container", "a", "c=d"])
+
+ def test_container_missing_key(self):
+ self.assert_raises_cmdline(["container", "=b", "c=d"])
+
+ def test_network(self):
+ self.assert_produce(
+ ["network", "a=b", "c=d"],
+ {
+ "container": {},
+ "network": {"a": "b", "c": "d"},
+ "port_map_add": [],
+ "port_map_remove": [],
+ "storage_map_add": [],
+ "storage_map_remove": [],
+ }
+ )
+
+ def test_network_empty(self):
+ self.assert_raises_cmdline(["network"])
+
+ def test_network_missing_value(self):
+ self.assert_raises_cmdline(["network", "a", "c=d"])
+
+ def test_network_missing_key(self):
+ self.assert_raises_cmdline(["network", "=b", "c=d"])
+
+ def test_port_map_empty(self):
+ self.assert_raises_cmdline(["port-map"])
+
+ def test_one_of_port_map_empty(self):
+ self.assert_raises_cmdline(
+ ["port-map", "a=b", "port-map", "network", "c=d"]
+ )
+
+ def test_port_map_missing_params(self):
+ self.assert_raises_cmdline(["port-map"])
+ self.assert_raises_cmdline(["port-map add"])
+ self.assert_raises_cmdline(["port-map remove"])
+
+ def test_port_map_wrong_keyword(self):
+ self.assert_raises_cmdline(["port-map", "wrong", "a=b"])
+
+ def test_port_map_missing_value(self):
+ self.assert_raises_cmdline(["port-map", "add", "a", "c=d"])
+
+ def test_port_map_missing_key(self):
+ self.assert_raises_cmdline(["port-map", "add", "=b", "c=d"])
+
+ def test_port_map_more(self):
+ self.assert_produce(
+ [
+ "port-map", "add", "a=b",
+ "port-map", "remove", "c", "d",
+ "port-map", "add", "e=f", "g=h",
+ "port-map", "remove", "i",
+ ],
+ {
+ "container": {},
+ "network": {},
+ "port_map_add": [
+ {"a": "b", },
+ {"e": "f", "g": "h",},
+ ],
+ "port_map_remove": ["c", "d", "i"],
+ "storage_map_add": [],
+ "storage_map_remove": [],
+ }
+ )
+
+ def test_storage_map_empty(self):
+ self.assert_raises_cmdline(["storage-map"])
+
+ def test_one_of_storage_map_empty(self):
+ self.assert_raises_cmdline(
+ ["storage-map", "port-map", "a=b", "storage-map", "c=d"]
+ )
+
+ def test_storage_map_missing_params(self):
+ self.assert_raises_cmdline(["storage-map"])
+ self.assert_raises_cmdline(["storage-map add"])
+ self.assert_raises_cmdline(["storage-map remove"])
+
+ def test_storage_map_wrong_keyword(self):
+ self.assert_raises_cmdline(["storage-map", "wrong", "a=b"])
+
+ def test_storage_map_missing_value(self):
+ self.assert_raises_cmdline(["storage-map", "add", "a", "c=d"])
+
+ def test_storage_map_missing_key(self):
+ self.assert_raises_cmdline(["storage-map", "add", "=b", "c=d"])
+
+ def test_storage_map_more(self):
+ self.assert_produce(
+ [
+ "storage-map", "add", "a=b",
+ "storage-map", "remove", "c", "d",
+ "storage-map", "add", "e=f", "g=h",
+ "storage-map", "remove", "i",
+ ],
+ {
+ "container": {},
+ "network": {},
+ "port_map_add": [],
+ "port_map_remove": [],
+ "storage_map_add": [
+ {"a": "b", },
+ {"e": "f", "g": "h",},
+ ],
+ "storage_map_remove": ["c", "d", "i"],
+ }
+ )
+
+ def test_all(self):
+ self.assert_produce(
+ [
+ "container", "a=b", "c=d",
+ "network", "e=f", "g=h",
+ "port-map", "add", "i=j", "k=l",
+ "port-map", "add", "m=n",
+ "port-map", "remove", "o", "p",
+ "port-map", "remove", "q",
+ "storage-map", "add", "r=s", "t=u",
+ "storage-map", "add", "v=w",
+ "storage-map", "remove", "x", "y",
+ "storage-map", "remove", "z",
+ ],
+ {
+ "container": {"a": "b", "c": "d"},
+ "network": {"e": "f", "g": "h"},
+ "port_map_add": [
+ {"i": "j", "k": "l"},
+ {"m": "n"},
+ ],
+ "port_map_remove": ["o", "p", "q"],
+ "storage_map_add": [
+ {"r": "s", "t": "u"},
+ {"v": "w"},
+ ],
+ "storage_map_remove": ["x", "y", "z"],
+ }
+ )
+
+ def test_all_mixed(self):
+ self.assert_produce(
+ [
+ "storage-map", "remove", "x", "y",
+ "port-map", "remove", "o", "p",
+ "network", "e=f", "g=h",
+ "storage-map", "add", "r=s", "t=u",
+ "port-map", "add", "i=j", "k=l",
+ "container", "a=b", "c=d",
+ "port-map", "remove", "q",
+ "storage-map", "remove", "z",
+ "storage-map", "add", "v=w",
+ "port-map", "add", "m=n",
+ ],
+ {
+ "container": {"a": "b", "c": "d"},
+ "network": {"e": "f", "g": "h"},
+ "port_map_add": [
+ {"i": "j", "k": "l"},
+ {"m": "n"},
+ ],
+ "port_map_remove": ["o", "p", "q"],
+ "storage_map_add": [
+ {"r": "s", "t": "u"},
+ {"v": "w"},
+ ],
+ "storage_map_remove": ["x", "y", "z"],
+ }
+ )
+
+
+class BuildOperations(TestCase):
+ def assert_produce(self, arg_list, result):
+ self.assertEqual(result, parse_args.build_operations(arg_list))
+
+ def assert_raises_cmdline(self, arg_list):
+ self.assertRaises(
+ CmdLineInputError,
+ lambda: parse_args.build_operations(arg_list)
+ )
+
+ def test_return_empty_list_on_empty_input(self):
+ self.assert_produce([], [])
+
+ def test_return_all_operations_specified_in_the_same_group(self):
+ self.assert_produce(
+ [
+ ["monitor", "interval=10s", "start", "timeout=20s"]
+ ],
+ [
+ ["name=monitor", "interval=10s"],
+ ["name=start", "timeout=20s"],
+ ]
+ )
+
+ def test_return_all_operations_specified_in_different_groups(self):
+ self.assert_produce(
+ [
+ ["monitor", "interval=10s"],
+ ["start", "timeout=20s"],
+ ],
+ [
+ ["name=monitor", "interval=10s"],
+ ["name=start", "timeout=20s"],
+ ]
+ )
+
+ def test_refuse_empty_operation(self):
+ self.assert_raises_cmdline([[]])
+
+ def test_refuse_operation_without_attribute(self):
+ self.assert_raises_cmdline([["monitor"]])
+
+ def test_refuse_operation_without_name(self):
+ self.assert_raises_cmdline([["interval=10s"]])
diff --git a/pcs/cluster.py b/pcs/cluster.py
index 0a5918e..d64194d 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -30,25 +30,29 @@ from pcs import (
resource,
settings,
status,
- stonith,
usage,
utils,
)
from pcs.utils import parallel_for_nodes
from pcs.common import report_codes
+from pcs.cli.common.errors import (
+ CmdLineInputError,
+ ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE,
+)
from pcs.cli.common.reports import process_library_reports, build_report_message
+import pcs.cli.cluster.command as cluster_command
from pcs.lib import (
- pacemaker as lib_pacemaker,
sbd as lib_sbd,
reports as lib_reports,
)
from pcs.lib.booth import sync as booth_sync
-from pcs.lib.nodes_task import check_corosync_offline_on_nodes
+from pcs.lib.commands.cluster import _share_authkey, _destroy_pcmk_remote_env
from pcs.lib.commands.quorum import _add_device_model_net
from pcs.lib.corosync import (
config_parser as corosync_conf_utils,
qdevice_net,
)
+from pcs.cli.common.console_report import warn, error
from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade
from pcs.lib.errors import (
LibraryError,
@@ -57,11 +61,16 @@ from pcs.lib.errors import (
from pcs.lib.external import (
disable_service,
is_systemctl,
+ NodeCommandUnsuccessfulException,
NodeCommunicationException,
node_communicator_exception_to_report_item,
)
-from pcs.lib.node import NodeAddresses
-from pcs.lib.tools import environment_file_to_dict
+from pcs.lib.env_tools import get_nodes
+from pcs.lib.node import NodeAddresses, NodeAddressesList
+from pcs.lib.nodes_task import check_corosync_offline_on_nodes, distribute_files
+from pcs.lib import node_communication_format
+import pcs.lib.pacemaker.live as lib_pacemaker
+from pcs.lib.tools import environment_file_to_dict, generate_key
def cluster_cmd(argv):
if len(argv) == 0:
@@ -70,7 +79,7 @@ def cluster_cmd(argv):
sub_cmd = argv.pop(0)
if (sub_cmd == "help"):
- usage.cluster(argv)
+ usage.cluster([" ".join(argv)] if argv else [])
elif (sub_cmd == "setup"):
if "--name" in utils.pcs_options:
cluster_setup([utils.pcs_options["--name"]] + argv)
@@ -94,32 +103,63 @@ def cluster_cmd(argv):
cluster_token_nodes(argv)
elif (sub_cmd == "start"):
if "--all" in utils.pcs_options:
+ if argv:
+ utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
start_cluster_all()
else:
start_cluster(argv)
elif (sub_cmd == "stop"):
if "--all" in utils.pcs_options:
+ if argv:
+ utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
stop_cluster_all()
else:
stop_cluster(argv)
elif (sub_cmd == "kill"):
kill_cluster(argv)
elif (sub_cmd == "standby"):
- node.node_standby(argv)
+ try:
+ node.node_standby_cmd(
+ utils.get_library_wrapper(),
+ argv,
+ utils.get_modificators(),
+ True
+ )
+ except LibraryError as e:
+ utils.process_library_reports(e.args)
+ except CmdLineInputError as e:
+ utils.exit_on_cmdline_input_errror(e, "node", "standby")
elif (sub_cmd == "unstandby"):
- node.node_standby(argv, False)
+ try:
+ node.node_standby_cmd(
+ utils.get_library_wrapper(),
+ argv,
+ utils.get_modificators(),
+ False
+ )
+ except LibraryError as e:
+ utils.process_library_reports(e.args)
+ except CmdLineInputError as e:
+ utils.exit_on_cmdline_input_errror(e, "node", "unstandby")
elif (sub_cmd == "enable"):
if "--all" in utils.pcs_options:
+ if argv:
+ utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
enable_cluster_all()
else:
enable_cluster(argv)
elif (sub_cmd == "disable"):
if "--all" in utils.pcs_options:
+ if argv:
+ utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
disable_cluster_all()
else:
disable_cluster(argv)
elif (sub_cmd == "remote-node"):
- cluster_remote_node(argv)
+ try:
+ cluster_remote_node(argv)
+ except LibraryError as e:
+ utils.process_library_reports(e.args)
elif (sub_cmd == "cib"):
get_cib(argv)
elif (sub_cmd == "cib-push"):
@@ -129,7 +169,34 @@ def cluster_cmd(argv):
elif (sub_cmd == "edit"):
cluster_edit(argv)
elif (sub_cmd == "node"):
- cluster_node(argv)
+ if not argv:
+ usage.cluster(["node"])
+ sys.exit(1)
+
+ remote_node_command_map = {
+ "add-remote": cluster_command.node_add_remote,
+ "add-guest": cluster_command.node_add_guest,
+ "remove-remote": cluster_command.create_node_remove_remote(
+ resource.resource_remove
+ ),
+ "remove-guest": cluster_command.node_remove_guest,
+ "clear": cluster_command.node_clear,
+ }
+ if argv[0] in remote_node_command_map:
+ try:
+ remote_node_command_map[argv[0]](
+ utils.get_library_wrapper(),
+ argv[1:],
+ utils.get_modificators()
+ )
+ except LibraryError as e:
+ utils.process_library_reports(e.args)
+ except CmdLineInputError as e:
+ utils.exit_on_cmdline_input_errror(
+ e, "cluster", "node " + argv[0]
+ )
+ else:
+ cluster_node(argv)
elif (sub_cmd == "localnode"):
cluster_localnode(argv)
elif (sub_cmd == "uidgid"):
@@ -360,8 +427,12 @@ def cluster_setup(argv):
# checks that nodes are authenticated as well
if "--force" not in utils.pcs_options:
all_nodes_available = True
+ lib_env = utils.get_lib_env()
for node in primary_addr_list:
- available, message = utils.canAddNodeToCluster(node)
+ available, message = utils.canAddNodeToCluster(
+ lib_env.node_communicator(),
+ NodeAddresses(node)
+ )
if not available:
all_nodes_available = False
utils.err("{0}: {1}".format(node, message), False)
@@ -376,6 +447,30 @@ def cluster_setup(argv):
destroy_cluster(primary_addr_list)
print()
+ try:
+ file_definitions = {}
+ file_definitions.update(
+ node_communication_format.pcmk_authkey_file(generate_key())
+ )
+ file_definitions.update(
+ node_communication_format.corosync_authkey_file(
+ generate_key(random_bytes_count=128)
+ )
+ )
+
+ distribute_files(
+ lib_env.node_communicator(),
+ lib_env.report_processor,
+ file_definitions,
+ NodeAddressesList(
+ [NodeAddresses(node) for node in primary_addr_list]
+ ),
+ allow_incomplete_distribution="--force" in utils.pcs_options
+ )
+ except LibraryError as e: #Theoretically, this should not happen
+ utils.process_library_reports(e.args)
+
+
# send local cluster pcsd configs to the new nodes
print("Sending cluster config files to the nodes...")
pcsd_data = {
@@ -653,7 +748,6 @@ def cluster_setup_create_corosync_conf(
corosync_conf.add_section(logging_section)
totem_section.add_attribute("version", "2")
- totem_section.add_attribute("secauth", "off")
totem_section.add_attribute("cluster_name", cluster_name)
transport_options_names = (
@@ -934,6 +1028,7 @@ def is_node_fully_started(node_status):
def wait_for_local_node_started(stop_at, interval):
try:
while True:
+ time.sleep(interval)
node_status = lib_pacemaker.get_local_node_status(
utils.cmd_runner()
)
@@ -941,7 +1036,6 @@ def wait_for_local_node_started(stop_at, interval):
return 0, "Started"
if datetime.datetime.now() > stop_at:
return 1, "Waiting timeout"
- time.sleep(interval)
except LibraryError as e:
return 1, "Unable to get node status: {0}".format(
"\n".join([build_report_message(item) for item in e.args])
@@ -949,6 +1043,7 @@ def wait_for_local_node_started(stop_at, interval):
def wait_for_remote_node_started(node, stop_at, interval):
while True:
+ time.sleep(interval)
code, output = utils.getPacemakerNodeStatus(node)
# HTTP error, permission denied or unable to auth
# there is no point in trying again as it won't get magically fixed
@@ -964,7 +1059,6 @@ def wait_for_remote_node_started(node, stop_at, interval):
return 1, "Unable to get node status"
if datetime.datetime.now() > stop_at:
return 1, "Waiting timeout"
- time.sleep(interval)
def wait_for_nodes_started(node_list, timeout=None):
timeout = 60 * 15 if timeout is None else timeout
@@ -1036,7 +1130,11 @@ def stop_cluster_nodes(nodes):
)
was_error = False
- node_errors = parallel_for_nodes(utils.stopPacemaker, nodes, quiet=True)
+ node_errors = parallel_for_nodes(
+ utils.repeat_if_timeout(utils.stopPacemaker),
+ nodes,
+ quiet=True
+ )
accessible_nodes = [
node for node in nodes if node not in node_errors.keys()
]
@@ -1047,7 +1145,7 @@ def stop_cluster_nodes(nodes):
)
was_error = True
- for node in node_errors.keys():
+ for node in node_errors:
print("{0}: Not stopping cluster - node is unreachable".format(node))
node_errors = parallel_for_nodes(
@@ -1102,7 +1200,11 @@ def destroy_cluster(argv, keep_going=False):
if len(argv) > 0:
# stop pacemaker and resources while cluster is still quorate
nodes = argv
- node_errors = parallel_for_nodes(utils.stopPacemaker, nodes, quiet=True)
+ node_errors = parallel_for_nodes(
+ utils.repeat_if_timeout(utils.stopPacemaker),
+ nodes,
+ quiet=True
+ )
# proceed with destroy regardless of errors
# destroy will stop any remaining cluster daemons
node_errors = parallel_for_nodes(utils.destroyCluster, nodes, quiet=True)
@@ -1228,6 +1330,8 @@ def cluster_push(argv):
filename = None
scope = None
timeout = None
+ diff_against = None
+
if "--wait" in utils.pcs_options:
timeout = utils.validate_wait_get_timeout()
for arg in argv:
@@ -1235,16 +1339,22 @@ def cluster_push(argv):
filename = arg
else:
arg_name, arg_value = arg.split("=", 1)
- if arg_name == "scope" and "--config" not in utils.pcs_options:
+ if arg_name == "scope":
+ if "--config" in utils.pcs_options:
+ utils.err("Cannot use both scope and --config")
if not utils.is_valid_cib_scope(arg_value):
utils.err("invalid CIB scope '%s'" % arg_value)
else:
scope = arg_value
+ elif arg_name == "diff-against":
+ diff_against = arg_value
else:
usage.cluster(["cib-push"])
sys.exit(1)
if "--config" in utils.pcs_options:
scope = "configuration"
+ if diff_against and scope:
+ utils.err("Cannot use both scope and diff-against")
if not filename:
usage.cluster(["cib-push"])
sys.exit(1)
@@ -1259,18 +1369,48 @@ def cluster_push(argv):
except (EnvironmentError, xml.parsers.expat.ExpatError) as e:
utils.err("unable to parse new cib: %s" % e)
- command = ["cibadmin", "--replace", "--xml-file", filename]
- if scope:
- command.append("--scope=%s" % scope)
- output, retval = utils.run(command)
- if retval != 0:
- utils.err("unable to push cib\n" + output)
+ if diff_against:
+ try:
+ xml.dom.minidom.parse(diff_against)
+ except (EnvironmentError, xml.parsers.expat.ExpatError) as e:
+ utils.err("unable to parse original cib: %s" % e)
+ runner = utils.cmd_runner()
+ command = [
+ "crm_diff", "--original", diff_against, "--new", filename,
+ "--no-version"
+ ]
+ patch, error, dummy_retval = runner.run(command)
+ # dummy_retval == -1 means one of two things:
+ # a) an error has occurred
+ # b) --original and --new differ
+ # so the return value alone cannot tell whether an error occurred
+ if error.strip():
+ utils.err("unable to diff the CIBs:\n" + error)
+ if not patch.strip():
+ utils.err(
+ "The new CIB is the same as the original CIB, nothing to push."
+ )
+
+ command = ["cibadmin", "--patch", "--xml-pipe"]
+ output, error, retval = runner.run(command, patch)
+ if retval != 0:
+ utils.err("unable to push cib\n" + error + output)
+
+ else:
+ command = ["cibadmin", "--replace", "--xml-file", filename]
+ if scope:
+ command.append("--scope=%s" % scope)
+ output, retval = utils.run(command)
+ if retval != 0:
+ utils.err("unable to push cib\n" + output)
+
print("CIB updated")
+
if "--wait" not in utils.pcs_options:
return
cmd = ["crm_resource", "--wait"]
if timeout:
- cmd.extend(["--timeout", timeout])
+ cmd.extend(["--timeout", str(timeout)])
output, retval = utils.run(cmd)
if retval != 0:
msg = []
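
[Editor's note] For reference, the diff-against branch added in this hunk corresponds to an invocation along these lines (file names are placeholders):

    pcs cluster cib-push new.cib diff-against=original.cib

pcs then runs crm_diff over the two files and feeds the resulting patch to cibadmin --patch --xml-pipe instead of replacing the whole CIB, which is what the scope/--config path continues to do.
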
@@ -1397,16 +1537,30 @@ def _ensure_cluster_is_offline_if_atb_should_be_enabled(
def cluster_node(argv):
- if len(argv) != 2:
- usage.cluster()
+ if len(argv) < 1:
+ usage.cluster(["node"])
sys.exit(1)
if argv[0] == "add":
add_node = True
elif argv[0] in ["remove","delete"]:
add_node = False
+ elif argv[0] == "add-outside":
+ try:
+ node_add_outside_cluster(
+ utils.get_library_wrapper(),
+ argv[1:],
+ utils.get_modificators(),
+ )
+ except CmdLineInputError as e:
+ utils.exit_on_cmdline_input_errror(e, "cluster", "node")
+ return
else:
- usage.cluster()
+ usage.cluster(["node"])
+ sys.exit(1)
+
+ if len(argv) != 2:
+ usage.cluster([" ".join(["node", argv[0]])])
sys.exit(1)
node = argv[1]
@@ -1435,238 +1589,332 @@ def cluster_node(argv):
modifiers = utils.get_modificators()
if add_node == True:
- wait = False
- wait_timeout = None
- if "--start" in utils.pcs_options and "--wait" in utils.pcs_options:
- wait_timeout = utils.validate_wait_get_timeout(False)
- wait = True
- need_ring1_address = utils.need_ring1_address(utils.getCorosyncConf())
- if not node1 and need_ring1_address:
- utils.err(
- "cluster is configured for RRP, "
- "you have to specify ring 1 address for the node"
- )
- elif node1 and not need_ring1_address:
- utils.err(
- "cluster is not configured for RRP, "
- "you must not specify ring 1 address for the node"
- )
- (canAdd, error) = utils.canAddNodeToCluster(node0)
- if not canAdd:
- utils.err("Unable to add '%s' to cluster: %s" % (node0, error))
-
- report_processor = lib_env.report_processor
- node_communicator = lib_env.node_communicator()
- node_addr = NodeAddresses(node0, node1)
-
- # First set up everything else than corosync. Once the new node is
- # present in corosync.conf / cluster.conf, it's considered part of a
- # cluster and the node add command cannot be run again. So we need to
- # minimize the amout of actions (and therefore possible failures) after
- # adding the node to corosync.
- try:
- # qdevice setup
- if not utils.is_rhel6():
- conf_facade = corosync_conf_facade.from_string(
- utils.getCorosyncConf()
- )
- qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings()
- if qdevice_model == "net":
- _add_device_model_net(
- lib_env,
- qdevice_model_options["host"],
- conf_facade.get_cluster_name(),
- [node_addr],
- skip_offline_nodes=False
- )
+ node_add(lib_env, node0, node1, modifiers)
+ else:
+ node_remove(lib_env, node0, modifiers)
- # sbd setup
- if lib_sbd.is_sbd_enabled(utils.cmd_runner()):
- if "--watchdog" not in utils.pcs_options:
- watchdog = settings.sbd_watchdog_default
- print("Warning: using default watchdog '{0}'".format(
- watchdog
- ))
- else:
- watchdog = utils.pcs_options["--watchdog"][0]
- _ensure_cluster_is_offline_if_atb_should_be_enabled(
- lib_env, 1, modifiers["skip_offline_nodes"]
- )
+def node_add_outside_cluster(lib, argv, modifiers):
+ if len(argv) != 2:
+ raise CmdLineInputError(
+ "Usage: pcs cluster node add-outside <node[,node-altaddr]> <cluster node>"
+ )
- report_processor.process(lib_reports.sbd_check_started())
- lib_sbd.check_sbd_on_node(
- report_processor, node_communicator, node_addr, watchdog
- )
- sbd_cfg = environment_file_to_dict(
- lib_sbd.get_local_sbd_config()
- )
- report_processor.process(
- lib_reports.sbd_config_distribution_started()
+ if len(modifiers["watchdog"]) > 1:
+ raise CmdLineInputError("Multiple watchdogs defined")
+
+ node_ring0, node_ring1 = utils.parse_multiring_node(argv[0])
+ cluster_node = argv[1]
+ data = [
+ ("new_nodename", node_ring0),
+ ]
+
+ if node_ring1:
+ data.append(("new_ring1addr", node_ring1))
+ if modifiers["watchdog"]:
+ data.append(("watchdog", modifiers["watchdog"][0]))
+ if modifiers["device"]:
+ # send the device list as repeated form fields (array)
+ data += [("devices[]", device) for device in modifiers["device"]]
+
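+ # ask an existing cluster node to add the new node on our behalf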
+ communicator = utils.get_lib_env().node_communicator()
+ try:
+ communicator.call_host(
+ cluster_node,
+ "remote/add_node_all",
+ communicator.format_data_dict(data),
+ )
+ except NodeCommandUnsuccessfulException as e:
+ print(e.reason)
+ except NodeCommunicationException as e:
+ process_library_reports([node_communicator_exception_to_report_item(e)])
+
+
+def node_add(lib_env, node0, node1, modifiers):
+ wait = False
+ wait_timeout = None
+ if "--start" in utils.pcs_options and "--wait" in utils.pcs_options:
+ wait_timeout = utils.validate_wait_get_timeout(False)
+ wait = True
+ need_ring1_address = utils.need_ring1_address(utils.getCorosyncConf())
+ if not node1 and need_ring1_address:
+ utils.err(
+ "cluster is configured for RRP, "
+ "you have to specify ring 1 address for the node"
+ )
+ elif node1 and not need_ring1_address:
+ utils.err(
+ "cluster is not configured for RRP, "
+ "you must not specify ring 1 address for the node"
+ )
+ node_addr = NodeAddresses(node0, node1)
+ node_communicator = lib_env.node_communicator()
+ (canAdd, error) = utils.canAddNodeToCluster(node_communicator, node_addr)
+
+ if not canAdd:
+ utils.err("Unable to add '%s' to cluster: %s" % (node0, error))
+
+ report_processor = lib_env.report_processor
+
+ # First set up everything other than corosync. Once the new node is
+ # present in corosync.conf / cluster.conf, it's considered part of a
+ # cluster and the node add command cannot be run again. So we need to
+ # minimize the amount of actions (and therefore possible failures) after
+ # adding the node to corosync.
+ try:
+ # qdevice setup
+ if not utils.is_rhel6():
+ conf_facade = corosync_conf_facade.from_string(
+ utils.getCorosyncConf()
+ )
+ qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings()
+ if qdevice_model == "net":
+ _add_device_model_net(
+ lib_env,
+ qdevice_model_options["host"],
+ conf_facade.get_cluster_name(),
+ [node_addr],
+ skip_offline_nodes=False
)
- lib_sbd.set_sbd_config_on_node(
- report_processor,
- node_communicator,
- node_addr,
- sbd_cfg,
+
+ # sbd setup
+ if lib_sbd.is_sbd_enabled(utils.cmd_runner()):
+ if "--watchdog" not in utils.pcs_options:
+ watchdog = settings.sbd_watchdog_default
+ print("Warning: using default watchdog '{0}'".format(
watchdog
- )
- report_processor.process(lib_reports.sbd_enabling_started())
- lib_sbd.enable_sbd_service_on_node(
- report_processor, node_communicator, node_addr
- )
+ ))
else:
- report_processor.process(lib_reports.sbd_disabling_started())
- lib_sbd.disable_sbd_service_on_node(
- report_processor, node_communicator, node_addr
+ watchdog = utils.pcs_options["--watchdog"][0]
+
+ _ensure_cluster_is_offline_if_atb_should_be_enabled(
+ lib_env, 1, modifiers["skip_offline_nodes"]
+ )
+
+ report_processor.process(lib_reports.sbd_check_started())
+
+ device_list = utils.pcs_options.get("--device", [])
+ device_num = len(device_list)
+ sbd_with_device = lib_sbd.is_device_set_local()
+ sbd_cfg = environment_file_to_dict(lib_sbd.get_local_sbd_config())
+
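+ # when SBD is set up with shared storage, 1 to 3 devices must be
+ # specified via --device for the new node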
+ if sbd_with_device and device_num not in range(1, 4):
+ utils.err(
+ "SBD is configured to use shared storage, therefore it " +\
+ "is required to specify at least one device and at most " +\
+ "{0} devices (option --device),".format(
+ settings.sbd_max_device_num
+ )
+ )
+ elif not sbd_with_device and device_num > 0:
+ utils.err(
+ "SBD is not configured to use shared device, " +\
+ "therefore --device should not be specified"
)
- # booth setup
- booth_sync.send_all_config_to_node(
- node_communicator,
+ lib_sbd.check_sbd_on_node(
+ report_processor, node_communicator, node_addr, watchdog,
+ device_list
+ )
+
+ report_processor.process(
+ lib_reports.sbd_config_distribution_started()
+ )
+ lib_sbd.set_sbd_config_on_node(
report_processor,
+ node_communicator,
node_addr,
- rewrite_existing=modifiers["force"],
- skip_wrong_config=modifiers["force"]
+ sbd_cfg,
+ watchdog,
+ device_list,
)
- except LibraryError as e:
- process_library_reports(e.args)
- except NodeCommunicationException as e:
- process_library_reports(
- [node_communicator_exception_to_report_item(e)]
+ report_processor.process(lib_reports.sbd_enabling_started())
+ lib_sbd.enable_sbd_service_on_node(
+ report_processor, node_communicator, node_addr
+ )
+ else:
+ report_processor.process(lib_reports.sbd_disabling_started())
+ lib_sbd.disable_sbd_service_on_node(
+ report_processor, node_communicator, node_addr
)
- # Now add the new node to corosync.conf / cluster.conf
- corosync_conf = None
- for my_node in utils.getNodesFromCorosyncConf():
- retval, output = utils.addLocalNode(my_node, node0, node1)
- if retval != 0:
- utils.err(
- "unable to add %s on %s - %s" % (node0, my_node, output.strip()),
- False
- )
- else:
- print("%s: Corosync updated" % my_node)
- corosync_conf = output
- # corosync.conf must be reloaded before the new node is started
+ # booth setup
+ booth_sync.send_all_config_to_node(
+ node_communicator,
+ report_processor,
+ node_addr,
+ rewrite_existing=modifiers["force"],
+ skip_wrong_config=modifiers["force"]
+ )
+
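+ # copy the local corosync authkey to the node being added, if one exists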
+ if os.path.isfile(settings.corosync_authkey_file):
+ distribute_files(
+ lib_env.node_communicator(),
+ lib_env.report_processor,
+ node_communication_format.corosync_authkey_file(
+ open(settings.corosync_authkey_file).read()
+ ),
+ NodeAddressesList([node_addr]),
+ )
+
+ _share_authkey(
+ lib_env,
+ get_nodes(lib_env.get_corosync_conf(), lib_env.get_cib()),
+ node_addr,
+ allow_incomplete_distribution=modifiers["skip_offline_nodes"]
+ )
+
+ except LibraryError as e:
+ process_library_reports(e.args)
+ except NodeCommunicationException as e:
+ process_library_reports(
+ [node_communicator_exception_to_report_item(e)]
+ )
+
+ # Now add the new node to corosync.conf / cluster.conf
+ corosync_conf = None
+ for my_node in utils.getNodesFromCorosyncConf():
+ retval, output = utils.addLocalNode(my_node, node0, node1)
+ if retval != 0:
+ utils.err(
+ "unable to add %s on %s - %s" % (node0, my_node, output.strip()),
+ False
+ )
+ else:
+ print("%s: Corosync updated" % my_node)
+ corosync_conf = output
+ if not utils.is_cman_cluster():
+ # When corosync 2 is in use, the procedure for adding a node is:
+ # 1. add the new node to corosync.conf
+ # 2. reload corosync.conf before the new node is started
+ # 3. start the new node
+ # If done otherwise, membership gets broken and qdevice hangs. The
+ # cluster will recover after a minute or so, but it is still the wrong
+ # way to do it.
+ # When corosync 1 is in use, the procedure for adding a node is:
+ # 1. add the new node to cluster.conf
+ # 2. start the new node
+ # Starting the node will automatically reload cluster.conf on all
+ # nodes. If the config is reloaded before the new node is started,
+ # the new node gets fenced by the cluster.
output, retval = utils.reloadCorosync()
- if corosync_conf != None:
- # send local cluster pcsd configs to the new node
- # may be used for sending corosync config as well in future
- pcsd_data = {
- 'nodes': [node0],
- 'force': True,
- }
- output, retval = utils.run_pcsdcli('send_local_configs', pcsd_data)
+ if corosync_conf != None:
+ # send local cluster pcsd configs to the new node
+ # may be used for sending corosync config as well in future
+ pcsd_data = {
+ 'nodes': [node0],
+ 'force': True,
+ }
+ output, retval = utils.run_pcsdcli('send_local_configs', pcsd_data)
+ if retval != 0:
+ utils.err("Unable to set pcsd configs")
+ if output['status'] == 'notauthorized':
+ utils.err(
+ "Unable to authenticate to " + node0
+ + ", try running 'pcs cluster auth'"
+ )
+ if output['status'] == 'ok' and output['data']:
+ try:
+ node_response = output['data'][node0]
+ if node_response['status'] not in ['ok', 'not_supported']:
+ utils.err("Unable to set pcsd configs")
+ except:
+ utils.err('Unable to communicate with pcsd')
+
+ print("Setting up corosync...")
+ utils.setCorosyncConfig(node0, corosync_conf)
+ if "--enable" in utils.pcs_options:
+ retval, err = utils.enableCluster(node0)
if retval != 0:
- utils.err("Unable to set pcsd configs")
- if output['status'] == 'notauthorized':
- utils.err(
- "Unable to authenticate to " + node0
- + ", try running 'pcs cluster auth'"
- )
- if output['status'] == 'ok' and output['data']:
- try:
- node_response = output['data'][node0]
- if node_response['status'] not in ['ok', 'not_supported']:
- utils.err("Unable to set pcsd configs")
- except:
- utils.err('Unable to communicate with pcsd')
-
- print("Setting up corosync...")
- utils.setCorosyncConfig(node0, corosync_conf)
- if "--enable" in utils.pcs_options:
- retval, err = utils.enableCluster(node0)
- if retval != 0:
- print("Warning: enable cluster - {0}".format(err))
- if "--start" in utils.pcs_options or utils.is_rhel6():
- # always start new node on cman cluster
- # otherwise it will get fenced
- retval, err = utils.startCluster(node0)
- if retval != 0:
- print("Warning: start cluster - {0}".format(err))
-
- pcsd.pcsd_sync_certs([node0], exit_after_error=False)
- else:
- utils.err("Unable to update any nodes")
- if utils.is_cman_with_udpu_transport():
- print("Warning: Using udpu transport on a CMAN cluster, "
- + "cluster restart is required to apply node addition")
- if wait:
- print()
- wait_for_nodes_started([node0], wait_timeout)
+ print("Warning: enable cluster - {0}".format(err))
+ if "--start" in utils.pcs_options or utils.is_rhel6():
+ # Always start the new node on cman cluster in order to reload
+ # cluster.conf (see above).
+ retval, err = utils.startCluster(node0)
+ if retval != 0:
+ print("Warning: start cluster - {0}".format(err))
+
+ pcsd.pcsd_sync_certs([node0], exit_after_error=False)
else:
- if node0 not in utils.getNodesFromCorosyncConf():
+ utils.err("Unable to update any nodes")
+ if utils.is_cman_with_udpu_transport():
+ print("Warning: Using udpu transport on a CMAN cluster, "
+ + "cluster restart is required to apply node addition")
+ if wait:
+ print()
+ wait_for_nodes_started([node0], wait_timeout)
+
+def node_remove(lib_env, node0, modifiers):
+ if node0 not in utils.getNodesFromCorosyncConf():
+ utils.err(
+ "node '%s' does not appear to exist in configuration" % node0
+ )
+ if "--force" not in utils.pcs_options:
+ retval, data = utils.get_remote_quorumtool_output(node0)
+ if retval != 0:
utils.err(
- "node '%s' does not appear to exist in configuration" % node0
+ "Unable to determine whether removing the node will cause "
+ + "a loss of the quorum, use --force to override\n"
+ + data
)
- if "--force" not in utils.pcs_options:
- retval, data = utils.get_remote_quorumtool_output(node0)
- if retval != 0:
- utils.err(
- "Unable to determine whether removing the node will cause "
- + "a loss of the quorum, use --force to override\n"
- + data
- )
- # we are sure whether we are on cman cluster or not because only
- # nodes from a local cluster can be stopped (see nodes validation
- # above)
- if utils.is_rhel6():
- quorum_info = utils.parse_cman_quorum_info(data)
- else:
- quorum_info = utils.parse_quorumtool_output(data)
- if quorum_info:
- if utils.is_node_stop_cause_quorum_loss(
- quorum_info, local=False, node_list=[node0]
- ):
- utils.err(
- "Removing the node will cause a loss of the quorum"
- + ", use --force to override"
- )
- elif not utils.is_node_offline_by_quorumtool_output(data):
+ # we can reliably tell whether this is a cman cluster, because only
+ # nodes from the local cluster can be stopped (see the node validation
+ # above)
+ if utils.is_rhel6():
+ quorum_info = utils.parse_cman_quorum_info(data)
+ else:
+ quorum_info = utils.parse_quorumtool_output(data)
+ if quorum_info:
+ if utils.is_node_stop_cause_quorum_loss(
+ quorum_info, local=False, node_list=[node0]
+ ):
utils.err(
- "Unable to determine whether removing the node will cause "
- + "a loss of the quorum, use --force to override\n"
- + data
+ "Removing the node will cause a loss of the quorum"
+ + ", use --force to override"
)
- # else the node seems to be stopped already, we're ok to proceed
-
- try:
- _ensure_cluster_is_offline_if_atb_should_be_enabled(
- lib_env, -1, modifiers["skip_offline_nodes"]
+ elif not utils.is_node_offline_by_quorumtool_output(data):
+ utils.err(
+ "Unable to determine whether removing the node will cause "
+ + "a loss of the quorum, use --force to override\n"
+ + data
)
- except LibraryError as e:
- utils.process_library_reports(e.args)
+ # else the node seems to be stopped already, we're ok to proceed
- nodesRemoved = False
- c_nodes = utils.getNodesFromCorosyncConf()
- destroy_cluster([node0], keep_going=("--force" in utils.pcs_options))
- for my_node in c_nodes:
- if my_node == node0:
- continue
- retval, output = utils.removeLocalNode(my_node, node0)
- if retval != 0:
+ try:
+ _ensure_cluster_is_offline_if_atb_should_be_enabled(
+ lib_env, -1, modifiers["skip_offline_nodes"]
+ )
+ except LibraryError as e:
+ utils.process_library_reports(e.args)
+
+ nodesRemoved = False
+ c_nodes = utils.getNodesFromCorosyncConf()
+ destroy_cluster([node0], keep_going=("--force" in utils.pcs_options))
+ for my_node in c_nodes:
+ if my_node == node0:
+ continue
+ retval, output = utils.removeLocalNode(my_node, node0)
+ if retval != 0:
+ utils.err(
+ "unable to remove %s on %s - %s" % (node0,my_node,output.strip()),
+ False
+ )
+ else:
+ if output[0] == 0:
+ print("%s: Corosync updated" % my_node)
+ nodesRemoved = True
+ else:
utils.err(
- "unable to remove %s on %s - %s" % (node0,my_node,output.strip()),
+ "%s: Error executing command occured: %s" % (my_node, "".join(output[1])),
False
)
- else:
- if output[0] == 0:
- print("%s: Corosync updated" % my_node)
- nodesRemoved = True
- else:
- utils.err(
- "%s: Error executing command occured: %s" % (my_node, "".join(output[1])),
- False
- )
- if nodesRemoved == False:
- utils.err("Unable to update any nodes")
+ if nodesRemoved == False:
+ utils.err("Unable to update any nodes")
- output, retval = utils.reloadCorosync()
- output, retval = utils.run(["crm_node", "--force", "-R", node0])
- if utils.is_cman_with_udpu_transport():
- print("Warning: Using udpu transport on a CMAN cluster, "
- + "cluster restart is required to apply node removal")
+ output, retval = utils.reloadCorosync()
+ output, retval = utils.run(["crm_node", "--force", "-R", node0])
+ if utils.is_cman_with_udpu_transport():
+ print("Warning: Using udpu transport on a CMAN cluster, "
+ + "cluster restart is required to apply node removal")
def cluster_localnode(argv):
if len(argv) != 2:
@@ -1864,6 +2112,15 @@ def cluster_reload(argv):
# Code taken from cluster-clean script in pacemaker
def cluster_destroy(argv):
if "--all" in utils.pcs_options:
+ lib_env = utils.get_lib_env()
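+ # destroy pacemaker remote/guest nodes first; they are defined in the
+ # CIB and are not listed in corosync.conf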
+ all_remote_nodes = get_nodes(tree=lib_env.get_cib())
+ if len(all_remote_nodes) > 0:
+ _destroy_pcmk_remote_env(
+ lib_env,
+ all_remote_nodes,
+ allow_fails=True
+ )
+
destroy_cluster(utils.getNodesFromCorosyncConf())
else:
print("Shutting down pacemaker/corosync services...")
@@ -1890,10 +2147,12 @@ def cluster_destroy(argv):
os.system("rm -f /etc/cluster/cluster.conf")
else:
os.system("rm -f /etc/corosync/corosync.conf")
+ os.system("rm -f {0}".format(settings.corosync_authkey_file))
state_files = ["cib.xml*", "cib-*", "core.*", "hostcache", "cts.*",
"pe*.bz2","cib.*"]
for name in state_files:
- os.system("find /var/lib -name '"+name+"' -exec rm -f \{\} \;")
+ os.system("find /var/lib/pacemaker -name '"+name+"' -exec rm -f \{\} \;")
+ os.system("rm -f {0}".format(settings.pacemaker_authkey_file))
try:
qdevice_net.client_destroy()
except:
@@ -1917,12 +2176,16 @@ def cluster_verify(argv):
else:
options.append("--xml-file")
options.append(filename)
-
output, retval = utils.run([settings.crm_verify] + options)
-
if output != "":
print(output)
- stonith.stonith_level_verify()
+
+ lib = utils.get_library_wrapper()
+ try:
+ lib.fencing_topology.verify()
+ except LibraryError as e:
+ utils.process_library_reports(e.args)
+
return retval
def cluster_report(argv):
@@ -1972,25 +2235,63 @@ def cluster_report(argv):
print(newoutput)
def cluster_remote_node(argv):
+ usage_add = """\
+ remote-node add <hostname> <resource id> [options]
+ Enables the specified resource as a remote-node resource on the
+ specified hostname (hostname should be the same as 'uname -n')."""
+ usage_remove = """\
+ remote-node remove <hostname>
+ Disables any resources configured to be remote-node resource on the
+ specified hostname (hostname should be the same as 'uname -n')."""
+
if len(argv) < 1:
- usage.cluster(["remote-node"])
+ print("\nUsage: pcs cluster remote-node...")
+ print(usage_add)
+ print()
+ print(usage_remove)
+ print()
sys.exit(1)
command = argv.pop(0)
if command == "add":
if len(argv) < 2:
- usage.cluster(["remote-node"])
+ print("\nUsage: pcs cluster remote-node add...")
+ print(usage_add)
+ print()
sys.exit(1)
+ if "--force" in utils.pcs_options:
+ warn("this command is deprecated, use 'pcs cluster node add-guest'")
+ else:
+ raise error(
+ "this command is deprecated, use 'pcs cluster node add-guest'"
+ ", use --force to override"
+ )
hostname = argv.pop(0)
rsc = argv.pop(0)
if not utils.dom_get_resource(utils.get_cib_dom(), rsc):
utils.err("unable to find resource '%s'" % rsc)
- resource.resource_update(rsc, ["meta", "remote-node="+hostname] + argv)
+ resource.resource_update(
+ rsc,
+ ["meta", "remote-node="+hostname] + argv,
+ deal_with_guest_change=False
+ )
elif command in ["remove","delete"]:
if len(argv) < 1:
- usage.cluster(["remote-node"])
+ print("\nUsage: pcs cluster remote-node remove...")
+ print(usage_remove)
+ print()
sys.exit(1)
+ if "--force" in utils.pcs_options:
+ warn(
+ "this command is deprecated, use"
+ " 'pcs cluster node remove-guest'"
+ )
+ else:
+ raise error(
+ "this command is deprecated, use 'pcs cluster node"
+ " remove-guest', use --force to override"
+ )
hostname = argv.pop(0)
dom = utils.get_cib_dom()
nvpairs = dom.getElementsByTagName("nvpair")
@@ -2015,6 +2316,9 @@ def cluster_remote_node(argv):
if retval != 0:
utils.err("unable to remove: {0}".format(output))
else:
- usage.cluster(["remote-node"])
+ print("\nUsage: pcs cluster remote-node...")
+ print(usage_add)
+ print()
+ print(usage_remove)
+ print()
sys.exit(1)
-
diff --git a/pcs/common/env_file_role_codes.py b/pcs/common/env_file_role_codes.py
index 1f47387..ff777ae 100644
--- a/pcs/common/env_file_role_codes.py
+++ b/pcs/common/env_file_role_codes.py
@@ -7,3 +7,4 @@ from __future__ import (
BOOTH_CONFIG = "BOOTH_CONFIG"
BOOTH_KEY = "BOOTH_KEY"
+PACEMAKER_AUTHKEY = "PACEMAKER_AUTHKEY"
diff --git a/pcs/common/env_file_role_codes.py b/pcs/common/fencing_topology.py
similarity index 53%
copy from pcs/common/env_file_role_codes.py
copy to pcs/common/fencing_topology.py
index 1f47387..24fd15b 100644
--- a/pcs/common/env_file_role_codes.py
+++ b/pcs/common/fencing_topology.py
@@ -5,5 +5,6 @@ from __future__ import (
unicode_literals,
)
-BOOTH_CONFIG = "BOOTH_CONFIG"
-BOOTH_KEY = "BOOTH_KEY"
+TARGET_TYPE_NODE = "node"
+TARGET_TYPE_REGEXP = "regexp"
+TARGET_TYPE_ATTRIBUTE = "attribute"
diff --git a/pcs/common/pcs_pycurl.py b/pcs/common/pcs_pycurl.py
new file mode 100644
index 0000000..4e94eeb
--- /dev/null
+++ b/pcs/common/pcs_pycurl.py
@@ -0,0 +1,34 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+import sys
+from pycurl import *
+
+# This module defines constants which are not present in some older versions
+# of pycurl but which pcs needs to use.
+
+required_constants = {
+ "PROTOCOLS": 181,
+ "PROTO_HTTPS": 2,
+ "E_OPERATION_TIMEDOUT": 28,
+ # these are types of debug messages
+ # see https://curl.haxx.se/libcurl/c/CURLOPT_DEBUGFUNCTION.html
+ "DEBUG_TEXT": 0,
+ "DEBUG_HEADER_IN": 1,
+ "DEBUG_HEADER_OUT": 2,
+ "DEBUG_DATA_IN": 3,
+ "DEBUG_DATA_OUT": 4,
+ "DEBUG_SSL_DATA_IN": 5,
+ "DEBUG_SSL_DATA_OUT": 6,
+ "DEBUG_END": 7,
+}
+
+__current_module = sys.modules[__name__]
+
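+# define on this module any required constant missing from the installed pycurl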
+for constant, value in required_constants.items():
+ if not hasattr(__current_module, constant):
+ setattr(__current_module, constant, value)
diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
index f5968fa..2e96f6b 100644
--- a/pcs/common/report_codes.py
+++ b/pcs/common/report_codes.py
@@ -10,15 +10,22 @@ FORCE_ACTIVE_RRP = "ACTIVE_RRP"
FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE = "FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE"
FORCE_BOOTH_DESTROY = "FORCE_BOOTH_DESTROY"
FORCE_BOOTH_REMOVE_FROM_CIB = "FORCE_BOOTH_REMOVE_FROM_CIB"
+FORCE_REMOVE_MULTIPLE_NODES = "FORCE_REMOVE_MULTIPLE_NODES"
FORCE_CONSTRAINT_DUPLICATE = "CONSTRAINT_DUPLICATE"
FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "CONSTRAINT_MULTIINSTANCE_RESOURCE"
FORCE_FILE_OVERWRITE = "FORCE_FILE_OVERWRITE"
FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD"
FORCE_METADATA_ISSUE = "METADATA_ISSUE"
+FORCE_NODE_DOES_NOT_EXIST = "FORCE_NODE_DOES_NOT_EXIST"
FORCE_OPTIONS = "OPTIONS"
FORCE_QDEVICE_MODEL = "QDEVICE_MODEL"
FORCE_QDEVICE_USED = "QDEVICE_USED"
+FORCE_STONITH_RESOURCE_DOES_NOT_EXIST = "FORCE_STONITH_RESOURCE_DOES_NOT_EXIST"
+FORCE_NOT_SUITABLE_COMMAND = "FORCE_NOT_SUITABLE_COMMAND"
+FORCE_CLEAR_CLUSTER_NODE = "FORCE_CLEAR_CLUSTER_NODE"
SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES"
+SKIP_FILE_DISTRIBUTION_ERRORS = "SKIP_FILE_DISTRIBUTION_ERRORS"
+SKIP_ACTION_ON_NODES_ERRORS = "SKIP_ACTION_ON_NODES_ERRORS"
SKIP_UNREADABLE_CONFIG = "SKIP_UNREADABLE_CONFIG"
AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE = "AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE"
@@ -56,14 +63,16 @@ BOOTH_UNSUPORTED_FILE_LOCATION = "BOOTH_UNSUPORTED_FILE_LOCATION"
CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET = "CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET"
CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET = "CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET"
CIB_ACL_TARGET_ALREADY_EXISTS = "CIB_ACL_TARGET_ALREADY_EXISTS"
-CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND"
CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS"
CIB_ALERT_RECIPIENT_VALUE_INVALID = "CIB_ALERT_RECIPIENT_VALUE_INVALID"
CIB_CANNOT_FIND_MANDATORY_SECTION = "CIB_CANNOT_FIND_MANDATORY_SECTION"
+CIB_FENCING_LEVEL_ALREADY_EXISTS = "CIB_FENCING_LEVEL_ALREADY_EXISTS"
+CIB_FENCING_LEVEL_DOES_NOT_EXIST = "CIB_FENCING_LEVEL_DOES_NOT_EXIST"
CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT"
CIB_LOAD_ERROR = "CIB_LOAD_ERROR"
CIB_LOAD_ERROR_SCOPE_MISSING = "CIB_LOAD_ERROR_SCOPE_MISSING"
CIB_PUSH_ERROR = "CIB_PUSH_ERROR"
+CIB_SAVE_TMP_ERROR = "CIB_SAVE_TMP_ERROR"
CIB_UPGRADE_FAILED = "CIB_UPGRADE_FAILED"
CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION = "CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION"
CIB_UPGRADE_SUCCESSFUL = "CIB_UPGRADE_SUCCESSFUL"
@@ -76,6 +85,7 @@ CMAN_UNSUPPORTED_COMMAND = "CMAN_UNSUPPORTED_COMMAND"
COMMON_ERROR = 'COMMON_ERROR'
COMMON_INFO = 'COMMON_INFO'
LIVE_ENVIRONMENT_REQUIRED = "LIVE_ENVIRONMENT_REQUIRED"
+LIVE_ENVIRONMENT_REQUIRED_FOR_LOCAL_NODE = "LIVE_ENVIRONMENT_REQUIRED_FOR_LOCAL_NODE"
COROSYNC_CONFIG_ACCEPTED_BY_NODE = "COROSYNC_CONFIG_ACCEPTED_BY_NODE"
COROSYNC_CONFIG_DISTRIBUTION_STARTED = "COROSYNC_CONFIG_DISTRIBUTION_STARTED"
COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR = "COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR"
@@ -96,34 +106,57 @@ EMPTY_ID = "EMPTY_ID"
FILE_ALREADY_EXISTS = "FILE_ALREADY_EXISTS"
FILE_DOES_NOT_EXIST = "FILE_DOES_NOT_EXIST"
FILE_IO_ERROR = "FILE_IO_ERROR"
+FILES_DISTRIBUTION_STARTED = "FILES_DISTRIBUTION_STARTED"
+FILE_DISTRIBUTION_ERROR = "FILE_DISTRIBUTION_ERROR"
+FILE_DISTRIBUTION_SUCCESS = "FILE_DISTRIBUTION_SUCCESS"
+FILES_REMOVE_FROM_NODE_STARTED = "FILES_REMOVE_FROM_NODE_STARTED"
+FILE_REMOVE_FROM_NODE_ERROR = "FILE_REMOVE_FROM_NODE_ERROR"
+FILE_REMOVE_FROM_NODE_SUCCESS = "FILE_REMOVE_FROM_NODE_SUCCESS"
ID_ALREADY_EXISTS = 'ID_ALREADY_EXISTS'
+ID_BELONGS_TO_UNEXPECTED_TYPE = "ID_BELONGS_TO_UNEXPECTED_TYPE"
ID_NOT_FOUND = 'ID_NOT_FOUND'
IGNORED_CMAN_UNSUPPORTED_OPTION = 'IGNORED_CMAN_UNSUPPORTED_OPTION'
INVALID_ID = "INVALID_ID"
INVALID_OPTION = "INVALID_OPTION"
+INVALID_OPTION_TYPE = "INVALID_OPTION_TYPE"
INVALID_OPTION_VALUE = "INVALID_OPTION_VALUE"
INVALID_RESOURCE_NAME = 'INVALID_RESOURCE_NAME'
INVALID_RESOURCE_AGENT_NAME = 'INVALID_RESOURCE_AGENT_NAME'
INVALID_RESPONSE_FORMAT = "INVALID_RESPONSE_FORMAT"
INVALID_SCORE = "INVALID_SCORE"
+INVALID_STONITH_AGENT_NAME = "INVALID_STONITH_AGENT_NAME"
INVALID_TIMEOUT_VALUE = "INVALID_TIMEOUT_VALUE"
MULTIPLE_SCORE_OPTIONS = "MULTIPLE_SCORE_OPTIONS"
+MULTIPLE_RESULTS_FOUND = "MULTIPLE_RESULTS_FOUND"
+MUTUALLY_EXCLUSIVE_OPTIONS = "MUTUALLY_EXCLUSIVE_OPTIONS"
+CANNOT_ADD_NODE_IS_IN_CLUSTER = "CANNOT_ADD_NODE_IS_IN_CLUSTER"
+CANNOT_ADD_NODE_IS_RUNNING_SERVICE = "CANNOT_ADD_NODE_IS_RUNNING_SERVICE"
NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL = "NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL"
+NODE_COMMUNICATION_DEBUG_INFO = "NODE_COMMUNICATION_DEBUG_INFO"
NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR"
NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED"
NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED"
NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT"
NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND"
+NODE_COMMUNICATION_ERROR_TIMED_OUT = "NODE_COMMUNICATION_ERROR_TIMED_OUT"
NODE_COMMUNICATION_FINISHED = "NODE_COMMUNICATION_FINISHED"
NODE_COMMUNICATION_NOT_CONNECTED = "NODE_COMMUNICATION_NOT_CONNECTED"
+NODE_COMMUNICATION_PROXY_IS_SET = "NODE_COMMUNICATION_PROXY_IS_SET"
NODE_COMMUNICATION_STARTED = "NODE_COMMUNICATION_STARTED"
NODE_NOT_FOUND = "NODE_NOT_FOUND"
+NODE_REMOVE_IN_PACEMAKER_FAILED = "NODE_REMOVE_IN_PACEMAKER_FAILED"
NON_UDP_TRANSPORT_ADDR_MISMATCH = 'NON_UDP_TRANSPORT_ADDR_MISMATCH'
+NOLIVE_SKIP_FILES_DISTRIBUTION = "NOLIVE_SKIP_FILES_DISTRIBUTION"
+NOLIVE_SKIP_FILES_REMOVE = "NOLIVE_SKIP_FILES_REMOVE"
+NOLIVE_SKIP_SERVICE_COMMAND_ON_NODES = "NOLIVE_SKIP_SERVICE_COMMAND_ON_NODES"
+NODE_TO_CLEAR_IS_STILL_IN_CLUSTER = "NODE_TO_CLEAR_IS_STILL_IN_CLUSTER"
OMITTING_NODE = "OMITTING_NODE"
+OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT = "OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT"
PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND"
PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE"
PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF"
PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE"
+PREREQUISITE_OPTION_IS_MISSING = "PREREQUISITE_OPTION_IS_MISSING"
QDEVICE_ALREADY_DEFINED = "QDEVICE_ALREADY_DEFINED"
QDEVICE_ALREADY_INITIALIZED = "QDEVICE_ALREADY_INITIALIZED"
QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE = "QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE"
@@ -144,26 +177,43 @@ QDEVICE_CLIENT_RELOAD_STARTED = "QDEVICE_CLIENT_RELOAD_STARTED"
QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED"
QDEVICE_USED_BY_CLUSTERS = "QDEVICE_USED_BY_CLUSTERS"
REQUIRED_OPTION_IS_MISSING = "REQUIRED_OPTION_IS_MISSING"
+REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING = "REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING"
+RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE = "RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE"
+RESOURCE_CANNOT_BE_NEXT_TO_ITSELF_IN_GROUP = "RESOURCE_CANNOT_BE_NEXT_TO_ITSELF_IN_GROUP"
RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR"
RESOURCE_CLEANUP_TOO_TIME_CONSUMING = 'RESOURCE_CLEANUP_TOO_TIME_CONSUMING'
-RESOURCE_DOES_NOT_EXIST = 'RESOURCE_DOES_NOT_EXIST'
+RESOURCE_DOES_NOT_RUN = "RESOURCE_DOES_NOT_RUN"
RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE = 'RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE'
-RESOURCE_WAIT_ERROR = "RESOURCE_WAIT_ERROR"
-RESOURCE_WAIT_NOT_SUPPORTED = "RESOURCE_WAIT_NOT_SUPPORTED"
-RESOURCE_WAIT_TIMED_OUT = "RESOURCE_WAIT_TIMED_OUT"
+RESOURCE_IS_GUEST_NODE_ALREADY = "RESOURCE_IS_GUEST_NODE_ALREADY"
+RESOURCE_IS_UNMANAGED = "RESOURCE_IS_UNMANAGED"
+RESOURCE_MANAGED_NO_MONITOR_ENABLED = "RESOURCE_MANAGED_NO_MONITOR_ENABLED"
+RESOURCE_OPERATION_INTERVAL_DUPLICATION = "RESOURCE_OPERATION_INTERVAL_DUPLICATION"
+RESOURCE_OPERATION_INTERVAL_ADAPTED = "RESOURCE_OPERATION_INTERVAL_ADAPTED"
+RESOURCE_RUNNING_ON_NODES = "RESOURCE_RUNNING_ON_NODES"
RRP_ACTIVE_NOT_SUPPORTED = 'RRP_ACTIVE_NOT_SUPPORTED'
RUN_EXTERNAL_PROCESS_ERROR = "RUN_EXTERNAL_PROCESS_ERROR"
RUN_EXTERNAL_PROCESS_FINISHED = "RUN_EXTERNAL_PROCESS_FINISHED"
RUN_EXTERNAL_PROCESS_STARTED = "RUN_EXTERNAL_PROCESS_STARTED"
SBD_CHECK_STARTED = "SBD_CHECK_STARTED"
SBD_CHECK_SUCCESS = "SBD_CHECK_SUCCESS"
-SBD_CONFIG_DISTRIBUTION_STARTED = "SBD_CONFIG_DISTRIBUTION_STARTED"
SBD_CONFIG_ACCEPTED_BY_NODE = "SBD_CONFIG_ACCEPTED_BY_NODE"
+SBD_CONFIG_DISTRIBUTION_STARTED = "SBD_CONFIG_DISTRIBUTION_STARTED"
+SBD_DEVICE_DOES_NOT_EXIST = "SBD_DEVICE_DOES_NOT_EXIST"
+SBD_DEVICE_DUMP_ERROR = "SBD_DEVICE_DUMP_ERROR"
+SBD_DEVICE_INITIALIZATION_ERROR = "SBD_DEVICE_INITIALIZATION_ERROR"
+SBD_DEVICE_INITIALIZATION_STARTED = "SBD_DEVICE_INITIALIZATION_STARTED"
+SBD_DEVICE_INITIALIZATION_SUCCESS = "SBD_DEVICE_INITIALIZATION_SUCCESS"
+SBD_DEVICE_IS_NOT_BLOCK_DEVICE = "SBD_DEVICE_IS_NOT_BLOCK_DEVICE"
+SBD_DEVICE_LIST_ERROR = "SBD_DEVICE_LIST_ERROR"
+SBD_DEVICE_MESSAGE_ERROR = "SBD_DEVICE_MESSAGE_ERROR"
+SBD_DEVICE_PATH_NOT_ABSOLUTE = "SBD_DEVICE_PATH_NOT_ABSOLUTE"
SBD_DISABLING_STARTED = "SBD_DISABLING_STARTED"
SBD_ENABLING_STARTED = "SBD_ENABLING_STARTED"
-SBD_NOT_INSTALLED = "SBD_NOT_INSTALLED"
+SBD_NO_DEVICE_FOR_NODE = "SBD_NO_DEVICE_FOR_NODE"
SBD_NOT_ENABLED = "SBD_NOT_ENABLED"
+SBD_NOT_INSTALLED = "SBD_NOT_INSTALLED"
SBD_REQUIRES_ATB = "SBD_REQUIRES_ATB"
+SBD_TOO_MANY_DEVICES_FOR_NODE = "SBD_TOO_MANY_DEVICES_FOR_NODE"
SERVICE_DISABLE_ERROR = "SERVICE_DISABLE_ERROR"
SERVICE_DISABLE_STARTED = "SERVICE_DISABLE_STARTED"
SERVICE_DISABLE_SUCCESS = "SERVICE_DISABLE_SUCCESS"
@@ -180,6 +230,10 @@ SERVICE_START_SUCCESS = "SERVICE_START_SUCCESS"
SERVICE_STOP_ERROR = "SERVICE_STOP_ERROR"
SERVICE_STOP_STARTED = "SERVICE_STOP_STARTED"
SERVICE_STOP_SUCCESS = "SERVICE_STOP_SUCCESS"
+STONITH_RESOURCES_DO_NOT_EXIST = "STONITH_RESOURCES_DO_NOT_EXIST"
+SERVICE_COMMANDS_ON_NODES_STARTED = "SERVICE_COMMANDS_ON_NODES_STARTED"
+SERVICE_COMMAND_ON_NODE_ERROR = "SERVICE_COMMAND_ON_NODE_ERROR"
+SERVICE_COMMAND_ON_NODE_SUCCESS = "SERVICE_COMMAND_ON_NODE_SUCCESS"
UNABLE_TO_DETERMINE_USER_UID = "UNABLE_TO_DETERMINE_USER_UID"
UNABLE_TO_DETERMINE_GROUP_GID = "UNABLE_TO_DETERMINE_GROUP_GID"
UNABLE_TO_GET_AGENT_METADATA = 'UNABLE_TO_GET_AGENT_METADATA'
@@ -189,4 +243,11 @@ UNABLE_TO_GET_SBD_STATUS = "UNABLE_TO_GET_SBD_STATUS"
UNKNOWN_COMMAND = 'UNKNOWN_COMMAND'
WATCHDOG_INVALID = "WATCHDOG_INVALID"
UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS = "UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS"
+USE_COMMAND_NODE_ADD_REMOTE = "USE_COMMAND_NODE_ADD_REMOTE"
+USE_COMMAND_NODE_ADD_GUEST = "USE_COMMAND_NODE_ADD_GUEST"
+USE_COMMAND_NODE_REMOVE_GUEST = "USE_COMMAND_NODE_REMOVE_GUEST"
+WAIT_FOR_IDLE_ERROR = "WAIT_FOR_IDLE_ERROR"
+WAIT_FOR_IDLE_NOT_LIVE_CLUSTER = "WAIT_FOR_IDLE_NOT_LIVE_CLUSTER"
+WAIT_FOR_IDLE_NOT_SUPPORTED = "WAIT_FOR_IDLE_NOT_SUPPORTED"
+WAIT_FOR_IDLE_TIMED_OUT = "WAIT_FOR_IDLE_TIMED_OUT"
WATCHDOG_NOT_FOUND = "WATCHDOG_NOT_FOUND"
diff --git a/pcs/common/test/test_tools.py b/pcs/common/test/test_tools.py
new file mode 100644
index 0000000..8a042d0
--- /dev/null
+++ b/pcs/common/test/test_tools.py
@@ -0,0 +1,24 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.common.tools import is_string
+
+class IsString(TestCase):
+ def test_recognize_plain_string(self):
+ self.assertTrue(is_string(""))
+
+ def test_recognize_unicode_string(self):
+ #in python3 this is str type
+ self.assertTrue(is_string(u""))
+
+ def test_recognize_bytes(self):
+ #in python2 this is str type
+ self.assertTrue(is_string(b""))
+
+ def test_list_of_string_is_not_string(self):
+ self.assertFalse(is_string(["a", "b"]))
diff --git a/pcs/common/tools.py b/pcs/common/tools.py
index 01194a5..bd4ce21 100644
--- a/pcs/common/tools.py
+++ b/pcs/common/tools.py
@@ -5,6 +5,7 @@ from __future__ import (
unicode_literals,
)
+from lxml import etree
import threading
@@ -41,3 +42,31 @@ def format_environment_error(e):
def join_multilines(strings):
return "\n".join([a.strip() for a in strings if a.strip()])
+
+def is_string(candidate):
+ """
+ Return True if candidate is a string.
+ The seemingly simple check isinstance(candidate, "".__class__) does not work:
+
+ >>> isinstance("", "".__class__), isinstance(u"", "".__class__)
+ (True, False)
+
+ This code also needs to work on both python2 and python3: the unicode type
+ exists in python2 but not in python3.
+ """
+ string_list = [str, bytes]
+ try:
+ string_list.append(unicode)
+ except NameError: #unicode is not present in python3
+ pass
+
+ return any([isinstance(candidate, string) for string in string_list])
+
+def xml_fromstring(xml):
+ # If the xml contains encoding declaration such as:
+ # <?xml version="1.0" encoding="UTF-8"?>
+ # we get an exception in python3:
+ # ValueError: Unicode strings with encoding declaration are not supported.
+ # Please use bytes input or XML fragments without declaration.
+ # So we encode the string to bytes.
+ return etree.fromstring(xml.encode("utf-8"))
diff --git a/pcs/config.py b/pcs/config.py
index e410a5a..94191e1 100644
--- a/pcs/config.py
+++ b/pcs/config.py
@@ -17,8 +17,10 @@ from xml.dom.minidom import parse
import logging
import pwd
import grp
+import tempfile
import time
import platform
+import shutil
try:
import clufter.facts
@@ -122,20 +124,26 @@ def config_show(argv):
utils.process_library_reports(e.args)
def config_show_cib():
+ lib = utils.get_library_wrapper()
+ modificators = utils.get_modificators()
+
print("Resources:")
utils.pcs_options["--all"] = 1
utils.pcs_options["--full"] = 1
resource.resource_show([])
+
print()
print("Stonith Devices:")
resource.resource_show([], True)
print("Fencing Levels:")
- stonith.stonith_level_show()
- print()
+ levels = stonith.stonith_level_config_to_str(
+ lib.fencing_topology.get_config()
+ )
+ if levels:
+ print("\n".join(indent(levels, 2)))
- lib = utils.get_library_wrapper()
+ print()
constraint.location_show([])
- modificators = utils.get_modificators()
order_command.show(lib, [], modificators)
colocation_command.show(lib, [], modificators)
ticket_command.show(lib, [], modificators)
@@ -341,6 +349,7 @@ def config_restore_local(infile_name, infile_obj):
file_list = config_backup_path_list(with_uid_gid=True)
tarball_file_list = []
version = None
+ tmp_dir = None
try:
tarball = tarfile.open(infile_name, "r|*", infile_obj)
while True:
@@ -387,15 +396,30 @@ def config_restore_local(infile_name, infile_obj):
path = os.path.dirname(path)
if not extract_info:
continue
- path_extract = os.path.dirname(extract_info["path"])
- tarball.extractall(path_extract, [tar_member_info])
- path_full = os.path.join(path_extract, tar_member_info.name)
+ path_full = None
+ if hasattr(extract_info.get("pre_store_call"), '__call__'):
+ extract_info["pre_store_call"]()
+ if "rename" in extract_info and extract_info["rename"]:
+ if tmp_dir is None:
+ tmp_dir = tempfile.mkdtemp()
+ tarball.extractall(tmp_dir, [tar_member_info])
+ path_full = extract_info["path"]
+ os.rename(
+ os.path.join(tmp_dir, tar_member_info.name), path_full
+ )
+ else:
+ dir_path = os.path.dirname(extract_info["path"])
+ tarball.extractall(dir_path, [tar_member_info])
+ path_full = os.path.join(dir_path, tar_member_info.name)
file_attrs = extract_info["attrs"]
os.chmod(path_full, file_attrs["mode"])
os.chown(path_full, file_attrs["uid"], file_attrs["gid"])
tarball.close()
- except (tarfile.TarError, EnvironmentError) as e:
+ except (tarfile.TarError, EnvironmentError, OSError) as e:
utils.err("unable to restore the cluster: %s" % e)
+ finally:
+ if tmp_dir:
+ shutil.rmtree(tmp_dir, ignore_errors=True)
try:
sig_path = os.path.join(settings.cib_dir, "cib.xml.sig")
@@ -414,25 +438,19 @@ def config_backup_path_list(with_uid_gid=False, force_rhel6=None):
"uid": 0,
"gid": 0,
}
+ corosync_authkey_attrs = dict(corosync_attrs)
+ corosync_authkey_attrs["mode"] = 0o400
cib_attrs = {
"mtime": int(time.time()),
"mode": 0o600,
"uname": settings.pacemaker_uname,
"gname": settings.pacemaker_gname,
}
+ pcmk_authkey_attrs = dict(cib_attrs)
+ pcmk_authkey_attrs["mode"] = 0o440
if with_uid_gid:
- try:
- cib_attrs["uid"] = pwd.getpwnam(cib_attrs["uname"]).pw_uid
- except KeyError:
- utils.err(
- "Unable to determine uid of user '%s'" % cib_attrs["uname"]
- )
- try:
- cib_attrs["gid"] = grp.getgrnam(cib_attrs["gname"]).gr_gid
- except KeyError:
- utils.err(
- "Unable to determine gid of group '%s'" % cib_attrs["gname"]
- )
+ cib_attrs["uid"] = _get_uid(cib_attrs["uname"])
+ cib_attrs["gid"] = _get_gid(cib_attrs["gname"])
file_list = {
"cib.xml": {
@@ -440,6 +458,21 @@ def config_backup_path_list(with_uid_gid=False, force_rhel6=None):
"required": True,
"attrs": dict(cib_attrs),
},
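+ # authkey files are optional in the backup and are renamed into place
+ # on restore (see config_restore_local)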
+ "corosync_authkey": {
+ "path": settings.corosync_authkey_file,
+ "required": False,
+ "attrs": corosync_authkey_attrs,
+ "restore_procedure": None,
+ "rename": True,
+ },
+ "pacemaker_authkey": {
+ "path": settings.pacemaker_authkey_file,
+ "required": False,
+ "attrs": pcmk_authkey_attrs,
+ "restore_procedure": None,
+ "rename": True,
+ "pre_store_call": _ensure_etc_pacemaker_exists,
+ },
}
if rhel6:
file_list["cluster.conf"] = {
@@ -472,6 +505,35 @@ def config_backup_path_list(with_uid_gid=False, force_rhel6=None):
}
return file_list
+
+def _get_uid(user_name):
+ try:
+ return pwd.getpwnam(user_name).pw_uid
+ except KeyError:
+ utils.err("Unable to determine uid of user '{0}'".format(user_name))
+
+
+def _get_gid(group_name):
+ try:
+ return grp.getgrnam(group_name).gr_gid
+ except KeyError:
+ utils.err(
+ "Unable to determine gid of group '{0}'".format(group_name)
+ )
+
+
+def _ensure_etc_pacemaker_exists():
+ dir_name = os.path.dirname(settings.pacemaker_authkey_file)
+ if not os.path.exists(dir_name):
+ os.mkdir(dir_name)
+ os.chmod(dir_name, 0o750)
+ os.chown(
+ dir_name,
+ _get_uid(settings.pacemaker_uname),
+ _get_gid(settings.pacemaker_gname)
+ )
+
+
def config_backup_check_version(version):
try:
version_number = int(version)
@@ -621,8 +683,6 @@ def config_import_cman(argv):
"batch": True,
"sys": "linux",
"dist": dist,
- # Make it work on RHEL6 as well for sure
- "color": "always" if sys.stdout.isatty() else "never"
}
if interactive:
if "EDITOR" not in os.environ:
@@ -670,7 +730,7 @@ def config_import_cman(argv):
if output_format in ("pcs-commands", "pcs-commands-verbose"):
ok, message = utils.write_file(
dry_run_output,
- clufter_args_obj.output["passout"]
+ clufter_args_obj.output["passout"].decode()
)
if not ok:
utils.err(message)
@@ -692,14 +752,14 @@ def config_import_cman(argv):
config_backup_add_version_to_tarball(tarball)
utils.tar_add_file_data(
tarball,
- clufter_args_obj.cib["passout"].encode("utf-8"),
+ clufter_args_obj.cib["passout"],
"cib.xml",
**file_list["cib.xml"]["attrs"]
)
if output_format == "cluster.conf":
utils.tar_add_file_data(
tarball,
- clufter_args_obj.ccs_pcmk["passout"].encode("utf-8"),
+ clufter_args_obj.ccs_pcmk["passout"],
"cluster.conf",
**file_list["cluster.conf"]["attrs"]
)
@@ -720,7 +780,7 @@ def config_import_cman(argv):
)("bytestring")
utils.tar_add_file_data(
tarball,
- corosync_conf_data.encode("utf-8"),
+ corosync_conf_data,
"corosync.conf",
**file_list["corosync.conf"]["attrs"]
)
@@ -738,7 +798,7 @@ def config_import_cman(argv):
)("bytestring")
utils.tar_add_file_data(
tarball,
- uidgid_data.encode("utf-8"),
+ uidgid_data,
"uidgid.d/" + filename,
**file_list["uidgid.d"]["attrs"]
)
@@ -796,8 +856,6 @@ def config_export_pcs_commands(argv, verbose=False):
"batch": True,
"sys": "linux",
"dist": dist,
- # Make it work on RHEL6 as well for sure
- "color": "always" if sys.stdout.isatty() else "never",
"coro": settings.corosync_conf_file,
"ccs": settings.cluster_conf_file,
"start_wait": "60",
@@ -839,7 +897,7 @@ def config_export_pcs_commands(argv, verbose=False):
if output_file:
ok, message = utils.write_file(
output_file,
- clufter_args_obj.output["passout"]
+ clufter_args_obj.output["passout"].decode()
)
if not ok:
utils.err(message)
diff --git a/pcs/constraint.py b/pcs/constraint.py
index d8415b6..6e1a16f 100644
--- a/pcs/constraint.py
+++ b/pcs/constraint.py
@@ -10,8 +10,6 @@ import xml.dom.minidom
from collections import defaultdict
from xml.dom.minidom import parseString
-import pcs.cli.constraint_colocation.command as colocation_command
-import pcs.cli.constraint_order.command as order_command
from pcs import (
rule as rule_utils,
usage,
@@ -21,11 +19,15 @@ from pcs.cli import (
constraint_colocation,
constraint_order,
)
-from pcs.cli.constraint_ticket import command as ticket_command
+from pcs.cli.common import parse_args
from pcs.cli.common.errors import CmdLineInputError
+import pcs.cli.constraint_colocation.command as colocation_command
+import pcs.cli.constraint_order.command as order_command
+from pcs.cli.constraint_ticket import command as ticket_command
from pcs.lib.cib.constraint import resource_set
from pcs.lib.cib.constraint.order import ATTRIB as order_attrib
from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker.values import sanitize_id
OPTIONS_ACTION = resource_set.ATTRIB["action"]
@@ -36,111 +38,120 @@ DEFAULT_ROLE = "Started"
OPTIONS_SYMMETRICAL = order_attrib["symmetrical"]
OPTIONS_KIND = order_attrib["kind"]
+RESOURCE_TYPE_RESOURCE = "resource"
+RESOURCE_TYPE_REGEXP = "regexp"
+
def constraint_cmd(argv):
lib = utils.get_library_wrapper()
modificators = utils.get_modificators()
+
if len(argv) == 0:
argv = ["list"]
-
sub_cmd = argv.pop(0)
- if (sub_cmd == "help"):
- usage.constraint(argv)
- elif (sub_cmd == "location"):
- if len (argv) == 0:
- sub_cmd2 = "show"
- else:
- sub_cmd2 = argv.pop(0)
- if (sub_cmd2 == "add"):
- location_add(argv)
- elif (sub_cmd2 in ["remove","delete"]):
- location_add(argv,True)
- elif (sub_cmd2 == "show"):
- location_show(argv)
- elif len(argv) >= 2:
- if argv[0] == "rule":
- location_rule([sub_cmd2] + argv)
+ try:
+ if (sub_cmd == "help"):
+ usage.constraint(argv)
+ elif (sub_cmd == "location"):
+ if len (argv) == 0:
+ sub_cmd2 = "show"
else:
- location_prefer([sub_cmd2] + argv)
- else:
- usage.constraint()
- sys.exit(1)
- elif (sub_cmd == "order"):
- if (len(argv) == 0):
- sub_cmd2 = "show"
- else:
- sub_cmd2 = argv.pop(0)
-
- if (sub_cmd2 == "set"):
- try:
- order_command.create_with_set(lib, argv, modificators)
- except CmdLineInputError as e:
- utils.exit_on_cmdline_input_errror(e, "constraint", 'order set')
- except LibraryError as e:
- utils.process_library_reports(e.args)
- elif (sub_cmd2 in ["remove","delete"]):
- order_rm(argv)
- elif (sub_cmd2 == "show"):
- order_command.show(lib, argv, modificators)
- else:
- order_start([sub_cmd2] + argv)
- elif sub_cmd == "ticket":
- usage_name = "ticket"
- try:
- command_map = {
- "set": ticket_command.create_with_set,
- "add": ticket_command.add,
- "remove": ticket_command.remove,
- "show": ticket_command.show,
- }
- sub_command = argv[0] if argv else "show"
- if sub_command not in command_map:
- raise CmdLineInputError()
- usage_name = "ticket "+sub_command
-
- command_map[sub_command](lib, argv[1:], modificators)
- except LibraryError as e:
- utils.process_library_reports(e.args)
- except CmdLineInputError as e:
- utils.exit_on_cmdline_input_errror(e, "constraint", usage_name)
-
- elif (sub_cmd == "colocation"):
- if (len(argv) == 0):
- sub_cmd2 = "show"
- else:
- sub_cmd2 = argv.pop(0)
-
- if (sub_cmd2 == "add"):
- colocation_add(argv)
- elif (sub_cmd2 in ["remove","delete"]):
- colocation_rm(argv)
- elif (sub_cmd2 == "set"):
+ sub_cmd2 = argv.pop(0)
+
+ if (sub_cmd2 == "add"):
+ location_add(argv)
+ elif (sub_cmd2 in ["remove","delete"]):
+ location_add(argv,True)
+ elif (sub_cmd2 == "show"):
+ location_show(argv)
+ elif len(argv) >= 2:
+ if argv[0] == "rule":
+ location_rule([sub_cmd2] + argv)
+ else:
+ location_prefer([sub_cmd2] + argv)
+ else:
+ usage.constraint()
+ sys.exit(1)
+ elif (sub_cmd == "order"):
+ if (len(argv) == 0):
+ sub_cmd2 = "show"
+ else:
+ sub_cmd2 = argv.pop(0)
+
+ if (sub_cmd2 == "set"):
+ try:
+ order_command.create_with_set(lib, argv, modificators)
+ except CmdLineInputError as e:
+ utils.exit_on_cmdline_input_errror(e, "constraint", 'order set')
+ except LibraryError as e:
+ utils.process_library_reports(e.args)
+ elif (sub_cmd2 in ["remove","delete"]):
+ order_rm(argv)
+ elif (sub_cmd2 == "show"):
+ order_command.show(lib, argv, modificators)
+ else:
+ order_start([sub_cmd2] + argv)
+ elif sub_cmd == "ticket":
+ usage_name = "ticket"
try:
-
- colocation_command.create_with_set(lib, argv, modificators)
+ command_map = {
+ "set": ticket_command.create_with_set,
+ "add": ticket_command.add,
+ "remove": ticket_command.remove,
+ "show": ticket_command.show,
+ }
+ sub_command = argv[0] if argv else "show"
+ if sub_command not in command_map:
+ raise CmdLineInputError()
+ usage_name = "ticket "+sub_command
+
+ command_map[sub_command](lib, argv[1:], modificators)
except LibraryError as e:
utils.process_library_reports(e.args)
except CmdLineInputError as e:
- utils.exit_on_cmdline_input_errror(e, "constraint", "colocation set")
- elif (sub_cmd2 == "show"):
+ utils.exit_on_cmdline_input_errror(e, "constraint", usage_name)
+
+ elif (sub_cmd == "colocation"):
+ if (len(argv) == 0):
+ sub_cmd2 = "show"
+ else:
+ sub_cmd2 = argv.pop(0)
+
+ if (sub_cmd2 == "add"):
+ colocation_add(argv)
+ elif (sub_cmd2 in ["remove","delete"]):
+ colocation_rm(argv)
+ elif (sub_cmd2 == "set"):
+ try:
+
+ colocation_command.create_with_set(lib, argv, modificators)
+ except LibraryError as e:
+ utils.process_library_reports(e.args)
+ except CmdLineInputError as e:
+ utils.exit_on_cmdline_input_errror(e, "constraint", "colocation set")
+ elif (sub_cmd2 == "show"):
+ colocation_command.show(lib, argv, modificators)
+ else:
+ usage.constraint()
+ sys.exit(1)
+ elif (sub_cmd in ["remove","delete"]):
+ constraint_rm(argv)
+ elif (sub_cmd == "show" or sub_cmd == "list"):
+ location_show(argv)
+ order_command.show(lib, argv, modificators)
colocation_command.show(lib, argv, modificators)
+ ticket_command.show(lib, argv, modificators)
+ elif (sub_cmd == "ref"):
+ constraint_ref(argv)
+ elif (sub_cmd == "rule"):
+ constraint_rule(argv)
else:
usage.constraint()
sys.exit(1)
- elif (sub_cmd in ["remove","delete"]):
- constraint_rm(argv)
- elif (sub_cmd == "show" or sub_cmd == "list"):
- location_show(argv)
- order_command.show(lib, argv, modificators)
- colocation_command.show(lib, argv, modificators)
- ticket_command.show(lib, argv, modificators)
- elif (sub_cmd == "ref"):
- constraint_ref(argv)
- elif (sub_cmd == "rule"):
- constraint_rule(argv)
- else:
- usage.constraint()
- sys.exit(1)
+ except LibraryError as e:
+ utils.process_library_reports(e.args)
+ except CmdLineInputError as e:
+ utils.exit_on_cmdline_input_errror(e, "resource", sub_cmd)
@@ -526,7 +537,17 @@ def location_show(argv):
showDetail = False
if len(argv) > 1:
- valid_noderes = argv[1:]
+ if byNode:
+ valid_noderes = argv[1:]
+ else:
+ valid_noderes = [
+ parse_args.parse_typed_arg(
+ arg,
+ [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP],
+ RESOURCE_TYPE_RESOURCE
+ )
+ for arg in argv[1:]
+ ]
else:
valid_noderes = []
@@ -540,17 +561,24 @@ def location_show(argv):
print("Location Constraints:")
for rsc_loc in all_loc_constraints:
- lc_node = rsc_loc.getAttribute("node")
- lc_rsc = rsc_loc.getAttribute("rsc")
+ if rsc_loc.hasAttribute("rsc-pattern"):
+ lc_rsc_type = RESOURCE_TYPE_REGEXP
+ lc_rsc_value = rsc_loc.getAttribute("rsc-pattern")
+ lc_name = "Resource pattern: {0}".format(lc_rsc_value)
+ else:
+ lc_rsc_type = RESOURCE_TYPE_RESOURCE
+ lc_rsc_value = rsc_loc.getAttribute("rsc")
+ lc_name = "Resource: {0}".format(lc_rsc_value)
+ lc_rsc = lc_rsc_type, lc_rsc_value, lc_name
lc_id = rsc_loc.getAttribute("id")
+ lc_node = rsc_loc.getAttribute("node")
lc_score = rsc_loc.getAttribute("score")
lc_role = rsc_loc.getAttribute("role")
- lc_name = "Resource: " + lc_rsc
lc_resource_discovery = rsc_loc.getAttribute("resource-discovery")
for child in rsc_loc.childNodes:
if child.nodeType == child.ELEMENT_NODE and child.tagName == "rule":
- ruleshash[lc_name].append(child)
+ ruleshash[lc_rsc].append(child)
# NEED TO FIX FOR GROUP LOCATION CONSTRAINTS (where there are children of
# rsc_location)
@@ -573,18 +601,36 @@ def location_show(argv):
nodeshash = nodehashoff
rschash = rschashoff
+ hash_element = {
+ "id": lc_id,
+ "rsc_type": lc_rsc_type,
+ "rsc_value": lc_rsc_value,
+ "rsc_label": lc_name,
+ "node": lc_node,
+ "score": lc_score,
+ "role": lc_role,
+ "resource-discovery": lc_resource_discovery,
+ }
if lc_node in nodeshash:
- nodeshash[lc_node].append((lc_id,lc_rsc,lc_score, lc_role, lc_resource_discovery))
+ nodeshash[lc_node].append(hash_element)
else:
- nodeshash[lc_node] = [(lc_id, lc_rsc,lc_score, lc_role, lc_resource_discovery)]
-
+ nodeshash[lc_node] = [hash_element]
if lc_rsc in rschash:
- rschash[lc_rsc].append((lc_id,lc_node,lc_score, lc_role, lc_resource_discovery))
+ rschash[lc_rsc].append(hash_element)
else:
- rschash[lc_rsc] = [(lc_id,lc_node,lc_score, lc_role, lc_resource_discovery)]
-
- nodelist = list(set(list(nodehashon.keys()) + list(nodehashoff.keys())))
- rsclist = list(set(list(rschashon.keys()) + list(rschashoff.keys())))
+ rschash[lc_rsc] = [hash_element]
+
+ nodelist = sorted(set(list(nodehashon.keys()) + list(nodehashoff.keys())))
+ rsclist = sorted(
+ set(list(rschashon.keys()) + list(rschashoff.keys())),
+ key=lambda item: (
+ {
+ RESOURCE_TYPE_RESOURCE: 1,
+ RESOURCE_TYPE_REGEXP: 0,
+ }[item[0]],
+ item[1]
+ )
+ )
if byNode == True:
for node in nodelist:
@@ -601,25 +647,29 @@ def location_show(argv):
if node in nodehash:
print(label)
for options in nodehash[node]:
- line_parts = [
- " " + options[1] + " (" + options[0] + ")",
- ]
- if options[3]:
- line_parts.append("(role: {0})".format(options[3]))
- if options[4]:
+ line_parts = [(
+ " " + options["rsc_label"]
+ + " (" + options["id"] + ")"
+ )]
+ if options["role"]:
line_parts.append(
- "(resource-discovery={0})".format(options[4])
+ "(role: {0})".format(options["role"])
)
- line_parts.append("Score: " + options[2])
+ if options["resource-discovery"]:
+ line_parts.append(
+ "(resource-discovery={0})".format(
+ options["resource-discovery"]
+ )
+ )
+ line_parts.append("Score: " + options["score"])
print(" ".join(line_parts))
- show_location_rules(ruleshash,showDetail)
+ show_location_rules(ruleshash, showDetail)
else:
- rsclist.sort()
for rsc in rsclist:
if len(valid_noderes) != 0:
- if rsc not in valid_noderes:
+ if rsc[0:2] not in valid_noderes:
continue
- print(" Resource: " + rsc)
+ print(" {0}".format(rsc[2]))
rschash_label = (
(rschashon, " Enabled on:"),
(rschashoff, " Disabled on:"),
@@ -627,32 +677,45 @@ def location_show(argv):
for rschash, label in rschash_label:
if rsc in rschash:
for options in rschash[rsc]:
- if not options[1]:
+ if not options["node"]:
continue
line_parts = [
label,
- options[1],
- "(score:{0})".format(options[2]),
+ options["node"],
+ "(score:{0})".format(options["score"]),
]
- if options[3]:
- line_parts.append("(role: {0})".format(options[3]))
- if options[4]:
+ if options["role"]:
+ line_parts.append(
+ "(role: {0})".format(options["role"])
+ )
+ if options["resource-discovery"]:
line_parts.append(
- "(resource-discovery={0})".format(options[4])
+ "(resource-discovery={0})".format(
+ options["resource-discovery"]
+ )
)
if showDetail:
- line_parts.append("(id:{0})".format(options[0]))
+ line_parts.append("(id:{0})".format(options["id"]))
print(" ".join(line_parts))
miniruleshash={}
- miniruleshash["Resource: " + rsc] = ruleshash["Resource: " + rsc]
- show_location_rules(miniruleshash,showDetail, True)
+ miniruleshash[rsc] = ruleshash[rsc]
+ show_location_rules(miniruleshash, showDetail, True)
-def show_location_rules(ruleshash,showDetail,noheader=False):
+def show_location_rules(ruleshash, showDetail, noheader=False):
constraint_options = {}
- for rsc in ruleshash:
- constrainthash= defaultdict(list)
+ for rsc in sorted(
+ ruleshash.keys(),
+ key=lambda item: (
+ {
+ RESOURCE_TYPE_RESOURCE: 1,
+ RESOURCE_TYPE_REGEXP: 0,
+ }[item[0]],
+ item[1]
+ )
+ ):
+ constrainthash = defaultdict(list)
if not noheader:
- print(" " + rsc)
+ print(" {0}".format(rsc[2]))
for rule in ruleshash[rsc]:
constraint_id = rule.parentNode.getAttribute("id")
constrainthash[constraint_id].append(rule)
@@ -676,6 +739,12 @@ def location_prefer(argv):
rsc = argv.pop(0)
prefer_option = argv.pop(0)
+ dummy_rsc_type, rsc_value = parse_args.parse_typed_arg(
+ rsc,
+ [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP],
+ RESOURCE_TYPE_RESOURCE
+ )
+
if prefer_option == "prefers":
prefer = True
elif prefer_option == "avoids":
@@ -703,80 +772,139 @@ def location_prefer(argv):
else:
score = "-" + score
node = nodeconf_a[0]
- location_add(["location-" +rsc+"-"+node+"-"+score,rsc,node,score])
+ location_add([
+ sanitize_id("location-{0}-{1}-{2}".format(rsc_value, node, score)),
+ rsc,
+ node,
+ score
+ ])
def location_add(argv,rm=False):
- if len(argv) < 4 and (rm == False or len(argv) < 1):
- usage.constraint()
+ if rm:
+ location_remove(argv)
+ return
+
+ if len(argv) < 4:
+ usage.constraint(["location add"])
sys.exit(1)
constraint_id = argv.pop(0)
+ rsc_type, rsc_value = parse_args.parse_typed_arg(
+ argv.pop(0),
+ [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP],
+ RESOURCE_TYPE_RESOURCE
+ )
+ node = argv.pop(0)
+ score = argv.pop(0)
+ options = []
+ # For now we only allow setting resource-discovery
+ if len(argv) > 0:
+ for arg in argv:
+ if '=' in arg:
+ options.append(arg.split('=',1))
+ else:
+ print("Error: bad option '%s'" % arg)
+ usage.constraint(["location add"])
+ sys.exit(1)
+ if options[-1][0] != "resource-discovery" and "--force" not in utils.pcs_options:
+ utils.err("bad option '%s', use --force to override" % options[-1][0])
- # If we're removing, we only care about the id
- if (rm == True):
- resource_name = ""
- node = ""
- score = ""
- else:
- id_valid, id_error = utils.validate_xml_id(constraint_id, 'constraint id')
- if not id_valid:
- utils.err(id_error)
- resource_name = argv.pop(0)
- node = argv.pop(0)
- score = argv.pop(0)
- options = []
- # For now we only allow setting resource-discovery
- if len(argv) > 0:
- for arg in argv:
- if '=' in arg:
- options.append(arg.split('=',1))
- else:
- print("Error: bad option '%s'" % arg)
- usage.constraint(["location add"])
- sys.exit(1)
- if options[-1][0] != "resource-discovery" and "--force" not in utils.pcs_options:
- utils.err("bad option '%s', use --force to override" % options[-1][0])
+ id_valid, id_error = utils.validate_xml_id(constraint_id, 'constraint id')
+ if not id_valid:
+ utils.err(id_error)
+ if not utils.is_score(score):
+ utils.err("invalid score '%s', use integer or INFINITY or -INFINITY" % score)
- resource_valid, resource_error, correct_id \
- = utils.validate_constraint_resource(
- utils.get_cib_dom(), resource_name
- )
+ required_version = None
+ if rsc_type == RESOURCE_TYPE_REGEXP:
+ required_version = 2, 6, 0
+
+ if required_version:
+ dom = utils.cluster_upgrade_to_version(required_version)
+ else:
+ dom = utils.get_cib_dom()
+
+ if rsc_type == RESOURCE_TYPE_RESOURCE:
+ rsc_valid, rsc_error, correct_id = utils.validate_constraint_resource(
+ dom, rsc_value
+ )
if "--autocorrect" in utils.pcs_options and correct_id:
- resource_name = correct_id
- elif not resource_valid:
- utils.err(resource_error)
- if not utils.is_score(score):
- utils.err("invalid score '%s', use integer or INFINITY or -INFINITY" % score)
+ rsc_value = correct_id
+ elif not rsc_valid:
+ utils.err(rsc_error)
# Verify current constraint doesn't already exist
# If it does we replace it with the new constraint
- (dom,constraintsElement) = getCurrentConstraints()
+ dummy_dom, constraintsElement = getCurrentConstraints(dom)
elementsToRemove = []
-
# If the id matches, or the rsc & node match, then we replace/remove
for rsc_loc in constraintsElement.getElementsByTagName('rsc_location'):
- if (constraint_id == rsc_loc.getAttribute("id")) or \
- (rsc_loc.getAttribute("rsc") == resource_name and \
- rsc_loc.getAttribute("node") == node and not rm):
+ if (
+ rsc_loc.getAttribute("id") == constraint_id
+ or
+ (
+ rsc_loc.getAttribute("node") == node
+ and
+ (
+ (
+ RESOURCE_TYPE_RESOURCE == rsc_type
+ and
+ rsc_loc.getAttribute("rsc") == rsc_value
+ )
+ or
+ (
+ RESOURCE_TYPE_REGEXP == rsc_type
+ and
+ rsc_loc.getAttribute("rsc-pattern") == rsc_value
+ )
+ )
+ )
+ ):
elementsToRemove.append(rsc_loc)
-
for etr in elementsToRemove:
constraintsElement.removeChild(etr)
- if (rm == True and len(elementsToRemove) == 0):
- utils.err("resource location id: " + constraint_id + " not found.")
+ element = dom.createElement("rsc_location")
+ element.setAttribute("id",constraint_id)
+ if rsc_type == RESOURCE_TYPE_RESOURCE:
+ element.setAttribute("rsc", rsc_value)
+ elif rsc_type == RESOURCE_TYPE_REGEXP:
+ element.setAttribute("rsc-pattern", rsc_value)
+ element.setAttribute("node",node)
+ element.setAttribute("score",score)
+ for option in options:
+ element.setAttribute(option[0], option[1])
+ constraintsElement.appendChild(element)
+
+ utils.replace_cib_configuration(dom)
+
+def location_remove(argv):
+ # This code was originally merged in the location_add function and was
+ # documented to take 1 or 4 arguments:
+ # location remove <id> [<resource id> <node> <score>]
+    # However, it has always ignored all arguments except the constraint id.
+    # Therefore this command / function has no real use, as it can be fully
+    # replaced by "pcs constraint remove", which also removes constraints by
+    # id. For now I keep things as they are, but we should solve this when
+    # moving these functions to pcs.lib.
+ if len(argv) != 1:
+ usage.constraint(["location remove"])
+ sys.exit(1)
+
+ constraint_id = argv.pop(0)
+ dom, constraintsElement = getCurrentConstraints()
+
+ elementsToRemove = []
+ for rsc_loc in constraintsElement.getElementsByTagName('rsc_location'):
+ if constraint_id == rsc_loc.getAttribute("id"):
+ elementsToRemove.append(rsc_loc)
- if (not rm):
- element = dom.createElement("rsc_location")
- element.setAttribute("id",constraint_id)
- element.setAttribute("rsc",resource_name)
- element.setAttribute("node",node)
- element.setAttribute("score",score)
- for option in options:
- element.setAttribute(option[0], option[1])
- constraintsElement.appendChild(element)
+ if (len(elementsToRemove) == 0):
+ utils.err("resource location id: " + constraint_id + " not found.")
+ for etr in elementsToRemove:
+ constraintsElement.removeChild(etr)
utils.replace_cib_configuration(dom)
@@ -785,29 +913,52 @@ def location_rule(argv):
usage.constraint(["location", "rule"])
sys.exit(1)
- res_name = argv.pop(0)
- resource_valid, resource_error, correct_id \
- = utils.validate_constraint_resource(utils.get_cib_dom(), res_name)
- if "--autocorrect" in utils.pcs_options and correct_id:
- res_name = correct_id
- elif not resource_valid:
- utils.err(resource_error)
-
+ rsc_type, rsc_value = parse_args.parse_typed_arg(
+ argv.pop(0),
+ [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP],
+ RESOURCE_TYPE_RESOURCE
+ )
argv.pop(0) # pop "rule"
+ options, rule_argv = rule_utils.parse_argv(
+ argv,
+ {
+ "constraint-id": None,
+ "resource-discovery": None,
+ }
+ )
+ resource_discovery = (
+ "resource-discovery" in options
+ and
+ options["resource-discovery"]
+ )
+
+ required_version = None
+ if resource_discovery:
+ required_version = 2, 2, 0
+ if rsc_type == RESOURCE_TYPE_REGEXP:
+ required_version = 2, 6, 0
+
+ if required_version:
+ dom = utils.cluster_upgrade_to_version(required_version)
+ else:
+ dom = utils.get_cib_dom()
+
+ if rsc_type == RESOURCE_TYPE_RESOURCE:
+ rsc_valid, rsc_error, correct_id = utils.validate_constraint_resource(
+ dom, rsc_value
+ )
+ if "--autocorrect" in utils.pcs_options and correct_id:
+ rsc_value = correct_id
+ elif not rsc_valid:
+ utils.err(rsc_error)
- options, rule_argv = rule_utils.parse_argv(argv, {"constraint-id": None, "resource-discovery": None,})
+ cib, constraints = getCurrentConstraints(dom)
+ lc = cib.createElement("rsc_location")
# If resource-discovery is specified, we use it with the rsc_location
# element not the rule
- if "resource-discovery" in options and options["resource-discovery"]:
- utils.checkAndUpgradeCIB(2,2,0)
- cib, constraints = getCurrentConstraints(utils.get_cib_dom())
- lc = cib.createElement("rsc_location")
+ if resource_discovery:
lc.setAttribute("resource-discovery", options.pop("resource-discovery"))
- else:
- cib, constraints = getCurrentConstraints(utils.get_cib_dom())
- lc = cib.createElement("rsc_location")
-
constraints.appendChild(lc)
if options.get("constraint-id"):
@@ -816,7 +967,7 @@ def location_rule(argv):
)
if not id_valid:
utils.err(id_error)
- if utils.does_id_exist(cib, options["constraint-id"]):
+ if utils.does_id_exist(dom, options["constraint-id"]):
utils.err(
"id '%s' is already in use, please specify another one"
% options["constraint-id"]
@@ -824,8 +975,14 @@ def location_rule(argv):
lc.setAttribute("id", options["constraint-id"])
del options["constraint-id"]
else:
- lc.setAttribute("id", utils.find_unique_id(cib, "location-" + res_name))
- lc.setAttribute("rsc", res_name)
+ lc.setAttribute(
+ "id",
+ utils.find_unique_id(dom, sanitize_id("location-" + rsc_value))
+ )
+ if rsc_type == RESOURCE_TYPE_RESOURCE:
+ lc.setAttribute("rsc", rsc_value)
+ elif rsc_type == RESOURCE_TYPE_REGEXP:
+ lc.setAttribute("rsc-pattern", rsc_value)
rule_utils.dom_rule_add(lc, options, rule_argv)
location_rule_check_duplicates(constraints, lc)
@@ -849,8 +1006,18 @@ def location_rule_check_duplicates(dom, constraint_el):
def location_rule_find_duplicates(dom, constraint_el):
def normalize(constraint_el):
+ if constraint_el.hasAttribute("rsc-pattern"):
+ rsc = (
+ RESOURCE_TYPE_REGEXP,
+ constraint_el.getAttribute("rsc-pattern")
+ )
+ else:
+ rsc = (
+ RESOURCE_TYPE_RESOURCE,
+ constraint_el.getAttribute("rsc")
+ )
return (
- constraint_el.getAttribute("rsc"),
+ rsc,
[
rule_utils.ExportAsExpression().get_string(rule_el, True)
for rule_el in constraint_el.getElementsByTagName("rule")
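For orientation: the constraint commands above now take a typed resource argument, where "regexp%<pattern>" selects a rsc-pattern constraint and a plain id selects rsc. Below is a minimal standalone sketch of the parsing idea; the "%" separator and the constant values are assumptions for illustration only, not the actual pcs.cli.common.parse_args code.

    RESOURCE_TYPE_RESOURCE = "resource"
    RESOURCE_TYPE_REGEXP = "regexp"

    def parse_typed_arg_sketch(arg, allowed_types, default_type):
        # "regexp%dummy.*" -> ("regexp", "dummy.*"); plain "dummy" -> ("resource", "dummy")
        if "%" not in arg:
            return default_type, arg
        arg_type, value = arg.split("%", 1)
        if arg_type not in allowed_types:
            raise ValueError("unknown type '{0}'".format(arg_type))
        return arg_type, value

    print(parse_typed_arg_sketch(
        "regexp%dummy.*",
        [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP],
        RESOURCE_TYPE_RESOURCE
    ))
    # ('regexp', 'dummy.*')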
diff --git a/pcs/lib/booth/config_files.py b/pcs/lib/booth/config_files.py
index 7b91379..762cc96 100644
--- a/pcs/lib/booth/config_files.py
+++ b/pcs/lib/booth/config_files.py
@@ -6,7 +6,6 @@ from __future__ import (
)
import os
-import binascii
from pcs.common import report_codes, env_file_role_codes as file_roles
from pcs.common.tools import format_environment_error
@@ -16,9 +15,6 @@ from pcs.lib.errors import ReportItemSeverity
from pcs.settings import booth_config_dir as BOOTH_CONFIG_DIR
-def generate_key():
- return binascii.hexlify(os.urandom(32))
-
def get_all_configs_file_names():
"""
Returns list of all file names ending with '.conf' in booth configuration
diff --git a/pcs/lib/booth/config_structure.py b/pcs/lib/booth/config_structure.py
index 09ff1a7..e30fae3 100644
--- a/pcs/lib/booth/config_structure.py
+++ b/pcs/lib/booth/config_structure.py
@@ -109,14 +109,17 @@ def validate_ticket_options(report_processor, options, allow_unknown_options):
reports = []
for key in sorted(options):
if key in GLOBAL_KEYS:
- reports.append(
- common_reports.invalid_option(key, TICKET_KEYS, "booth ticket")
- )
+ reports.append(common_reports.invalid_option(
+ [key],
+ TICKET_KEYS,
+ "booth ticket",
+ ))
elif key not in TICKET_KEYS:
reports.append(
common_reports.invalid_option(
- key, TICKET_KEYS,
+ [key],
+ TICKET_KEYS,
"booth ticket",
severity=(
severities.WARNING if allow_unknown_options
diff --git a/pcs/lib/booth/env.py b/pcs/lib/booth/env.py
index 57d47aa..e80b8c9 100644
--- a/pcs/lib/booth/env.py
+++ b/pcs/lib/booth/env.py
@@ -78,7 +78,8 @@ class BoothEnv(object):
self.__key_path = env_data["key_path"]
self.__key = GhostFile(
file_role=env_file_role_codes.BOOTH_KEY,
- content=env_data["key_file"]["content"]
+ content=env_data["key_file"]["content"],
+ is_binary=True
)
else:
self.__config = RealFile(
@@ -92,6 +93,7 @@ class BoothEnv(object):
self.__key = RealFile(
file_role=env_file_role_codes.BOOTH_KEY,
file_path=path,
+ is_binary=True
)
def command_expect_live_env(self):
@@ -131,7 +133,7 @@ class BoothEnv(object):
self.__report_processor,
can_overwrite_existing
)
- self.__key.write(key_content, set_keyfile_access, is_binary=True)
+ self.__key.write(key_content, set_keyfile_access)
def push_config(self, content):
self.__config.write(content)
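The booth key file is now flagged as binary on the file wrapper itself instead of per write() call. A toy sketch of that idea follows; the wrapper class is hypothetical and not the pcs RealFile/GhostFile API.

    class KeyFileSketch(object):
        """Toy stand-in for a file wrapper that knows whether its content is binary."""
        def __init__(self, file_path, is_binary=False):
            self.file_path = file_path
            self.mode = "wb" if is_binary else "w"

        def write(self, content, post_write_hook=None):
            with open(self.file_path, self.mode) as key_file:
                key_file.write(content)
            if post_write_hook:
                post_write_hook(self.file_path)

    KeyFileSketch("/tmp/booth.key", is_binary=True).write(b"\x00\x01 binary key data")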
diff --git a/pcs/lib/booth/resource.py b/pcs/lib/booth/resource.py
index a4b7b1e..cf00d8f 100644
--- a/pcs/lib/booth/resource.py
+++ b/pcs/lib/booth/resource.py
@@ -13,49 +13,22 @@ def create_resource_id(resources_section, name, suffix):
resources_section.getroottree(), "booth-{0}-{1}".format(name, suffix)
)
-def get_creator(resource_create, resource_remove=None):
- #TODO resource_create is provisional hack until resources are not moved to
- #lib
- def create_booth_in_cluster(ip, booth_config_file_path, create_id):
- ip_id = create_id("ip")
- booth_id = create_id("service")
- group_id = create_id("group")
-
- resource_create(
- ra_id=ip_id,
- ra_type="ocf:heartbeat:IPaddr2",
- ra_values=["ip={0}".format(ip)],
- op_values=[],
- meta_values=[],
- clone_opts=[],
- group=group_id,
- )
- try:
- resource_create(
- ra_id=booth_id,
- ra_type="ocf:pacemaker:booth-site",
- ra_values=["config={0}".format(booth_config_file_path)],
- op_values=[],
- meta_values=[],
- clone_opts=[],
- group=group_id,
- )
- except SystemExit:
- resource_remove(ip_id)
- return create_booth_in_cluster
-
def is_ip_resource(resource_element):
- return resource_element.attrib["type"] == "IPaddr2"
+ return resource_element.attrib.get("type", "") == "IPaddr2"
def find_grouped_ip_element_to_remove(booth_element):
- if booth_element.getparent().tag != "group":
+ group = booth_element.getparent()
+
+ if group.tag != "group":
return None
- group = booth_element.getparent()
- if len(group) != 2:
- #when something else in group, ip is not for remove
+ primitives = group.xpath("./primitive")
+ if len(primitives) != 2:
+ # Don't remove the IP resource if some other resources are in the group.
+ # It is most likely manually configured by the user so we cannot delete
+ # it automatically.
return None
- for element in group:
+ for element in primitives:
if is_ip_resource(element):
return element
return None
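A rough illustration of the new group handling above: only primitive children are counted, so a meta_attributes element (e.g. left behind by disabling the group) no longer blocks removal of the IP resource. Sketch assuming lxml is installed:

    from lxml import etree

    group = etree.fromstring(
        '<group>'
        '<primitive id="ip" type="IPaddr2"/>'
        '<primitive id="booth" type="booth-site"/>'
        '<meta_attributes><nvpair name="target-role" value="Stopped"/></meta_attributes>'
        '</group>'
    )
    primitives = group.xpath("./primitive")
    # The meta_attributes child is ignored, so the group still counts as
    # "booth + ip only" and the IPaddr2 primitive stays eligible for removal.
    assert len(primitives) == 2
    assert [el.get("id") for el in primitives if el.get("type") == "IPaddr2"] == ["ip"]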
diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py
index 40618b2..a611f10 100644
--- a/pcs/lib/booth/test/test_config_structure.py
+++ b/pcs/lib/booth/test/test_config_structure.py
@@ -59,7 +59,7 @@ class ValidateTicketOptionsTest(TestCase):
severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "site",
+ "option_names": ["site"],
"option_type": "booth ticket",
"allowed": list(config_structure.TICKET_KEYS),
},
@@ -68,7 +68,7 @@ class ValidateTicketOptionsTest(TestCase):
severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "port",
+ "option_names": ["port"],
"option_type": "booth ticket",
"allowed": list(config_structure.TICKET_KEYS),
},
@@ -86,7 +86,7 @@ class ValidateTicketOptionsTest(TestCase):
severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "unknown",
+ "option_names": ["unknown"],
"option_type": "booth ticket",
"allowed": list(config_structure.TICKET_KEYS),
},
@@ -118,7 +118,7 @@ class ValidateTicketOptionsTest(TestCase):
severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "site",
+ "option_names": ["site"],
"option_type": "booth ticket",
"allowed": list(config_structure.TICKET_KEYS),
},
@@ -141,7 +141,7 @@ class ValidateTicketOptionsTest(TestCase):
severities.WARNING,
report_codes.INVALID_OPTION,
{
- "option_name": "unknown",
+ "option_names": ["unknown"],
"option_type": "booth ticket",
"allowed": list(config_structure.TICKET_KEYS),
},
diff --git a/pcs/lib/booth/test/test_env.py b/pcs/lib/booth/test/test_env.py
index 993d709..c14afbf 100644
--- a/pcs/lib/booth/test/test_env.py
+++ b/pcs/lib/booth/test/test_env.py
@@ -5,17 +5,13 @@ from __future__ import (
unicode_literals,
)
-import grp
-import os
-import pwd
from pcs.test.tools.pcs_unittest import TestCase
-from pcs import settings
from pcs.common import report_codes
from pcs.lib.booth import env
from pcs.lib.errors import ReportItemSeverity as severities
from pcs.test.tools.assertions import assert_raise_library_error
-from pcs.test.tools.misc import get_test_resource as rc, create_patcher
+from pcs.test.tools.misc import create_patcher
from pcs.test.tools.pcs_unittest import mock
patch_env = create_patcher("pcs.lib.booth.env")
@@ -109,7 +105,7 @@ class BoothEnvTest(TestCase):
"content": "secure",
"can_overwrite_existing_file": False,
"no_existing_file_expected": False,
- "is_binary": False,
+ "is_binary": True,
},
}
)
@@ -121,38 +117,25 @@ class BoothEnvTest(TestCase):
)
class SetKeyfileAccessTest(TestCase):
- def test_set_desired_file_access(self):
- #setup
- file_path = rc("temp-keyfile")
- if os.path.exists(file_path):
- os.remove(file_path)
- with open(file_path, "w") as file:
- file.write("content")
-
- #check assumptions
- stat = os.stat(file_path)
- self.assertNotEqual('600', oct(stat.st_mode)[-3:])
- current_user = pwd.getpwuid(os.getuid())[0]
- if current_user != settings.pacemaker_uname:
- file_user = pwd.getpwuid(stat.st_uid)[0]
- self.assertNotEqual(file_user, settings.pacemaker_uname)
- current_group = grp.getgrgid(os.getgid())[0]
- if current_group != settings.pacemaker_gname:
- file_group = grp.getgrgid(stat.st_gid)[0]
- self.assertNotEqual(file_group, settings.pacemaker_gname)
-
- #run tested method
+ @patch_env("os.chmod")
+ @patch_env("os.chown")
+ @patch_env("grp.getgrnam")
+ @patch_env("pwd.getpwnam")
+ @patch_env("settings")
+ def test_do_everything_to_set_desired_file_access(
+ self, settings, getpwnam, getgrnam, chown, chmod
+ ):
+ file_path = "/tmp/some_booth_file"
env.set_keyfile_access(file_path)
- #check
- stat = os.stat(file_path)
- self.assertEqual('600', oct(stat.st_mode)[-3:])
-
- file_user = pwd.getpwuid(stat.st_uid)[0]
- self.assertEqual(file_user, settings.pacemaker_uname)
+ getpwnam.assert_called_once_with(settings.pacemaker_uname)
+ getgrnam.assert_called_once_with(settings.pacemaker_gname)
- file_group = grp.getgrgid(stat.st_gid)[0]
- self.assertEqual(file_group, settings.pacemaker_gname)
+ chown.assert_called_once_with(
+ file_path,
+ getpwnam.return_value.pw_uid,
+ getgrnam.return_value.gr_gid,
+ )
@patch_env("pwd.getpwnam", mock.MagicMock(side_effect=KeyError))
@patch_env("settings.pacemaker_uname", "some-user")
diff --git a/pcs/lib/booth/test/test_resource.py b/pcs/lib/booth/test/test_resource.py
index 8971438..929c33b 100644
--- a/pcs/lib/booth/test/test_resource.py
+++ b/pcs/lib/booth/test/test_resource.py
@@ -11,7 +11,6 @@ from lxml import etree
import pcs.lib.booth.resource as booth_resource
from pcs.test.tools.pcs_unittest import mock
-from pcs.test.tools.misc import get_test_resource as rc
def fixture_resources_with_booth(booth_config_file_path):
@@ -91,6 +90,9 @@ class RemoveFromClusterTest(TestCase):
booth_resource.get_remover(mock_resource_remove)(element_list)
return mock_resource_remove
+ def find_booth_resources(self, tree):
+ return tree.xpath('.//primitive[@type="booth-site"]')
+
def test_remove_ip_when_is_only_booth_sibling_in_group(self):
group = etree.fromstring('''
<group>
@@ -103,7 +105,7 @@ class RemoveFromClusterTest(TestCase):
</group>
''')
- mock_resource_remove = self.call(group.getchildren()[1:])
+ mock_resource_remove = self.call(self.find_booth_resources(group))
self.assertEqual(
mock_resource_remove.mock_calls, [
mock.call('ip'),
@@ -111,43 +113,73 @@ class RemoveFromClusterTest(TestCase):
]
)
-class CreateInClusterTest(TestCase):
- def test_remove_ip_when_booth_resource_add_failed(self):
- mock_resource_create = mock.Mock(side_effect=[None, SystemExit(1)])
- mock_resource_remove = mock.Mock()
- mock_create_id = mock.Mock(side_effect=["ip_id","booth_id","group_id"])
- ip = "1.2.3.4"
- booth_config_file_path = rc("/path/to/booth.conf")
-
- booth_resource.get_creator(mock_resource_create, mock_resource_remove)(
- ip,
- booth_config_file_path,
- mock_create_id
+ def test_remove_ip_when_group_is_disabled_1(self):
+ group = etree.fromstring('''
+ <group>
+ <primitive id="ip" type="IPaddr2"/>
+ <primitive id="booth" type="booth-site">
+ <instance_attributes>
+ <nvpair name="config" value="/PATH/TO/CONF"/>
+ </instance_attributes>
+ </primitive>
+ <meta_attributes>
+ <nvpair name="target-role" value="Stopped"/>
+ </meta_attributes>
+ </group>
+ ''')
+
+ mock_resource_remove = self.call(self.find_booth_resources(group))
+ self.assertEqual(
+ mock_resource_remove.mock_calls, [
+ mock.call('ip'),
+ mock.call('booth'),
+ ]
)
- self.assertEqual(mock_resource_create.mock_calls, [
- mock.call(
- clone_opts=[],
- group=u'group_id',
- meta_values=[],
- op_values=[],
- ra_id=u'ip_id',
- ra_type=u'ocf:heartbeat:IPaddr2',
- ra_values=[u'ip=1.2.3.4'],
- ),
- mock.call(
- clone_opts=[],
- group='group_id',
- meta_values=[],
- op_values=[],
- ra_id='booth_id',
- ra_type='ocf:pacemaker:booth-site',
- ra_values=['config=/path/to/booth.conf'],
- )
- ])
- mock_resource_remove.assert_called_once_with("ip_id")
+ def test_remove_ip_when_group_is_disabled_2(self):
+ group = etree.fromstring('''
+ <group>
+ <meta_attributes>
+ <nvpair name="target-role" value="Stopped"/>
+ </meta_attributes>
+ <primitive id="ip" type="IPaddr2"/>
+ <primitive id="booth" type="booth-site">
+ <instance_attributes>
+ <nvpair name="config" value="/PATH/TO/CONF"/>
+ </instance_attributes>
+ </primitive>
+ </group>
+ ''')
+
+ mock_resource_remove = self.call(self.find_booth_resources(group))
+ self.assertEqual(
+ mock_resource_remove.mock_calls, [
+ mock.call('ip'),
+ mock.call('booth'),
+ ]
+ )
+
+ def test_dont_remove_ip_when_group_has_other_resources(self):
+ group = etree.fromstring('''
+ <group>
+ <primitive id="ip" type="IPaddr2"/>
+ <primitive id="booth" type="booth-site">
+ <instance_attributes>
+ <nvpair name="config" value="/PATH/TO/CONF"/>
+ </instance_attributes>
+ </primitive>
+ <primitive id="dummy" type="Dummy"/>
+ </group>
+ ''')
+
+ mock_resource_remove = self.call(self.find_booth_resources(group))
+ self.assertEqual(
+ mock_resource_remove.mock_calls, [
+ mock.call('booth'),
+ ]
+ )
-class FindBindedIpTest(TestCase):
+class FindBoundIpTest(TestCase):
def fixture_resource_section(self, ip_element_list):
resources_section = etree.fromstring('<resources/>')
group = etree.SubElement(resources_section, "group")
diff --git a/pcs/lib/cib/acl.py b/pcs/lib/cib/acl.py
index 7f91cba..073763b 100644
--- a/pcs/lib/cib/acl.py
+++ b/pcs/lib/cib/acl.py
@@ -5,40 +5,32 @@ from __future__ import (
unicode_literals,
)
+from functools import partial
+
from lxml import etree
from pcs.lib import reports
from pcs.lib.errors import LibraryError
from pcs.lib.cib.tools import (
- etree_element_attibutes_to_dict,
check_new_id_applicable,
does_id_exist,
find_unique_id,
- get_acls,
+ find_element_by_tag_and_id,
)
+from pcs.lib.xml_tools import etree_element_attibutes_to_dict
-class AclError(Exception):
- pass
-
-
-class AclRoleNotFound(AclError):
- # pylint: disable=super-init-not-called
- def __init__(self, role_id):
- self.role_id = role_id
-
-
-class AclTargetNotFound(AclError):
- # pylint: disable=super-init-not-called
- def __init__(self, target_id):
- self.target_id = target_id
-
-
-class AclGroupNotFound(AclError):
- # pylint: disable=super-init-not-called
- def __init__(self, group_id):
- self.group_id = group_id
+TAG_GROUP = "acl_group"
+TAG_ROLE = "acl_role"
+TAG_TARGET = "acl_target"
+TAG_PERMISSION = "acl_permission"
+TAG_DESCRIPTION_MAP = {
+ TAG_GROUP: "group",
+ TAG_ROLE: "role",
+ TAG_TARGET: "user",
+ TAG_PERMISSION: "permission"
+}
def validate_permissions(tree, permission_info_list):
"""
@@ -74,35 +66,57 @@ def validate_permissions(tree, permission_info_list):
raise LibraryError(*report_items)
-def find_role(tree, role_id):
- """
- Returns acl_role element with specified role_id in given tree.
- Raise AclRoleNotFound if role doesn't exist.
+def _find(
+ tag, acl_section, element_id, none_if_id_unused=False, id_description=None
+):
+ if tag not in TAG_DESCRIPTION_MAP.keys():
+ raise AssertionError("Unknown acl tag '{0}'".format(tag))
+
+ return find_element_by_tag_and_id(
+ tag,
+ acl_section,
+ element_id,
+ id_description=id_description if id_description
+ else TAG_DESCRIPTION_MAP[tag]
+ ,
+ none_if_id_unused=none_if_id_unused,
+ )
- tree -- etree node
- role_id -- id of role
+find_group = partial(_find, TAG_GROUP)
+find_role = partial(_find, TAG_ROLE)
+find_target = partial(_find, TAG_TARGET)
+
+def find_target_or_group(acl_section, target_or_group_id):
"""
- role = tree.find('.//acl_role[@id="{0}"]'.format(role_id))
- if role is not None:
- return role
- raise AclRoleNotFound(role_id)
+    Returns the acl_target or acl_group element with id target_or_group_id.
+    The target element has higher priority: if a target and a group share the
+    same id, only the target element is affected by this function.
+    Raises LibraryError if there is no target or group element with the
+    specified id.
+    This approach is DEPRECATED and exists only for backward compatibility.
+    It is better to know explicitly whether we need a target (user) or a
+    group.
-def _find_permission(tree, permission_id):
+ acl_section -- cib etree node
+ target_or_group_id -- id of target/group element which should be returned
"""
- Returns acl_permission element with specified id.
- Raises LibraryError if that permission doesn't exist.
+ target = find_target(
+ acl_section,
+ target_or_group_id,
+ none_if_id_unused=True
+ )
- tree -- etree node
- permisson_id -- id of permision element
- """
- permission = tree.find(".//acl_permission[@id='{0}']".format(permission_id))
- if permission is not None:
- return permission
- raise LibraryError(reports.id_not_found(permission_id, "permission"))
+ if target is not None:
+ return target
+ return find_group(
+ acl_section,
+ target_or_group_id,
+ id_description="user/group"
+ )
-def create_role(tree, role_id, description=None):
+def create_role(acl_section, role_id, description=None):
"""
Create new role element and add it to cib.
Returns newly created role element.
@@ -110,31 +124,46 @@ def create_role(tree, role_id, description=None):
role_id id of desired role
description role description
"""
- check_new_id_applicable(tree, "ACL role", role_id)
- role = etree.SubElement(get_acls(tree), "acl_role", id=role_id)
+ check_new_id_applicable(acl_section, "ACL role", role_id)
+ role = etree.SubElement(acl_section, TAG_ROLE, id=role_id)
if description:
role.set("description", description)
return role
-def remove_role(tree, role_id, autodelete_users_groups=False):
+def remove_role(acl_section, role_id, autodelete_users_groups=False):
"""
Remove role with specified id from CIB and all references to it.
- tree -- etree node
+ acl_section -- etree node
role_id -- id of role to be removed
     autodelete_users_groups -- if True, remove targets left with no role after the removal
"""
- acl_role = find_role(tree, role_id)
+ acl_role = find_role(acl_section, role_id)
acl_role.getparent().remove(acl_role)
- for role_el in tree.findall(".//role[@id='{0}']".format(role_id)):
+ for role_el in acl_section.findall(".//role[@id='{0}']".format(role_id)):
role_parent = role_el.getparent()
role_parent.remove(role_el)
if autodelete_users_groups and role_parent.find(".//role") is None:
role_parent.getparent().remove(role_parent)
+def _assign_role(acl_section, role_id, target_el):
+ try:
+ role_el = find_role(acl_section, role_id)
+ except LibraryError as e:
+ return list(e.args)
+ assigned_role = target_el.find(
+ "./role[@id='{0}']".format(role_el.get("id"))
+ )
+ if assigned_role is not None:
+ return [reports.acl_role_is_already_assigned_to_target(
+ role_el.get("id"), target_el.get("id")
+ )]
+ etree.SubElement(target_el, "role", {"id": role_el.get("id")})
+ return []
+
-def assign_role(target_el, role_el):
+def assign_role(acl_section, role_id, target_el):
"""
Assign role element to specified target/group element.
Raise LibraryError if role is already assigned to target/group.
@@ -142,14 +171,25 @@ def assign_role(target_el, role_el):
     target_el -- etree element of target/group to which role should be assigned
role_el -- etree element of role
"""
- assigned_role = target_el.find(
- "./role[@id='{0}']".format(role_el.get("id"))
- )
- if assigned_role is not None:
- raise LibraryError(reports.acl_role_is_already_assigned_to_target(
- role_el.get("id"), target_el.get("id")
- ))
- etree.SubElement(target_el, "role", {"id": role_el.get("id")})
+ report_list = _assign_role(acl_section, role_id, target_el)
+ if report_list:
+ raise LibraryError(*report_list)
+
+def assign_all_roles(acl_section, role_id_list, element):
+ """
+ Assign roles from role_id_list to element.
+ Raises LibraryError on any failure.
+
+ acl_section -- cib etree node
+ element -- element to which specified roles should be assigned
+    role_id_list -- list of role ids
+ """
+ report_list = []
+ for role_id in role_id_list:
+ report_list.extend(_assign_role(acl_section, role_id, element))
+ if report_list:
+ raise LibraryError(*report_list)
+
def unassign_role(target_el, role_id, autodelete_target=False):
@@ -172,101 +212,67 @@ def unassign_role(target_el, role_id, autodelete_target=False):
target_el.getparent().remove(target_el)
-def find_target(tree, target_id):
- """
- Return acl_target etree element with specified id.
- Raise AclTargetNotFound if target with specified id doesn't exist.
-
- tree -- etree node
- target_id -- if of target to find
- """
- role = get_acls(tree).find('./acl_target[@id="{0}"]'.format(target_id))
- if role is None:
- raise AclTargetNotFound(target_id)
- return role
-
-
-def find_group(tree, group_id):
- """
- Returns acl_group etree element with specified id.
- Raise AclGroupNotFound if group with group_id doesn't exist.
-
- tree -- etree node
- group_id -- id of group to find
- """
- role = get_acls(tree).find('./acl_group[@id="{0}"]'.format(group_id))
- if role is None:
- raise AclGroupNotFound(group_id)
- return role
-
-
-def provide_role(tree, role_id):
+def provide_role(acl_section, role_id):
"""
     Returns role with id role_id. If it doesn't exist, it will be created.
role_id id of desired role
"""
- try:
- return find_role(tree, role_id)
- except AclRoleNotFound:
- return create_role(tree, role_id)
+ role = find_role(acl_section, role_id, none_if_id_unused=True)
+ return role if role is not None else create_role(acl_section, role_id)
-def create_target(tree, target_id):
+def create_target(acl_section, target_id):
"""
Creates new acl_target element with id target_id.
     Raises LibraryError if target with specified id already exists.
- tree -- etree node
+ acl_section -- etree node
target_id -- id of new target
"""
- acl_el = get_acls(tree)
# id of element acl_target is not type ID in CIB ACL schema so we don't need
# to check if it is unique ID in whole CIB
- if acl_el.find("./acl_target[@id='{0}']".format(target_id)) is not None:
+ if(
+ acl_section.find("./{0}[@id='{1}']".format(TAG_TARGET, target_id))
+ is not None
+ ):
raise LibraryError(reports.acl_target_already_exists(target_id))
- return etree.SubElement(get_acls(tree), "acl_target", id=target_id)
+ return etree.SubElement(acl_section, TAG_TARGET, id=target_id)
-def create_group(tree, group_id):
+def create_group(acl_section, group_id):
"""
Creates new acl_group element with specified id.
Raises LibraryError if tree contains element with id group_id.
- tree -- etree node
+ acl_section -- etree node
group_id -- id of new group
"""
- check_new_id_applicable(tree, "ACL group", group_id)
- return etree.SubElement(get_acls(tree), "acl_group", id=group_id)
+ check_new_id_applicable(acl_section, "ACL group", group_id)
+ return etree.SubElement(acl_section, TAG_GROUP, id=group_id)
-def remove_target(tree, target_id):
+def remove_target(acl_section, target_id):
"""
- Removes acl_target element from tree with specified id.
+ Removes acl_target element from acl_section with specified id.
Raises LibraryError if target with id target_id doesn't exist.
- tree -- etree node
+ acl_section -- etree node
target_id -- id of target element to remove
"""
- try:
- target = find_target(tree, target_id)
- target.getparent().remove(target)
- except AclTargetNotFound:
- raise LibraryError(reports.id_not_found(target_id, "user"))
+ target = find_target(acl_section, target_id)
+ target.getparent().remove(target)
-def remove_group(tree, group_id):
+def remove_group(acl_section, group_id):
"""
Removes acl_group element from tree with specified id.
Raises LibraryError if group with id group_id doesn't exist.
- tree -- etree node
+ acl_section -- etree node
group_id -- id of group element to remove
"""
- try:
- group = find_group(tree, group_id)
- group.getparent().remove(group)
- except AclGroupNotFound:
- raise LibraryError(reports.id_not_found(group_id, "group"))
+ group = find_group(acl_section, group_id)
+ group.getparent().remove(group)
def add_permissions_to_role(role_el, permission_info_list):
@@ -294,20 +300,20 @@ def add_permissions_to_role(role_el, permission_info_list):
perm.set(area_type_attribute_map[scope_type], scope)
-def remove_permission(tree, permission_id):
+def remove_permission(acl_section, permission_id):
"""
- Remove permission with id permission_id from tree.
+ Remove permission with id permission_id from acl_section.
- tree -- etree node
+ acl_section -- etree node
permission_id -- id of permission element to be removed
"""
- permission = _find_permission(tree, permission_id)
+ permission = _find(TAG_PERMISSION, acl_section, permission_id)
permission.getparent().remove(permission)
-def get_role_list(tree):
+def get_role_list(acl_section):
"""
- Returns list of all acl_role elements from tree.
+ Returns list of all acl_role elements from acl_section.
Format of items of output list:
{
"id": <role-id>,
@@ -315,10 +321,10 @@ def get_role_list(tree):
"permission_list": [<see function _get_all_permission_list>, ...]
}
- tree -- etree node
+ acl_section -- etree node
"""
output_list = []
- for role_el in get_acls(tree).findall("./acl_role"):
+ for role_el in acl_section.findall("./{0}".format(TAG_ROLE)):
role = etree_element_attibutes_to_dict(
role_el, ["id", "description"]
)
@@ -356,7 +362,7 @@ def _get_permission_list(role_el):
return output_list
-def get_target_list(tree):
+def get_target_list(acl_section):
"""
Returns list of acl_target elements in format:
{
@@ -364,12 +370,12 @@ def get_target_list(tree):
"role_list": [<assign role_id as string>, ...]
}
- tree -- etree node
+ acl_section -- etree node
"""
- return _get_target_like_list_with_tag(tree, "acl_target")
+ return get_target_like_list(acl_section, TAG_TARGET)
-def get_group_list(tree):
+def get_group_list(acl_section):
"""
Returns list of acl_group elements in format:
{
@@ -377,14 +383,14 @@ def get_group_list(tree):
"role_list": [<assign role_id as string>, ...]
}
- tree -- etree node
+ acl_section -- etree node
"""
- return _get_target_like_list_with_tag(tree, "acl_group")
+ return get_target_like_list(acl_section, TAG_GROUP)
-def _get_target_like_list_with_tag(tree, tag):
+def get_target_like_list(acl_section, tag):
output_list = []
- for target_el in get_acls(tree).findall("./{0}".format(tag)):
+ for target_el in acl_section.findall("./{0}".format(tag)):
output_list.append({
"id": target_el.get("id"),
"role_list": _get_role_list_of_target(target_el),
@@ -421,13 +427,3 @@ def dom_remove_permissions_referencing(dom, reference):
for permission in dom.getElementsByTagName("acl_permission"):
if permission.getAttribute("reference") == reference:
permission.parentNode.removeChild(permission)
-
-
-def acl_error_to_report_item(e):
- if e.__class__ == AclTargetNotFound:
- return reports.id_not_found(e.target_id, "user")
- elif e.__class__ == AclGroupNotFound:
- return reports.id_not_found(e.group_id, "group")
- elif e.__class__ == AclRoleNotFound:
- return reports.id_not_found(e.role_id, "role")
- raise e
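The dedicated AclError subclasses are gone; lookups now fail with a LibraryError, and target elements take precedence over groups when ids collide. A dict-based sketch of that precedence rule, for illustration only (not the CIB/etree code):

    def find_target_or_group_sketch(acl_section, element_id):
        # acl_section is a plain dict {tag: {id: element}} standing in for the CIB
        target = acl_section.get("acl_target", {}).get(element_id)
        if target is not None:
            return target
        group = acl_section.get("acl_group", {}).get(element_id)
        if group is None:
            raise LookupError("user/group '{0}' not found".format(element_id))
        return group

    acl_section = {
        "acl_target": {"alice": {"tag": "acl_target", "id": "alice"}},
        "acl_group": {"alice": {"tag": "acl_group", "id": "alice"}},
    }
    print(find_target_or_group_sketch(acl_section, "alice")["tag"])
    # acl_target wins over acl_group with the same id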
diff --git a/pcs/lib/cib/alert.py b/pcs/lib/cib/alert.py
index c3a2cd9..3ed7186 100644
--- a/pcs/lib/cib/alert.py
+++ b/pcs/lib/cib/alert.py
@@ -10,16 +10,22 @@ from functools import partial
from pcs.common import report_codes
from pcs.lib import reports
-from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
+from pcs.lib.errors import ReportItemSeverity as Severities
from pcs.lib.cib.nvpair import arrange_first_nvset, get_nvset
from pcs.lib.cib.tools import (
check_new_id_applicable,
- get_sub_element,
find_unique_id,
get_alerts,
validate_id_does_not_exist,
+ find_element_by_tag_and_id,
)
+from pcs.lib.xml_tools import get_sub_element
+TAG_ALERT = "alert"
+TAG_RECIPIENT = "recipient"
+
+find_alert = partial(find_element_by_tag_and_id, TAG_ALERT)
+find_recipient = partial(find_element_by_tag_and_id, TAG_RECIPIENT)
update_instance_attributes = partial(
arrange_first_nvset,
@@ -43,38 +49,6 @@ def _update_optional_attribute(element, attribute, value):
elif attribute in element.attrib:
del element.attrib[attribute]
-
-def get_alert_by_id(tree, alert_id):
- """
- Returns alert element with specified id.
- Raises LibraryError if alert with specified id doesn't exist.
-
- tree -- cib etree node
- alert_id -- id of alert
- """
- alert = get_alerts(tree).find("./alert[@id='{0}']".format(alert_id))
- if alert is None:
- raise LibraryError(reports.cib_alert_not_found(alert_id))
- return alert
-
-
-def get_recipient_by_id(tree, recipient_id):
- """
- Returns recipient element with value recipient_value which belong to
- specified alert.
- Raises LibraryError if recipient doesn't exist.
-
- tree -- cib etree node
- recipient_id -- id of recipient
- """
- recipient = get_alerts(tree).find(
- "./alert/recipient[@id='{0}']".format(recipient_id)
- )
- if recipient is None:
- raise LibraryError(reports.id_not_found(recipient_id, "Recipient"))
- return recipient
-
-
def ensure_recipient_value_is_unique(
reporter, alert, recipient_value, recipient_id="", allow_duplicity=False
):
@@ -138,7 +112,7 @@ def update_alert(tree, alert_id, path, description=None):
description -- new value of description, stay unchanged if None, remove
if empty
"""
- alert = get_alert_by_id(tree, alert_id)
+ alert = find_alert(get_alerts(tree), alert_id)
if path:
alert.set("path", path)
_update_optional_attribute(alert, "description", description)
@@ -153,7 +127,7 @@ def remove_alert(tree, alert_id):
tree -- cib etree node
alert_id -- id of alert which should be removed
"""
- alert = get_alert_by_id(tree, alert_id)
+ alert = find_alert(get_alerts(tree), alert_id)
alert.getparent().remove(alert)
@@ -184,7 +158,7 @@ def add_recipient(
else:
validate_id_does_not_exist(tree, recipient_id)
- alert = get_alert_by_id(tree, alert_id)
+ alert = find_alert(get_alerts(tree), alert_id)
ensure_recipient_value_is_unique(
reporter, alert, recipient_value, allow_duplicity=allow_same_value
)
@@ -218,7 +192,7 @@ def update_recipient(
if None
allow_same_value -- if True unique recipient value is not required
"""
- recipient = get_recipient_by_id(tree, recipient_id)
+ recipient = find_recipient(get_alerts(tree), recipient_id)
if recipient_value is not None:
ensure_recipient_value_is_unique(
reporter,
@@ -240,7 +214,7 @@ def remove_recipient(tree, recipient_id):
tree -- cib etree node
recipient_id -- id of recipient to be removed
"""
- recipient = get_recipient_by_id(tree, recipient_id)
+ recipient = find_recipient(get_alerts(tree), recipient_id)
recipient.getparent().remove(recipient)
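The same pattern as in acl.py: one generic finder specialised per tag via functools.partial, instead of one getter per element type. A minimal generic sketch (hypothetical finder, not find_element_by_tag_and_id itself):

    from functools import partial

    def find_element_sketch(tag, parent, element_id):
        # parent is a plain dict {tag: {id: value}} standing in for an etree section
        element = parent.get(tag, {}).get(element_id)
        if element is None:
            raise LookupError("{0} '{1}' not found".format(tag, element_id))
        return element

    find_alert_sketch = partial(find_element_sketch, "alert")
    find_recipient_sketch = partial(find_element_sketch, "recipient")

    alerts = {"alert": {"alert-1": "path=/usr/bin/true"}}
    print(find_alert_sketch(alerts, "alert-1"))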
diff --git a/pcs/lib/cib/constraint/colocation.py b/pcs/lib/cib/constraint/colocation.py
index 9dd423a..3f121c2 100644
--- a/pcs/lib/cib/constraint/colocation.py
+++ b/pcs/lib/cib/constraint/colocation.py
@@ -11,7 +11,7 @@ from pcs.lib import reports
from pcs.lib.cib.constraint import constraint
from pcs.lib.cib.tools import check_new_id_applicable
from pcs.lib.errors import LibraryError
-from pcs.lib.pacemaker_values import is_score_value, SCORE_INFINITY
+from pcs.lib.pacemaker.values import is_score, SCORE_INFINITY
TAG_NAME = 'rsc_colocation'
DESCRIPTION = "constraint id"
@@ -25,7 +25,7 @@ def prepare_options_with_set(cib, options, resource_set_list):
partial(check_new_id_applicable, cib, DESCRIPTION),
)
- if "score" in options and not is_score_value(options["score"]):
+ if "score" in options and not is_score(options["score"]):
raise LibraryError(reports.invalid_score(options["score"]))
score_attrs_count = len([
diff --git a/pcs/lib/cib/constraint/constraint.py b/pcs/lib/cib/constraint/constraint.py
index 68939b2..d7c16bc 100644
--- a/pcs/lib/cib/constraint/constraint.py
+++ b/pcs/lib/cib/constraint/constraint.py
@@ -12,29 +12,42 @@ from pcs.common import report_codes
from pcs.lib import reports
from pcs.lib.cib import resource
from pcs.lib.cib.constraint import resource_set
-from pcs.lib.cib.tools import export_attributes, find_unique_id, find_parent
+from pcs.lib.cib.tools import (
+ find_unique_id,
+ find_element_by_tag_and_id,
+)
from pcs.lib.errors import LibraryError, ReportItemSeverity
+from pcs.lib.xml_tools import (
+ export_attributes,
+ find_parent,
+)
def _validate_attrib_names(attrib_names, options):
- for option_name in options.keys():
- if option_name not in attrib_names:
- raise LibraryError(
- reports.invalid_option(option_name, attrib_names, None)
- )
+ invalid_names = [
+ name for name in options.keys()
+ if name not in attrib_names
+ ]
+ if invalid_names:
+ raise LibraryError(
+ reports.invalid_option(invalid_names, attrib_names, None)
+ )
def find_valid_resource_id(
report_processor, cib, can_repair_to_clone, in_clone_allowed, id
):
- resource_element = resource.find_by_id(cib, id)
-
- if(resource_element is None):
- raise LibraryError(reports.resource_does_not_exist(id))
-
- if resource_element.tag in resource.TAGS_CLONE:
+ parent_tags = resource.clone.ALL_TAGS + [resource.bundle.TAG]
+ resource_element = find_element_by_tag_and_id(
+ parent_tags + [resource.primitive.TAG, resource.group.TAG],
+ cib,
+ id,
+ id_description="resource"
+ )
+
+ if resource_element.tag in parent_tags:
return resource_element.attrib["id"]
- clone = find_parent(resource_element, resource.TAGS_CLONE)
+ clone = find_parent(resource_element, parent_tags)
if clone is None:
return resource_element.attrib["id"]
diff --git a/pcs/lib/cib/constraint/resource_set.py b/pcs/lib/cib/constraint/resource_set.py
index f01edf1..1ef1c6c 100644
--- a/pcs/lib/cib/constraint/resource_set.py
+++ b/pcs/lib/cib/constraint/resource_set.py
@@ -8,11 +8,9 @@ from __future__ import (
from lxml import etree
from pcs.lib import reports
-from pcs.lib.cib.tools import (
- find_unique_id,
- export_attributes,
-)
+from pcs.lib.cib.tools import find_unique_id
from pcs.lib.errors import LibraryError
+from pcs.lib.xml_tools import export_attributes
ATTRIB = {
"sequential": ("true", "false"),
@@ -35,7 +33,7 @@ def validate_options(options):
for name, value in options.items():
if name not in ATTRIB:
raise LibraryError(
- reports.invalid_option(name, list(ATTRIB.keys()), None)
+ reports.invalid_option([name], list(ATTRIB.keys()), None)
)
if value not in ATTRIB[name]:
raise LibraryError(
diff --git a/pcs/lib/cib/constraint/ticket.py b/pcs/lib/cib/constraint/ticket.py
index 85d045c..ad5a64f 100644
--- a/pcs/lib/cib/constraint/ticket.py
+++ b/pcs/lib/cib/constraint/ticket.py
@@ -54,7 +54,7 @@ def prepare_options_with_set(cib, options, resource_set_list):
)
report = _validate_options_common(options)
if "ticket" not in options or not options["ticket"].strip():
- report.append(reports.required_option_is_missing('ticket'))
+ report.append(reports.required_option_is_missing(['ticket']))
if report:
raise LibraryError(*report)
return options
@@ -65,11 +65,11 @@ def prepare_options_plain(cib, options, ticket, resource_id):
report = _validate_options_common(options)
if not ticket:
- report.append(reports.required_option_is_missing('ticket'))
+ report.append(reports.required_option_is_missing(['ticket']))
options["ticket"] = ticket
if not resource_id:
- report.append(reports.required_option_is_missing('rsc'))
+ report.append(reports.required_option_is_missing(['rsc']))
options["rsc"] = resource_id
if "rsc-role" in options:
diff --git a/pcs/lib/cib/fencing_topology.py b/pcs/lib/cib/fencing_topology.py
new file mode 100644
index 0000000..7e2feb6
--- /dev/null
+++ b/pcs/lib/cib/fencing_topology.py
@@ -0,0 +1,322 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.common.fencing_topology import (
+ TARGET_TYPE_NODE,
+ TARGET_TYPE_REGEXP,
+ TARGET_TYPE_ATTRIBUTE,
+)
+from pcs.lib import reports
+from pcs.lib.cib.stonith import is_stonith_resource
+from pcs.lib.cib.tools import find_unique_id
+from pcs.lib.errors import ReportItemSeverity
+from pcs.lib.pacemaker.values import sanitize_id, validate_id
+
+def add_level(
+ reporter, topology_el, resources_el, level, target_type, target_value,
+ devices, cluster_status_nodes, force_device=False, force_node=False
+):
+ """
+ Validate and add a new fencing level. Raise LibraryError if not valid.
+
+ object reporter -- report processor
+ etree topology_el -- etree element to add the level to
+ etree resources_el -- etree element with resources definitions
+ int|string level -- level (index) of the new fencing level
+ constant target_type -- the new fencing level target value type
+ mixed target_value -- the new fencing level target value
+ Iterable devices -- list of stonith devices for the new fencing level
+ Iterable cluster_status_nodes -- list of status of existing cluster nodes
+ bool force_device -- continue even if a stonith device does not exist
+ bool force_node -- continue even if a node (target) does not exist
+ """
+ valid_level = _validate_level(reporter, level)
+ _validate_target(
+ reporter, cluster_status_nodes, target_type, target_value, force_node
+ )
+ _validate_devices(reporter, resources_el, devices, force_device)
+ reporter.send()
+ _validate_level_target_devices_does_not_exist(
+ reporter, topology_el, level, target_type, target_value, devices
+ )
+ reporter.send()
+ _append_level_element(
+ topology_el, valid_level, target_type, target_value, devices
+ )
+
+def remove_all_levels(topology_el):
+ """
+ Remove all fencing levels.
+ etree topology_el -- etree element to remove the levels from
+ """
+ for level_el in topology_el.findall("fencing-level"):
+ level_el.getparent().remove(level_el)
+
+def remove_levels_by_params(
+ reporter, topology_el, level=None, target_type=None, target_value=None,
+ devices=None, ignore_if_missing=False
+):
+ """
+ Remove specified fencing level(s). Raise LibraryError if not found.
+
+ object reporter -- report processor
+ etree topology_el -- etree element to remove the levels from
+    int|string level -- level (index) of the fencing level to remove
+    constant target_type -- target value type of the fencing level to remove
+    mixed target_value -- target value of the fencing level to remove
+    Iterable devices -- list of stonith devices of the fencing level to remove
+ bool ignore_if_missing -- when True, do not raise if level not found
+ """
+ if target_type:
+ _validate_target_typewise(reporter, target_type)
+ reporter.send()
+
+ level_el_list = _find_level_elements(
+ topology_el, level, target_type, target_value, devices
+ )
+
+ if not level_el_list:
+ if ignore_if_missing:
+ return
+ reporter.process(reports.fencing_level_does_not_exist(
+ level, target_type, target_value, devices
+ ))
+ for el in level_el_list:
+ el.getparent().remove(el)
+
+def remove_device_from_all_levels(topology_el, device_id):
+ """
+ Remove specified stonith device from all fencing levels.
+
+ etree topology_el -- etree element with levels to remove the device from
+ string device_id -- stonith device to remove
+ """
+ for level_el in topology_el.findall("fencing-level"):
+ new_devices = [
+ dev
+ for dev in level_el.get("devices").split(",")
+ if dev != device_id
+ ]
+ if new_devices:
+ level_el.set("devices", ",".join(new_devices))
+ else:
+ level_el.getparent().remove(level_el)
+
+def export(topology_el):
+ """
+ Export all fencing levels.
+
+ Return a list of levels where each level is a dict with keys: target_type,
+    target_value, level and devices. Devices is a list of stonith device ids.
+
+ etree topology_el -- etree element to export
+ """
+ export_levels = []
+ for level_el in topology_el.iterfind("fencing-level"):
+ target_type = target_value = None
+ if "target" in level_el.attrib:
+ target_type = TARGET_TYPE_NODE
+ target_value = level_el.get("target")
+ elif "target-pattern" in level_el.attrib:
+ target_type = TARGET_TYPE_REGEXP
+ target_value = level_el.get("target-pattern")
+ elif "target-attribute" in level_el.attrib:
+ target_type = TARGET_TYPE_ATTRIBUTE
+ target_value = (
+ level_el.get("target-attribute"),
+ level_el.get("target-value")
+ )
+ if target_type and target_value:
+ export_levels.append({
+ "target_type": target_type,
+ "target_value": target_value,
+ "level": level_el.get("index"),
+ "devices": level_el.get("devices").split(",")
+ })
+ return export_levels
+
+def verify(reporter, topology_el, resources_el, cluster_status_nodes):
+ """
+ Check if all cluster nodes and stonith devices used in fencing levels exist.
+
+    All errors are stored in the passed reporter. The calling function is
+    responsible for processing the report.
+
+ object reporter -- report processor
+ etree topology_el -- etree element with fencing levels to check
+ etree resources_el -- etree element with resources definitions
+ Iterable cluster_status_nodes -- list of status of existing cluster nodes
+ """
+ used_nodes = set()
+ used_devices = set()
+
+ for level_el in topology_el.iterfind("fencing-level"):
+ used_devices.update(level_el.get("devices").split(","))
+ if "target" in level_el.attrib:
+ used_nodes.add(level_el.get("target"))
+
+ if used_devices:
+ _validate_devices(
+ reporter,
+ resources_el,
+ sorted(used_devices),
+ allow_force=False
+ )
+
+ for node in sorted(used_nodes):
+ _validate_target_valuewise(
+ reporter,
+ cluster_status_nodes,
+ TARGET_TYPE_NODE,
+ node,
+ allow_force=False
+ )
+
+def _validate_level(reporter, level):
+ try:
+ candidate = int(level)
+ if candidate > 0:
+ return candidate
+ except ValueError:
+ pass
+ reporter.append(
+ reports.invalid_option_value("level", level, "a positive integer")
+ )
+
+def _validate_target(
+ reporter, cluster_status_nodes, target_type, target_value,
+ force_node=False
+):
+ _validate_target_typewise(reporter, target_type)
+ _validate_target_valuewise(
+ reporter, cluster_status_nodes, target_type, target_value, force_node
+ )
+
+def _validate_target_typewise(reporter, target_type):
+ if target_type not in [
+ TARGET_TYPE_NODE, TARGET_TYPE_ATTRIBUTE, TARGET_TYPE_REGEXP
+ ]:
+ reporter.append(reports.invalid_option_type(
+ "target",
+ ["node", "regular expression", "attribute_name=value"]
+ ))
+
+def _validate_target_valuewise(
+ reporter, cluster_status_nodes, target_type, target_value, force_node=False,
+ allow_force=True
+):
+ if target_type == TARGET_TYPE_NODE:
+ node_found = False
+ for node in cluster_status_nodes:
+ if target_value == node.attrs.name:
+ node_found = True
+ break
+ if not node_found:
+ reporter.append(
+ reports.node_not_found(
+ target_value,
+ severity=ReportItemSeverity.WARNING
+ if force_node and allow_force
+ else ReportItemSeverity.ERROR
+ ,
+ forceable=None if force_node or not allow_force
+ else report_codes.FORCE_NODE_DOES_NOT_EXIST
+ )
+ )
+
+def _validate_devices(
+ reporter, resources_el, devices, force_device=False, allow_force=True
+):
+ if not devices:
+ reporter.append(
+ reports.required_option_is_missing(["stonith devices"])
+ )
+ invalid_devices = []
+ for dev in devices:
+ errors = reporter.errors_count
+ validate_id(dev, description="device id", reporter=reporter)
+ if reporter.errors_count > errors:
+ continue
+ # TODO use the new finding function
+ if not is_stonith_resource(resources_el, dev):
+ invalid_devices.append(dev)
+ if invalid_devices:
+ reporter.append(
+ reports.stonith_resources_do_not_exist(
+ invalid_devices,
+ ReportItemSeverity.WARNING if force_device and allow_force
+ else ReportItemSeverity.ERROR
+ ,
+ None if force_device or not allow_force
+ else report_codes.FORCE_STONITH_RESOURCE_DOES_NOT_EXIST
+ )
+ )
+
+def _validate_level_target_devices_does_not_exist(
+ reporter, tree, level, target_type, target_value, devices
+):
+ if _find_level_elements(tree, level, target_type, target_value, devices):
+ reporter.append(
+ reports.fencing_level_already_exists(
+ level, target_type, target_value, devices
+ )
+ )
+
+def _append_level_element(tree, level, target_type, target_value, devices):
+ level_el = etree.SubElement(
+ tree,
+ "fencing-level",
+ index=str(level),
+ devices=",".join(devices)
+ )
+ if target_type == TARGET_TYPE_NODE:
+ level_el.set("target", target_value)
+ id_part = target_value
+ elif target_type == TARGET_TYPE_REGEXP:
+ level_el.set("target-pattern", target_value)
+ id_part = target_value
+ elif target_type == TARGET_TYPE_ATTRIBUTE:
+ level_el.set("target-attribute", target_value[0])
+ level_el.set("target-value", target_value[1])
+ id_part = target_value[0]
+ level_el.set(
+ "id",
+ find_unique_id(tree, sanitize_id("fl-{0}-{1}".format(id_part, level)))
+ )
+ return level_el
+
+def _find_level_elements(
+ tree, level=None, target_type=None, target_value=None, devices=None
+):
+ xpath_target = ""
+ if target_type and target_value:
+ if target_type == TARGET_TYPE_NODE:
+ xpath_target = "@target='{0}'".format(target_value)
+ elif target_type == TARGET_TYPE_REGEXP:
+ xpath_target = "@target-pattern='{0}'".format(target_value)
+ elif target_type == TARGET_TYPE_ATTRIBUTE:
+ xpath_target = (
+ "@target-attribute='{0}' and @target-value='{1}'".format(
+ target_value[0], target_value[1]
+ )
+ )
+ xpath_devices = ""
+ if devices:
+ xpath_devices = "@devices='{0}'".format(",".join(devices))
+ xpath_level = ""
+ if level:
+ xpath_level = "@index='{0}'".format(level)
+
+ xpath_attrs = " and ".join(
+ filter(None, [xpath_level, xpath_devices, xpath_target])
+ )
+ if xpath_attrs:
+ return tree.xpath("fencing-level[{0}]".format(xpath_attrs))
+ return tree.findall("fencing-level")
diff --git a/pcs/lib/cib/node.py b/pcs/lib/cib/node.py
new file mode 100644
index 0000000..c5b059f
--- /dev/null
+++ b/pcs/lib/cib/node.py
@@ -0,0 +1,91 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib import reports
+from pcs.lib.cib.nvpair import update_nvset
+from pcs.lib.cib.tools import get_nodes, find_unique_id
+from pcs.lib.errors import LibraryError
+
+
+def update_node_instance_attrs(cib, node_name, attrs, state_nodes=None):
+ """
+ Update nvpairs in instance_attributes for a node specified by its name.
+
+ Automatically creates instance_attributes element if needed. If the node has
+ more than one instance_attributes element, the first one is modified. If the
+ node is missing in the CIB, it is automatically created if its state is
+ provided in state_nodes.
+
+ etree cib -- cib
+ string node_name -- name of the node to be updated
+ dict attrs -- attrs to update, e.g. {'A': 'a', 'B': ''}
+ iterable state_nodes -- optional list of node state objects
+ """
+ node_el = _ensure_node_exists(get_nodes(cib), node_name, state_nodes)
+ # If no instance_attributes id is specified, crm_attribute modifies the
+ # first one found. So we just mimic this behavior here.
+ attrs_el = node_el.find("./instance_attributes")
+ if attrs_el is None:
+ attrs_el = etree.SubElement(
+ node_el,
+ "instance_attributes",
+ id=find_unique_id(cib, "nodes-{0}".format(node_el.get("id")))
+ )
+ update_nvset(attrs_el, attrs)
+
+def _ensure_node_exists(tree, node_name, state_nodes=None):
+ """
+ Make sure node with specified name exists in the tree.
+
+ If the node doesn't exist, raise LibraryError. If state_nodes is provided
+ and contains state of a node with the specified name, create the node in
+ the tree. Return existing or created node element.
+
+ etree tree -- node parent element
+    string node_name -- node name
+ iterable state_nodes -- optional list of node state objects
+ """
+ node_el = _get_node_by_uname(tree, node_name)
+ if node_el is None and state_nodes:
+ for node_state in state_nodes:
+ if node_state.attrs.name == node_name:
+ node_el = _create_node(
+ tree,
+ node_state.attrs.id,
+ node_state.attrs.name,
+ node_state.attrs.type
+ )
+ break
+ if node_el is None:
+ raise LibraryError(reports.node_not_found(node_name))
+ return node_el
+
+def _get_node_by_uname(tree, uname):
+ """
+ Return a node element with specified uname in the tree or None if not found
+
+ etree tree -- node parent element
+ string uname -- node name
+ """
+ return tree.find("./node[@uname='{0}']".format(uname))
+
+def _create_node(tree, node_id, uname, node_type=None):
+ """
+ Create new node element as a direct child of the tree element
+
+ etree tree -- node parent element
+ string node_id -- node id
+ string uname -- node name
+ string node_type -- optional node type (normal, member, ping, remote)
+ """
+ node = etree.SubElement(tree, "node", id=node_id, uname=uname)
+ if node_type:
+ node.set("type", node_type)
+ return node
+
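The attrs dict taken by update_node_instance_attrs follows the usual nvset convention, e.g. {'A': 'a', 'B': ''} sets A and removes B. A rough dict-based illustration of that convention (illustration only, not the CIB code):

    def apply_nvset_update_sketch(current, attrs):
        # Mirrors the nvset convention: non-empty values are set, empty values
        # remove the corresponding nvpair (compare update_nvset in nvpair.py).
        for name, value in sorted(attrs.items()):
            if value:
                current[name] = value
            else:
                current.pop(name, None)
        return current

    print(apply_nvset_update_sketch({"B": "b", "C": "c"}, {"A": "a", "B": ""}))
    # A is set, B is removed, C is kept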
diff --git a/pcs/lib/cib/nvpair.py b/pcs/lib/cib/nvpair.py
index fad1ffa..261d17c 100644
--- a/pcs/lib/cib/nvpair.py
+++ b/pcs/lib/cib/nvpair.py
@@ -6,12 +6,26 @@ from __future__ import (
)
from lxml import etree
+from functools import partial
-from pcs.lib.cib.tools import (
- get_sub_element,
- create_subelement_id,
-)
+from pcs.lib.cib.tools import create_subelement_id
+from pcs.lib.xml_tools import get_sub_element
+
+def _append_new_nvpair(nvset_element, name, value):
+ """
+ Create nvpair with name and value as subelement of nvset_element.
+ etree.Element nvset_element is context of new nvpair
+ string name is name attribute of new nvpair
+ string value is value attribute of new nvpair
+ """
+ etree.SubElement(
+ nvset_element,
+ "nvpair",
+ id=create_subelement_id(nvset_element, name),
+ name=name,
+ value=value
+ )
def set_nvpair_in_nvset(nvset_element, name, value):
"""
@@ -25,38 +39,29 @@ def set_nvpair_in_nvset(nvset_element, name, value):
nvpair = nvset_element.find("./nvpair[@name='{0}']".format(name))
if nvpair is None:
if value:
- etree.SubElement(
- nvset_element,
- "nvpair",
- id=create_subelement_id(nvset_element, name),
- name=name,
- value=value
- )
+ _append_new_nvpair(nvset_element, name, value)
else:
if value:
nvpair.set("value", value)
else:
nvset_element.remove(nvpair)
-def arrange_first_nvset(tag_name, context_element, attribute_dict):
+def arrange_first_nvset(tag_name, context_element, nvpair_dict):
"""
- Arrange to context_element contains some nvset (with tag_name) with nvpairs
- corresponing to attribute_dict.
+ Put nvpairs to the first tag_name nvset in the context_element.
+
+ If the nvset does not exist, it will be created.
- WARNING: does not solve multiple nvset (with tag_name) under
- context_element! Consider carefully if this is your case. Probably not.
+ WARNING: does not solve multiple nvsets (with the same tag_name) in the
+ context_element! Consider carefully if this is your use case. Probably not.
There could be more than one nvset.
This function is DEPRECATED. Try to use update_nvset etc.
- This method updates nvset specified by tag_name. If specified nvset
- doesn't exist it will be created. Returns updated nvset element or None if
- attribute_dict is empty.
-
- tag_name -- tag name of nvset element
- context_element -- parent element of nvset
- attribute_dict -- dictionary of nvpairs
+ string tag_name -- tag name of nvset element
+ etree context_element -- parent element of nvset
+ dict nvpair_dict -- dictionary of nvpairs
"""
- if not attribute_dict:
+ if not nvpair_dict:
return
nvset_element = get_sub_element(
@@ -66,11 +71,47 @@ def arrange_first_nvset(tag_name, context_element, attribute_dict):
new_index=0
)
- update_nvset(nvset_element, attribute_dict)
+ update_nvset(nvset_element, nvpair_dict)
+
+def append_new_nvset(tag_name, context_element, nvpair_dict):
+ """
+ Append a new nvset element, with nvpair children corresponding to
+ nvpair_dict, to the context_element
+
+ string tag_name should be "instance_attributes" or "meta_attributes"
+ etree.Element context_element is element where new nvset will be appended
+ dict nvpair_dict contains source for nvpair children
+ """
+ nvset_element = etree.SubElement(context_element, tag_name, {
+ "id": create_subelement_id(context_element, tag_name)
+ })
+ for name, value in sorted(nvpair_dict.items()):
+ _append_new_nvpair(nvset_element, name, value)
+
+append_new_instance_attributes = partial(
+ append_new_nvset,
+ "instance_attributes"
+)
+
+append_new_meta_attributes = partial(
+ append_new_nvset,
+ "meta_attributes"
+)
-def update_nvset(nvset_element, attribute_dict):
- for name, value in sorted(attribute_dict.items()):
+def update_nvset(nvset_element, nvpair_dict):
+ """
+ Add, remove or update nvpairs in nvset_element according to nvpair_dict
+
+ If the resulting nvset is empty, it will be removed.
+
+ etree nvset_element -- container where nvpairs are set
+ dict nvpair_dict -- contains source for nvpair children
+ """
+ for name, value in sorted(nvpair_dict.items()):
set_nvpair_in_nvset(nvset_element, name, value)
+ # remove an empty nvset
+ if not list(nvset_element):
+ nvset_element.getparent().remove(nvset_element)
def get_nvset(nvset):
"""
@@ -94,3 +135,43 @@ def get_nvset(nvset):
"value": nvpair.get("value", "")
})
return nvpair_list
+
+def get_value(tag_name, context_element, name, default=None):
+ """
+ Return value from nvpair.
+
+ WARNING: does not solve multiple nvsets (with the same tag_name) in the
+ context_element nor multiple nvpairs with the same name
+
+ string tag_name should be "instance_attributes" or "meta_attributes"
+ etree.Element context_element is the searched element
+ string name specifies the nvpair name
+ """
+ value_list = context_element.xpath("""
+ ./{0}
+ /nvpair[
+ @name="{1}"
+ and
+ string-length(@value) > 0
+ ]
+ /@value
+ """.format(tag_name, name))
+ return value_list[0] if value_list else default
+
+def has_meta_attribute(resource_el, name):
+ """
+ Return True if the element contains a meta attribute 'name', False otherwise
+
+ etree.Element resource_el is the searched element
+ string name specifies the meta attribute name
+ """
+ return 0 < len(resource_el.xpath(
+ './meta_attributes/nvpair[@name="{0}"]'.format(name)
+ ))
+
+arrange_first_meta_attributes = partial(
+ arrange_first_nvset,
+ "meta_attributes"
+)
+
+get_meta_attribute_value = partial(get_value, "meta_attributes")
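
A rough usage sketch of the new nvpair helpers (illustrative only, not part of the diff; it assumes pcs is importable and the ids are arbitrary):

    from lxml import etree
    from pcs.lib.cib.nvpair import get_value, has_meta_attribute, update_nvset

    resource = etree.fromstring(
        '<primitive id="A">'
        '<meta_attributes id="A-meta_attributes">'
        '<nvpair id="A-meta_attributes-target-role"'
        ' name="target-role" value="Stopped"/>'
        '</meta_attributes>'
        '</primitive>'
    )

    print(has_meta_attribute(resource, "target-role"))            # True
    print(get_value("meta_attributes", resource, "target-role"))  # Stopped

    # an empty value removes the nvpair; an nvset left empty is removed as well
    update_nvset(resource.find("meta_attributes"), {"target-role": ""})
    print(has_meta_attribute(resource, "target-role"))            # False
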
diff --git a/pcs/lib/cib/resource.py b/pcs/lib/cib/resource.py
deleted file mode 100644
index ed692f6..0000000
--- a/pcs/lib/cib/resource.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from __future__ import (
- absolute_import,
- division,
- print_function,
- unicode_literals,
-)
-
-TAGS_CLONE = "clone", "master"
-TAGS_ALL = TAGS_CLONE + ("primitive", "group")
-
-def find_by_id(tree, id):
- for element in tree.findall('.//*[@id="{0}"]'.format(id)):
- if element is not None and element.tag in TAGS_ALL:
- return element
- return None
diff --git a/pcs/lib/cib/resource/__init__.py b/pcs/lib/cib/resource/__init__.py
new file mode 100644
index 0000000..803bb8b
--- /dev/null
+++ b/pcs/lib/cib/resource/__init__.py
@@ -0,0 +1,17 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.lib.cib.resource import (
+ bundle,
+ clone,
+ common,
+ group,
+ guest_node,
+ operations,
+ primitive,
+ remote_node,
+)
diff --git a/pcs/lib/cib/resource/bundle.py b/pcs/lib/cib/resource/bundle.py
new file mode 100644
index 0000000..0fe16f3
--- /dev/null
+++ b/pcs/lib/cib/resource/bundle.py
@@ -0,0 +1,516 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib import reports, validate
+from pcs.lib.cib.resource.primitive import TAG as TAG_PRIMITIVE
+from pcs.lib.cib.tools import find_element_by_tag_and_id
+from pcs.lib.errors import (
+ LibraryError,
+ ReportListAnalyzer,
+)
+from pcs.lib.pacemaker.values import sanitize_id
+from pcs.lib.xml_tools import (
+ get_sub_element,
+ update_attributes_remove_empty,
+)
+
+TAG = "bundle"
+
+_docker_options = set((
+ "image",
+ "masters",
+ "network",
+ "options",
+ "run-command",
+ "replicas",
+ "replicas-per-host",
+))
+
+_network_options = set((
+ "control-port",
+ "host-interface",
+ "host-netmask",
+ "ip-range-start",
+))
+
+def is_bundle(resource_el):
+ return resource_el.tag == TAG
+
+def validate_new(
+ id_provider, bundle_id, container_type, container_options, network_options,
+ port_map, storage_map, force_options=False
+):
+ """
+ Validate new bundle parameters, return list of report items
+
+ IdProvider id_provider -- elements' ids generator and uniqueness checker
+ string bundle_id -- id of the bundle
+ string container_type -- bundle container type
+ dict container_options -- container options
+ dict network_options -- network options
+ list of dict port_map -- list of port mapping options
+ list of dict storage_map -- list of storage mapping options
+ bool force_options -- return warnings instead of forceable errors
+ """
+ report_list = []
+
+ report_list.extend(
+ validate.run_collection_of_option_validators(
+ {"id": bundle_id},
+ [
+ # with id_provider it validates that the id is available as well
+ validate.value_id("id", "bundle name", id_provider),
+ ]
+ )
+ )
+
+ aux_reports = _validate_container_type(container_type)
+ report_list.extend(aux_reports)
+ if not ReportListAnalyzer(aux_reports).error_list:
+ report_list.extend(
+ # TODO call the proper function once more container_types are
+ # supported by pacemaker
+ _validate_container_docker_options_new(
+ container_options,
+ force_options
+ )
+ )
+ report_list.extend(
+ _validate_network_options_new(network_options, force_options)
+ )
+ report_list.extend(
+ _validate_port_map_list(port_map, id_provider, force_options)
+ )
+ report_list.extend(
+ _validate_storage_map_list(storage_map, id_provider, force_options)
+ )
+
+ return report_list
+
+def append_new(
+ parent_element, id_provider, bundle_id, container_type, container_options,
+ network_options, port_map, storage_map
+):
+ """
+ Create new bundle and add it to the CIB
+
+ etree parent_element -- the bundle will be appended to this element
+ IdProvider id_provider -- elements' ids generator
+ string bundle_id -- id of the bundle
+ string container_type -- bundle container type
+ dict container_options -- container options
+ dict network_options -- network options
+ list of dict port_map -- list of port mapping options
+ list of dict storage_map -- list of storage mapping options
+ """
+ bundle_element = etree.SubElement(parent_element, TAG, {"id": bundle_id})
+ # TODO create the proper element once more container_types are supported
+ # by pacemaker
+ docker_element = etree.SubElement(bundle_element, "docker")
+ # Do not add options with empty values. When updating, an empty value means
+ # remove the option.
+ update_attributes_remove_empty(docker_element, container_options)
+ if network_options or port_map:
+ network_element = etree.SubElement(bundle_element, "network")
+ # Do not add options with empty values. When updating, an empty value
+ # means remove the option.
+ update_attributes_remove_empty(network_element, network_options)
+ for port_map_options in port_map:
+ _append_port_map(
+ network_element, id_provider, bundle_id, port_map_options
+ )
+ if storage_map:
+ storage_element = etree.SubElement(bundle_element, "storage")
+ for storage_map_options in storage_map:
+ _append_storage_map(
+ storage_element, id_provider, bundle_id, storage_map_options
+ )
+ return bundle_element
+
+def validate_update(
+ id_provider, bundle_el, container_options, network_options,
+ port_map_add, port_map_remove, storage_map_add, storage_map_remove,
+ force_options=False
+):
+ """
+ Validate modifying an existing bundle, return list of report items
+
+ IdProvider id_provider -- elements' ids generator and uniqueness checker
+ etree bundle_el -- the bundle to be updated
+ dict container_options -- container options to modify
+ dict network_options -- network options to modify
+ list of dict port_map_add -- list of port mapping options to add
+ list of string port_map_remove -- list of port mapping ids to remove
+ list of dict storage_map_add -- list of storage mapping options to add
+ list of string storage_map_remove -- list of storage mapping ids to remove
+ bool force_options -- return warnings instead of forceable errors
+ """
+ report_list = []
+
+ container_el = _get_container_element(bundle_el)
+ if container_el.tag == "docker":
+ # TODO call the proper function once more container types are
+ # supported by pacemaker
+ report_list.extend(
+ _validate_container_docker_options_update(
+ container_el,
+ container_options,
+ force_options
+ )
+ )
+
+ network_el = bundle_el.find("network")
+ if network_el is None:
+ report_list.extend(
+ _validate_network_options_new(network_options, force_options)
+ )
+ else:
+ report_list.extend(
+ _validate_network_options_update(
+ network_el,
+ network_options,
+ force_options
+ )
+ )
+
+ # TODO It will probably be needed to split the following validators to
+ # create and update variants. It should be done once the need exists and
+ # not sooner.
+ report_list.extend(
+ _validate_port_map_list(port_map_add, id_provider, force_options)
+ )
+ report_list.extend(
+ _validate_storage_map_list(storage_map_add, id_provider, force_options)
+ )
+ report_list.extend(
+ _validate_map_ids_exist(
+ bundle_el, "port-mapping", "port-map", port_map_remove
+ )
+ )
+ report_list.extend(
+ _validate_map_ids_exist(
+ bundle_el, "storage-mapping", "storage-map", storage_map_remove
+ )
+ )
+ return report_list
+
+def update(
+ id_provider, bundle_el, container_options, network_options,
+ port_map_add, port_map_remove, storage_map_add, storage_map_remove
+):
+ """
+ Modify an existing bundle (does not touch encapsulated resources)
+
+ IdProvider id_provider -- elements' ids generator and uniqueness checker
+ etree bundle_el -- the bundle to be updated
+ dict container_options -- container options to modify
+ dict network_options -- network options to modify
+ list of dict port_map_add -- list of port mapping options to add
+ list of string port_map_remove -- list of port mapping ids to remove
+ list of dict storage_map_add -- list of storage mapping options to add
+ list of string storage_map_remove -- list of storage mapping ids to remove
+ """
+ bundle_id = bundle_el.get("id")
+ update_attributes_remove_empty(
+ _get_container_element(bundle_el),
+ container_options
+ )
+
+ network_element = get_sub_element(bundle_el, "network")
+ if network_options:
+ update_attributes_remove_empty(network_element, network_options)
+ # It's crucial to remove port maps prior to appending new ones: if we are
+ # adding a port map which in any way conflicts with another one that is
+ # being removed in the very same command, the removal must be done first,
+ # otherwise the conflict would manifest itself (and then possibly the old
+ # mapping would be removed)
+ if port_map_remove:
+ _remove_map_elements(
+ network_element.findall("port-mapping"),
+ port_map_remove
+ )
+ for port_map_options in port_map_add:
+ _append_port_map(
+ network_element, id_provider, bundle_id, port_map_options
+ )
+
+ storage_element = get_sub_element(bundle_el, "storage")
+ # See the comment above about removing port maps prior to adding new ones.
+ if storage_map_remove:
+ _remove_map_elements(
+ storage_element.findall("storage-mapping"),
+ storage_map_remove
+ )
+ for storage_map_options in storage_map_add:
+ _append_storage_map(
+ storage_element, id_provider, bundle_id, storage_map_options
+ )
+
+ # remove empty elements with no attributes
+ for element in (network_element, storage_element):
+ if len(element) < 1 and not element.attrib:
+ element.getparent().remove(element)
+
+def add_resource(bundle_element, primitive_element):
+ """
+ Add an existing resource to an existing bundle
+
+ etree bundle_element -- where to add the resource to
+ etree primitive_element -- the resource to be added to the bundle
+ """
+ # TODO possibly split to 'validate' and 'do' functions
+ # a bundle may currently contain at most one primitive resource
+ inner_primitive = bundle_element.find(TAG_PRIMITIVE)
+ if inner_primitive is not None:
+ raise LibraryError(reports.resource_bundle_already_contains_a_resource(
+ bundle_element.get("id"), inner_primitive.get("id")
+ ))
+ bundle_element.append(primitive_element)
+
+def get_inner_resource(bundle_el):
+ resources = bundle_el.xpath("./primitive")
+ if resources:
+ return resources[0]
+ return None
+
+def _validate_container_type(container_type):
+ return validate.value_in("type", ("docker", ), "container type")({
+ "type": container_type,
+ })
+
+def _validate_container_docker_options_new(options, force_options):
+ validators = [
+ validate.is_required("image", "container"),
+ validate.value_not_empty("image", "image name"),
+ validate.value_nonnegative_integer("masters"),
+ validate.value_positive_integer("replicas"),
+ validate.value_positive_integer("replicas-per-host"),
+ ]
+ return (
+ validate.run_collection_of_option_validators(options, validators)
+ +
+ validate.names_in(
+ _docker_options,
+ options.keys(),
+ "container",
+ report_codes.FORCE_OPTIONS,
+ force_options
+ )
+ )
+
+def _validate_container_docker_options_update(
+ docker_el, options, force_options
+):
+ validators = [
+ # image is a mandatory attribute and cannot be removed
+ validate.value_not_empty("image", "image name"),
+ validate.value_empty_or_valid(
+ "masters",
+ validate.value_nonnegative_integer("masters")
+ ),
+ validate.value_empty_or_valid(
+ "replicas",
+ validate.value_positive_integer("replicas")
+ ),
+ validate.value_empty_or_valid(
+ "replicas-per-host",
+ validate.value_positive_integer("replicas-per-host")
+ ),
+ ]
+ return (
+ validate.run_collection_of_option_validators(options, validators)
+ +
+ validate.names_in(
+ # allow to remove options even if they are not allowed
+ _docker_options | _options_to_remove(options),
+ options.keys(),
+ "container",
+ report_codes.FORCE_OPTIONS,
+ force_options
+ )
+ )
+
+def _validate_network_options_new(options, force_options):
+ validators = [
+ # TODO add validators for other keys (ip-range-start - IPv4)
+ validate.value_port_number("control-port"),
+ _value_host_netmask("host-netmask", force_options),
+ ]
+ return (
+ validate.run_collection_of_option_validators(options, validators)
+ +
+ validate.names_in(
+ _network_options,
+ options.keys(),
+ "network",
+ report_codes.FORCE_OPTIONS,
+ force_options
+ )
+ )
+
+def _validate_network_options_update(network_el, options, force_options):
+ validators = [
+ # TODO add validators for other keys (ip-range-start - IPv4)
+ validate.value_empty_or_valid(
+ "control-port",
+ validate.value_port_number("control-port"),
+ ),
+ validate.value_empty_or_valid(
+ "host-netmask",
+ _value_host_netmask("host-netmask", force_options),
+ ),
+ ]
+ return (
+ validate.run_collection_of_option_validators(options, validators)
+ +
+ validate.names_in(
+ # allow to remove options even if they are not allowed
+ _network_options | _options_to_remove(options),
+ options.keys(),
+ "network",
+ report_codes.FORCE_OPTIONS,
+ force_options
+ )
+ )
+
+def _validate_port_map_list(options_list, id_provider, force_options):
+ allowed_options = [
+ "id",
+ "port",
+ "internal-port",
+ "range",
+ ]
+ validators = [
+ validate.value_id("id", "port-map id", id_provider),
+ validate.depends_on_option(
+ "internal-port", "port", "port-map", "port-map"
+ ),
+ validate.is_required_some_of(["port", "range"], "port-map"),
+ validate.mutually_exclusive(["port", "range"], "port-map"),
+ validate.value_port_number("port"),
+ validate.value_port_number("internal-port"),
+ validate.value_port_range(
+ "range",
+ code_to_allow_extra_values=report_codes.FORCE_OPTIONS,
+ allow_extra_values=force_options
+ ),
+ ]
+ report_list = []
+ for options in options_list:
+ report_list.extend(
+ validate.run_collection_of_option_validators(options, validators)
+ +
+ validate.names_in(
+ allowed_options,
+ options.keys(),
+ "port-map",
+ report_codes.FORCE_OPTIONS,
+ force_options
+ )
+ )
+ return report_list
+
+def _validate_storage_map_list(options_list, id_provider, force_options):
+ allowed_options = [
+ "id",
+ "options",
+ "source-dir",
+ "source-dir-root",
+ "target-dir",
+ ]
+ source_dir_options = ["source-dir", "source-dir-root"]
+ validators = [
+ validate.value_id("id", "storage-map id", id_provider),
+ validate.is_required_some_of(source_dir_options, "storage-map"),
+ validate.mutually_exclusive(source_dir_options, "storage-map"),
+ validate.is_required("target-dir", "storage-map"),
+ ]
+ report_list = []
+ for options in options_list:
+ report_list.extend(
+ validate.run_collection_of_option_validators(options, validators)
+ +
+ validate.names_in(
+ allowed_options,
+ options.keys(),
+ "storage-map",
+ report_codes.FORCE_OPTIONS,
+ force_options
+ )
+ )
+ return report_list
+
+def _validate_map_ids_exist(bundle_el, map_type, map_label, id_list):
+ report_list = []
+ for id in id_list:
+ try:
+ find_element_by_tag_and_id(
+ map_type, bundle_el, id, id_description=map_label
+ )
+ except LibraryError as e:
+ report_list.extend(e.args)
+ return report_list
+
+def _value_host_netmask(option_name, force_options):
+ return validate.value_cond(
+ option_name,
+ lambda value: validate.is_integer(value, 1, 32),
+ "a number of bits of the mask (1-32)",
+ # Leaving a possibility to force this validation, if pacemaker
+ # starts supporting IPv6 or other format of the netmask
+ code_to_allow_extra_values=report_codes.FORCE_OPTIONS,
+ allow_extra_values=force_options
+ )
+
+def _append_port_map(parent_element, id_provider, id_base, port_map_options):
+ if "id" not in port_map_options:
+ id_suffix = None
+ if "port" in port_map_options:
+ id_suffix = port_map_options["port"]
+ elif "range" in port_map_options:
+ id_suffix = port_map_options["range"]
+ if id_suffix:
+ port_map_options["id"] = id_provider.allocate_id(
+ sanitize_id("{0}-port-map-{1}".format(id_base, id_suffix))
+ )
+ port_map_element = etree.SubElement(parent_element, "port-mapping")
+ # Do not add options with empty values. When updating, an empty value means
+ # remove the option.
+ update_attributes_remove_empty(port_map_element, port_map_options)
+ return port_map_element
+
+def _append_storage_map(
+ parent_element, id_provider, id_base, storage_map_options
+):
+ if "id" not in storage_map_options:
+ storage_map_options["id"] = id_provider.allocate_id(
+ # use just numbers to keep the ids reasonably short
+ "{0}-storage-map".format(id_base)
+ )
+ storage_map_element = etree.SubElement(parent_element, "storage-mapping")
+ # Do not add options with empty values. When updating, an empty value means
+ # remove the option.
+ update_attributes_remove_empty(storage_map_element, storage_map_options)
+ return storage_map_element
+
+def _get_container_element(bundle_el):
+ # TODO get different types of container once supported by pacemaker
+ return bundle_el.find("docker")
+
+def _remove_map_elements(element_list, id_to_remove_list):
+ for el in element_list:
+ if el.get("id", "") in id_to_remove_list:
+ el.getparent().remove(el)
+
+def _options_to_remove(options):
+ return set([
+ name for name, value in options.items()
+ if validate.is_empty_string(value)
+ ])
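
A sketch of the bundle builder (illustrative, not from the diff): append_new is driven by an id provider object, and the stub below simply echoes the proposed id, whereas the real IdProvider also guarantees uniqueness. Note that validate_new is meant to be run first and that append_new currently always creates a docker container element; the option values are arbitrary:

    from lxml import etree
    from pcs.lib.cib.resource.bundle import append_new

    class StubIdProvider(object):
        # stand-in for the real IdProvider; just echo the proposed id
        def allocate_id(self, proposed_id):
            return proposed_id

    resources = etree.fromstring("<resources/>")
    append_new(
        resources, StubIdProvider(), "httpd-bundle", "docker",
        container_options={"image": "httpd:2.4", "replicas": "2"},
        network_options={"control-port": "3121"},
        port_map=[{"port": "80"}],
        storage_map=[{"source-dir": "/srv/www", "target-dir": "/var/www/html"}],
    )
    # results in <bundle><docker .../><network><port-mapping .../></network>
    #            <storage><storage-mapping .../></storage></bundle>
    print(etree.tostring(resources, pretty_print=True).decode())
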
diff --git a/pcs/lib/cib/resource/clone.py b/pcs/lib/cib/resource/clone.py
new file mode 100644
index 0000000..cf0bca4
--- /dev/null
+++ b/pcs/lib/cib/resource/clone.py
@@ -0,0 +1,71 @@
+"""
+Module for clone related code.
+Multi-state resources are a specialization of clone resources, so this module
+also covers code related to master.
+"""
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib.cib.nvpair import append_new_meta_attributes
+from pcs.lib.cib.tools import find_unique_id
+
+
+TAG_CLONE = "clone"
+TAG_MASTER = "master"
+ALL_TAGS = [TAG_CLONE, TAG_MASTER]
+
+def is_clone(resource_el):
+ return resource_el.tag == TAG_CLONE
+
+def is_master(resource_el):
+ return resource_el.tag == TAG_MASTER
+
+def is_any_clone(resource_el):
+ return resource_el.tag in ALL_TAGS
+
+def create_id(clone_tag, primitive_element):
+ """
+ Create id for clone element based on contained primitive_element.
+
+ string clone_tag is the tag of the clone element. "master" is a
+ specialization of "clone" and this function is common to both.
+ etree.Element primitive_element is resource which will be cloned.
+ It must be connected into the cib to ensure that the resulting id is
+ unique!
+ """
+ return find_unique_id(
+ primitive_element,
+ "{0}-{1}".format(primitive_element.get("id"), clone_tag)
+ )
+
+def append_new(clone_tag, resources_section, primitive_element, options):
+ """
+ Append a new clone element (containing the primitive_element) to the
+ resources_section.
+
+ string clone_tag is tag of clone element. Expected values are "clone" and
+ "master".
+ etree.Element resources_section is place where new clone will be appended.
+ etree.Element primitive_element is resource which will be cloned.
+ dict options is source for clone meta options
+ """
+ clone_element = etree.SubElement(
+ resources_section,
+ clone_tag,
+ id=create_id(clone_tag, primitive_element),
+ )
+ clone_element.append(primitive_element)
+
+ if options:
+ append_new_meta_attributes(clone_element, options)
+
+ return clone_element
+
+def get_inner_resource(clone_el):
+ return clone_el.xpath("./primitive | ./group")[0]
diff --git a/pcs/lib/cib/resource/common.py b/pcs/lib/cib/resource/common.py
new file mode 100644
index 0000000..f9028ff
--- /dev/null
+++ b/pcs/lib/cib/resource/common.py
@@ -0,0 +1,203 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+
+from pcs.lib.cib import nvpair
+from pcs.lib.cib.resource.bundle import (
+ is_bundle,
+ get_inner_resource as get_bundle_inner_resource,
+)
+from pcs.lib.cib.resource.clone import (
+ is_any_clone,
+ get_inner_resource as get_clone_inner_resource,
+)
+from pcs.lib.cib.resource.group import (
+ is_group,
+ get_inner_resources as get_group_inner_resources,
+)
+from pcs.lib.cib.resource.primitive import is_primitive
+from pcs.lib.xml_tools import find_parent
+
+
+def are_meta_disabled(meta_attributes):
+ return meta_attributes.get("target-role", "Started").lower() == "stopped"
+
+def _can_be_evaluated_as_positive_num(value):
+ string_wo_leading_zeros = str(value).lstrip("0")
+ return string_wo_leading_zeros and string_wo_leading_zeros[0].isdigit()
+
+def is_clone_deactivated_by_meta(meta_attributes):
+ return are_meta_disabled(meta_attributes) or any([
+ not _can_be_evaluated_as_positive_num(meta_attributes.get(key, "1"))
+ for key in ["clone-max", "clone-node-max"]
+ ])
+
+def find_primitives(resource_el):
+ """
+ Get list of primitives contained in a given resource
+ etree resource_el -- resource element
+ """
+ if is_bundle(resource_el):
+ in_bundle = get_bundle_inner_resource(resource_el)
+ return [in_bundle] if in_bundle is not None else []
+ if is_any_clone(resource_el):
+ resource_el = get_clone_inner_resource(resource_el)
+ if is_group(resource_el):
+ return get_group_inner_resources(resource_el)
+ if is_primitive(resource_el):
+ return [resource_el]
+ return []
+
+def find_resources_to_enable(resource_el):
+ """
+ Get resources to enable in order to enable the specified resource successfully
+ etree resource_el -- resource element
+ """
+ if is_bundle(resource_el):
+ # bundles currently cannot be disabled - pcmk does not support that
+ # inner resources are supposed to be managed separately
+ return []
+
+ if is_any_clone(resource_el):
+ return [resource_el, get_clone_inner_resource(resource_el)]
+
+ to_enable = [resource_el]
+ parent = resource_el.getparent()
+ if is_any_clone(parent):
+ to_enable.append(parent)
+ return to_enable
+
+def enable(resource_el):
+ """
+ Enable specified resource
+ etree resource_el -- resource element
+ """
+ nvpair.arrange_first_nvset(
+ "meta_attributes",
+ resource_el,
+ {
+ "target-role": "",
+ }
+ )
+
+def disable(resource_el):
+ """
+ Disable specified resource
+ etree resource_el -- resource element
+ """
+ nvpair.arrange_first_nvset(
+ "meta_attributes",
+ resource_el,
+ {
+ "target-role": "Stopped",
+ }
+ )
+
+def find_resources_to_manage(resource_el):
+ """
+ Get resources to set as managed in order to manage the specified resource successfully
+ etree resource_el -- resource element
+ """
+ # If the resource_el is a primitive in a group, we set both the group and
+ # the primitive to managed mode. Otherwise the resource_el, all its
+ # children and parents need to be set to managed mode. We do it to make
+ # sure to remove the unmanaged flag from the whole tree. The flag could be
+ # put there manually. If we didn't do it, the resource may stay unmanaged,
+ # as a managed primitive in an unmanaged clone / group is still unmanaged
+ # and vice versa.
+ # Bundle resources cannot be set as unmanaged - pcmk currently doesn't
+ # support that. Resources in a bundle are supposed to be treated separately.
+ if is_bundle(resource_el):
+ return []
+ res_id = resource_el.attrib["id"]
+ return (
+ [resource_el] # the resource itself
+ +
+ # its parents
+ find_parent(resource_el, "resources").xpath(
+ """
+ (./master|./clone)[(group|group/primitive|primitive)[@id='{r}']]
+ |
+ //group[primitive[@id='{r}']]
+ """
+ .format(r=res_id)
+ )
+ +
+ # its children
+ resource_el.xpath("(./group|./primitive|./group/primitive)")
+ )
+
+def find_resources_to_unmanage(resource_el):
+ """
+ Get resources to set as unmanaged in order to unmanage the specified resource successfully
+ etree resource_el -- resource element
+ """
+ # resource hierarchy - specified resource - what to return
+ # a primitive - the primitive - the primitive
+ #
+ # a cloned primitive - the primitive - the primitive
+ # a cloned primitive - the clone - the primitive
+ # The resource will run on all nodes after unclone. However that doesn't
+ # seem to be bad behavior. Moreover, if monitor operations were disabled,
+ # they wouldn't enable on unclone, but the resource would become managed,
+ # which is definitely bad.
+ #
+ # a primitive in a group - the primitive - the primitive
+ # Otherwise all primitives in the group would become unmanaged.
+ # a primitive in a group - the group - all primitives in the group
+ # If only the group was set to unmanaged, setting any primitive in the
+ # group to managed would set all the primitives in the group to managed.
+ # If the group as well as all its primitives were set to unmanaged, any
+ # primitive added to the group would become unmanaged. This new primitive
+ # would become managed if any original group primitive becomes managed.
+ # Therefore changing one primitive influences another one, which we do
+ # not want to happen.
+ #
+ # a primitive in a cloned group - the primitive - the primitive
+ # a primitive in a cloned group - the group - all primitives in the group
+ # See group notes above
+ # a primitive in a cloned group - the clone - all primitives in the group
+ # See clone notes above
+ #
+ # a bundled primitive - the primitive - the primitive
+ # a bundled primitive - the bundle - nothing
+ # bundles currently cannot be set as unmanaged - pcmk does not support that
+ # an empty bundle - the bundle - nothing
+ # bundles currently cannot be set as unmanaged - pcmk does not support that
+ if is_any_clone(resource_el):
+ resource_el = get_clone_inner_resource(resource_el)
+ if is_group(resource_el):
+ return get_group_inner_resources(resource_el)
+ if is_primitive(resource_el):
+ return [resource_el]
+ return []
+
+def manage(resource_el):
+ """
+ Set the resource to be managed by the cluster
+ etree resource_el -- resource element
+ """
+ nvpair.arrange_first_nvset(
+ "meta_attributes",
+ resource_el,
+ {
+ "is-managed": "",
+ }
+ )
+
+def unmanage(resource_el):
+ """
+ Set the resource not to be managed by the cluster
+ etree resource_el -- resource element
+ """
+ nvpair.arrange_first_nvset(
+ "meta_attributes",
+ resource_el,
+ {
+ "is-managed": "false",
+ }
+ )
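
The mapping described in the comments above can be seen on a small tree; a hedged sketch assuming pcs is importable and using arbitrary ids:

    from lxml import etree
    from pcs.lib.cib.resource.common import (
        find_primitives,
        find_resources_to_unmanage,
    )

    clone = etree.fromstring(
        '<clone id="G-clone">'
        '<group id="G"><primitive id="A"/><primitive id="B"/></group>'
        '</clone>'
    )

    # unmanaging a cloned group resolves to the primitives inside the group
    print([el.get("id") for el in find_resources_to_unmanage(clone)])  # ['A', 'B']
    print([el.get("id") for el in find_primitives(clone)])             # ['A', 'B']
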
diff --git a/pcs/lib/cib/resource/group.py b/pcs/lib/cib/resource/group.py
new file mode 100644
index 0000000..75a34ee
--- /dev/null
+++ b/pcs/lib/cib/resource/group.py
@@ -0,0 +1,82 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib import reports
+from pcs.lib.cib.tools import find_element_by_tag_and_id
+from pcs.lib.errors import LibraryError
+
+
+TAG = "group"
+
+def is_group(resource_el):
+ return resource_el.tag == TAG
+
+def provide_group(resources_section, group_id):
+ """
+ Provide group with id=group_id. Create a new group if a group with
+ id=group_id does not exist.
+
+ etree.Element resources_section is place where new group will be appended
+ string group_id is id of group
+ """
+ group_element = find_element_by_tag_and_id(
+ TAG,
+ resources_section,
+ group_id,
+ none_if_id_unused=True
+ )
+ if group_element is None:
+ group_element = etree.SubElement(resources_section, TAG, id=group_id)
+ return group_element
+
+def place_resource(
+ group_element, primitive_element,
+ adjacent_resource_id=None, put_after_adjacent=False
+):
+ """
+ Add a resource to the group. This function can also be used to change the
+ position of a resource, because the primitive element is moved from wherever
+ it is (including the group itself) to a concrete place inside the group.
+
+ etree.Element group_element is element where to put primitive_element
+ etree.Element primitive_element is element for placement
+ string adjacent_resource_id is the id of an existing resource in the group.
+ primitive_element will be put beside adjacent_resource_id if specified.
+ bool put_after_adjacent is a flag saying where to put primitive_element:
+ before adjacent_resource_id if put_after_adjacent=False
+ after adjacent_resource_id if put_after_adjacent=True
+ Note that it makes sense only if adjacent_resource_id is specified
+ """
+ if primitive_element.attrib["id"] == adjacent_resource_id:
+ raise LibraryError(reports.resource_cannot_be_next_to_itself_in_group(
+ adjacent_resource_id,
+ group_element.attrib["id"],
+ ))
+
+ if not adjacent_resource_id:
+ return group_element.append(primitive_element)
+
+ adjacent_resource = find_element_by_tag_and_id(
+ "primitive",
+ group_element,
+ adjacent_resource_id,
+ id_description="resource",
+ )
+
+ if put_after_adjacent and adjacent_resource.getnext() is None:
+ return group_element.append(primitive_element)
+
+ index = group_element.index(
+ adjacent_resource.getnext() if put_after_adjacent
+ else adjacent_resource
+ )
+ group_element.insert(index, primitive_element)
+
+def get_inner_resources(group_el):
+ return group_el.xpath("./primitive")
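
A usage sketch of provide_group and place_resource (illustrative; it assumes pcs is importable, ids are arbitrary):

    from lxml import etree
    from pcs.lib.cib.resource.group import place_resource, provide_group

    resources = etree.fromstring(
        '<resources>'
        '<group id="G"><primitive id="A"/><primitive id="B"/></group>'
        '<primitive id="C"/>'
        '</resources>'
    )

    group = provide_group(resources, "G")  # existing group is reused
    # move the standalone primitive C into G, right after A
    place_resource(
        group,
        resources.find('primitive[@id="C"]'),
        adjacent_resource_id="A",
        put_after_adjacent=True,
    )
    print([el.get("id") for el in group])  # ['A', 'C', 'B']
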
diff --git a/pcs/lib/cib/resource/guest_node.py b/pcs/lib/cib/resource/guest_node.py
new file mode 100644
index 0000000..cead431
--- /dev/null
+++ b/pcs/lib/cib/resource/guest_node.py
@@ -0,0 +1,243 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.lib import reports, validate
+from pcs.lib.cib.tools import does_id_exist
+from pcs.lib.cib.nvpair import(
+ has_meta_attribute,
+ arrange_first_meta_attributes,
+ get_meta_attribute_value,
+)
+from pcs.lib.node import (
+ NodeAddresses,
+ node_addresses_contain_host,
+ node_addresses_contain_name,
+)
+
+
+#TODO pcs currently does not care about multiple meta_attributes and here
+#we don't care either
+GUEST_OPTIONS = [
+ 'remote-port',
+ 'remote-addr',
+ 'remote-connect-timeout',
+]
+
+def validate_conflicts(tree, nodes, node_name, options):
+ report_list = []
+ if(
+ does_id_exist(tree, node_name)
+ or
+ node_addresses_contain_name(nodes, node_name)
+ or (
+ "remote-addr" not in options
+ and
+ node_addresses_contain_host(nodes, node_name)
+ )
+ ):
+ report_list.append(reports.id_already_exists(node_name))
+
+ if(
+ "remote-addr" in options
+ and
+ node_addresses_contain_host(nodes, options["remote-addr"])
+ ):
+ report_list.append(reports.id_already_exists(options["remote-addr"]))
+ return report_list
+
+def is_node_name_in_options(options):
+ return "remote-node" in options
+
+def get_guest_option_value(options, default=None):
+ return options.get("remote-node", default)
+
+
+def validate_set_as_guest(tree, nodes, node_name, options):
+ report_list = validate.names_in(
+ GUEST_OPTIONS,
+ options.keys(),
+ "guest",
+ )
+
+ validator_list = [
+ validate.value_time_interval("remote-connect-timeout"),
+ validate.value_port_number("remote-port"),
+ ]
+
+ report_list.extend(
+ validate.run_collection_of_option_validators(options, validator_list)
+ )
+
+ report_list.extend(
+ validate_conflicts(tree, nodes, node_name, options)
+ )
+
+ if not node_name.strip():
+ report_list.append(
+ reports.invalid_option_value(
+ "node name",
+ node_name,
+ "no empty value",
+ )
+ )
+
+ return report_list
+
+def is_guest_node(resource_element):
+ """
+ Return True if resource_element is already set as guest node.
+
+ etree.Element resource_element is a search element
+ """
+ return has_meta_attribute(resource_element, "remote-node")
+
+def validate_is_not_guest(resource_element):
+ """
+ etree.Element resource_element
+ """
+ if not is_guest_node(resource_element):
+ return []
+
+ return [
+ reports.resource_is_guest_node_already(
+ resource_element.attrib["id"]
+ )
+ ]
+
+def set_as_guest(
+ resource_element, node, addr=None, port=None, connect_timeout=None
+):
+ """
+ Set resource as guest node.
+
+ etree.Element resource_element
+
+ """
+ meta_options = {"remote-node": str(node)}
+ if addr:
+ meta_options["remote-addr"] = str(addr)
+ if port:
+ meta_options["remote-port"] = str(port)
+ if connect_timeout:
+ meta_options["remote-connect-timeout"] = str(connect_timeout)
+
+ arrange_first_meta_attributes(resource_element, meta_options)
+
+def unset_guest(resource_element):
+ """
+ Unset resource as guest node.
+
+ etree.Element resource_element
+ """
+ guest_nvpair_list = resource_element.xpath(
+ "./meta_attributes/nvpair[{0}]".format(
+ " or ".join([
+ '@name="{0}"'.format(option)
+ for option in (GUEST_OPTIONS + ["remote-node"])
+ ])
+ )
+ )
+ for nvpair in guest_nvpair_list:
+ meta_attributes = nvpair.getparent()
+ meta_attributes.remove(nvpair)
+ if not len(meta_attributes):
+ meta_attributes.getparent().remove(meta_attributes)
+
+def get_node(meta_attributes):
+ """
+ Return NodeAddresses corresponding to the guest node described in
+ meta_attributes. Return None if meta_attributes does not describe a guest node
+
+ etree.Element meta_attributes is the searched element
+ """
+ host = None
+ name = None
+ for nvpair in meta_attributes:
+ if nvpair.attrib.get("name", "") == "remote-addr":
+ host = nvpair.attrib["value"]
+ if nvpair.attrib.get("name", "") == "remote-node":
+ name = nvpair.attrib["value"]
+ if host is None:
+ host = name
+ return NodeAddresses(host, name=name) if name else None
+
+def get_host_from_options(node_name, meta_options):
+ """
+ Return the host from meta_options, falling back to node_name.
+ dict meta_options
+ """
+ return meta_options.get("remote-addr", node_name)
+
+def get_node_name_from_options(meta_options, default=None):
+ """
+ Return node_name from meta options.
+ dict meta_options
+ """
+ return meta_options.get("remote-node", default)
+
+
+def get_host(resource_element):
+ host = get_meta_attribute_value(resource_element, "remote-addr")
+ if host:
+ return host
+
+ return get_meta_attribute_value(resource_element, "remote-node")
+
+def find_node_list(resources_section):
+ """
+ Return list of nodes from resources_section
+
+ etree.Element resources_section is the searched element
+ """
+ return [
+ get_node(meta_attrs) for meta_attrs in resources_section.xpath("""
+ .//primitive
+ /meta_attributes[
+ nvpair[
+ @name="remote-node"
+ and
+ string-length(@value) > 0
+ ]
+ ]
+ """)
+ ]
+
+def find_node_resources(resources_section, node_identifier):
+ """
+ Return a list of etree.Element primitives that are guest nodes.
+
+ etree.Element resources_section is the searched element
+ string node_identifier could be id of resource, node name or node address
+ """
+ resources = resources_section.xpath("""
+ .//primitive[
+ (
+ @id="{0}"
+ and
+ meta_attributes[
+ nvpair[
+ @name="remote-node"
+ and
+ string-length(@value) > 0
+ ]
+ ]
+ )
+ or
+ meta_attributes[
+ nvpair[
+ (
+ @name="remote-addr"
+ or
+ @name="remote-node"
+ )
+ and
+ @value="{0}"
+ ]
+ ]
+ ]
+ """.format(node_identifier))
+ return resources
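
A sketch of turning a resource into a guest node and reading it back (illustrative only; it assumes pcs is importable and that NodeAddresses exposes a name attribute, and the addresses are arbitrary):

    from lxml import etree
    from pcs.lib.cib.resource.guest_node import (
        get_node,
        is_guest_node,
        set_as_guest,
    )

    vm = etree.fromstring(
        '<primitive id="vm-1" class="ocf" provider="heartbeat"'
        ' type="VirtualDomain"/>'
    )

    set_as_guest(vm, "guest1", addr="192.0.2.10", port="3121")
    print(is_guest_node(vm))  # True

    # the guest node's addresses can be recovered from the meta_attributes
    node = get_node(vm.find("meta_attributes"))
    print(node.name)  # guest1
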
diff --git a/pcs/lib/cib/resource/operations.py b/pcs/lib/cib/resource/operations.py
new file mode 100644
index 0000000..9d8b2ef
--- /dev/null
+++ b/pcs/lib/cib/resource/operations.py
@@ -0,0 +1,364 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from collections import defaultdict
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib import reports, validate
+from pcs.lib.resource_agent import get_default_interval, complete_all_intervals
+from pcs.lib.cib.nvpair import append_new_instance_attributes
+from pcs.lib.cib.tools import create_subelement_id
+from pcs.lib.pacemaker.values import (
+ is_true,
+ timeout_to_seconds,
+)
+
+OPERATION_NVPAIR_ATTRIBUTES = [
+ "OCF_CHECK_LEVEL",
+]
+
+ATTRIBUTES = [
+ "id",
+ "description",
+ "enabled",
+ "interval",
+ "interval-origin",
+ "name",
+ "on-fail",
+ "record-pending",
+ "requires",
+ "role",
+ "start-delay",
+ "timeout",
+ "OCF_CHECK_LEVEL",
+]
+
+ROLE_VALUES = [
+ "Stopped",
+ "Started",
+ "Slave",
+ "Master",
+]
+
+REQUIRES_VALUES = [
+ "nothing",
+ "quorum",
+ "fencing",
+ "unfencing",
+]
+
+ON_FAIL_VALUES = [
+ "ignore",
+ "block",
+ "stop",
+ "restart",
+ "standby",
+ "fence",
+ "restart-container",
+]
+
+BOOLEAN_VALUES = [
+ "0",
+ "1",
+ "true",
+ "false",
+]
+
+#normalize(key, value) -> normalized_value
+normalize = validate.option_value_normalization({
+ "role": lambda value: value.lower().capitalize(),
+ "requires": lambda value: value.lower(),
+ "on-fail": lambda value: value.lower(),
+ "record-pending": lambda value: value.lower(),
+ "enabled": lambda value: value.lower(),
+})
+
+def prepare(
+ report_processor, raw_operation_list, default_operation_list,
+ allowed_operation_name_list, allow_invalid=False
+):
+ """
+ Return operation_list prepared from raw_operation_list and
+ default_operation_list.
+
+ report_processor is tool for warning/info/error reporting
+ list of dicts raw_operation_list are operations as entered by the user that
+ require further processing
+ list of dicts default_operation_list are operations defined as default by
+ (most probably) resource agent
+ bool allow_invalid is flag for validation skipping
+ """
+ operations_to_validate = operations_to_normalized(raw_operation_list)
+
+ report_list = []
+ report_list.extend(
+ validate_operation_list(
+ operations_to_validate,
+ allowed_operation_name_list,
+ allow_invalid
+ )
+ )
+
+ operation_list = normalized_to_operations(operations_to_validate)
+
+ report_list.extend(validate_different_intervals(operation_list))
+
+ #can raise LibraryError
+ report_processor.process_list(report_list)
+
+ return complete_all_intervals(operation_list) + get_remaining_defaults(
+ report_processor,
+ operation_list,
+ default_operation_list
+ )
+
+def operations_to_normalized(raw_operation_list):
+ return [
+ validate.values_to_pairs(op, normalize) for op in raw_operation_list
+ ]
+
+def normalized_to_operations(normalized_pairs):
+ return [
+ validate.pairs_to_values(op) for op in normalized_pairs
+ ]
+
+def validate_operation_list(
+ operation_list, allowed_operation_name_list, allow_invalid=False
+):
+ options_validators = [
+ validate.is_required("name", "resource operation"),
+ validate.value_in("role", ROLE_VALUES),
+ validate.value_in("requires", REQUIRES_VALUES),
+ validate.value_in("on-fail", ON_FAIL_VALUES),
+ validate.value_in("record-pending", BOOLEAN_VALUES),
+ validate.value_in("enabled", BOOLEAN_VALUES),
+ validate.mutually_exclusive(
+ ["interval-origin", "start-delay"],
+ "resource operation"
+ ),
+ validate.value_in(
+ "name",
+ allowed_operation_name_list,
+ option_name_for_report="operation name",
+ code_to_allow_extra_values=report_codes.FORCE_OPTIONS,
+ allow_extra_values=allow_invalid,
+ ),
+ ]
+ report_list = []
+ for operation in operation_list:
+ report_list.extend(
+ validate_operation(operation, options_validators)
+ )
+ return report_list
+
+def validate_operation(operation, options_validator_list):
+ """
+ Return a list with reports (ReportItems) about problems inside
+ operation.
+ dict operation contains attributes of operation
+ """
+ report_list = validate.names_in(
+ ATTRIBUTES,
+ operation.keys(),
+ "resource operation",
+ )
+
+ report_list.extend(validate.run_collection_of_option_validators(
+ operation,
+ options_validator_list
+ ))
+
+ return report_list
+
+def get_remaining_defaults(
+ report_processor, operation_list, default_operation_list
+):
+ """
+ Return operations not mentioned in operation_list but contained in
+ default_operation_list.
+ report_processor is tool for warning/info/error reporting
+ list operation_list contains dictionaries with attributes of operation
+ list default_operation_list contains dictionaries with attributes of the
+ operation
+ """
+ return make_unique_intervals(
+ report_processor,
+ [
+ default_operation for default_operation in default_operation_list
+ if default_operation["name"] not in [
+ operation["name"] for operation in operation_list
+ ]
+ ]
+ )
+
+def get_interval_uniquer():
+ used_intervals_map = defaultdict(set)
+ def get_uniq_interval(name, initial_interval):
+ """
+ Return a unique interval for the operation name based on initial_interval
+ if initial_interval is valid, otherwise return initial_interval unchanged.
+
+ string name is the operation name for searching interval
+ initial_interval is starting point for finding free value
+ """
+ used_intervals = used_intervals_map[name]
+ normalized_interval = timeout_to_seconds(initial_interval)
+ if normalized_interval is None:
+ return initial_interval
+
+ if normalized_interval not in used_intervals:
+ used_intervals.add(normalized_interval)
+ return initial_interval
+
+ while normalized_interval in used_intervals:
+ normalized_interval += 1
+ used_intervals.add(normalized_interval)
+ return str(normalized_interval)
+ return get_uniq_interval
+
+def make_unique_intervals(report_processor, operation_list):
+ """
+ Return operation list similar to operation_list where intervals for the same
+ operation are unique
+ report_processor is tool for warning/info/error reporting
+ list operation_list contains dictionaries with attributes of operation
+ """
+ get_unique_interval = get_interval_uniquer()
+ adapted_operation_list = []
+ for operation in operation_list:
+ adapted = operation.copy()
+ if "interval" in adapted:
+ adapted["interval"] = get_unique_interval(
+ operation["name"],
+ operation["interval"]
+ )
+ if adapted["interval"] != operation["interval"]:
+ report_processor.process(
+ reports.resource_operation_interval_adapted(
+ operation["name"],
+ operation["interval"],
+ adapted["interval"],
+ )
+ )
+ adapted_operation_list.append(adapted)
+ return adapted_operation_list
+
+def validate_different_intervals(operation_list):
+ """
+ Check that the same operations (e.g. monitor) have different intervals.
+ list operation_list contains dictionaries with attributes of operation
+ return see resource operation in pcs/lib/exchange_formats.md
+ """
+ duplication_map = defaultdict(lambda: defaultdict(list))
+ for operation in operation_list:
+ interval = operation.get(
+ "interval",
+ get_default_interval(operation["name"])
+ )
+ seconds = timeout_to_seconds(interval)
+ duplication_map[operation["name"]][seconds].append(interval)
+
+ duplications = defaultdict(list)
+ for name, interval_map in duplication_map.items():
+ for timeout in sorted(interval_map.values()):
+ if len(timeout) > 1:
+ duplications[name].append(timeout)
+
+ if duplications:
+ return [reports.resource_operation_interval_duplication(
+ dict(duplications)
+ )]
+ return []
+
+def create_id(context_element, name, interval):
+ """
+ Create id for op element.
+ etree context_element is used for the name building
+ string name is the name of the operation
+ mixed interval is the interval attribute of operation
+ """
+ return create_subelement_id(
+ context_element,
+ "{0}-interval-{1}".format(name, interval)
+ )
+
+def create_operations(primitive_element, operation_list):
+ """
+ Create operation element containing operations from operation_list
+ list operation_list contains dictionaries with attributes of operation
+ etree primitive_element is context element
+ """
+ operations_element = etree.SubElement(primitive_element, "operations")
+ for operation in sorted(operation_list, key=lambda op: op["name"]):
+ append_new_operation(operations_element, operation)
+
+def append_new_operation(operations_element, options):
+ """
+ Create op element and append it to operations_element.
+ etree operations_element is the context element
+ dict options are attributes of operation
+ """
+ attribute_map = dict(
+ (key, value) for key, value in options.items()
+ if key not in OPERATION_NVPAIR_ATTRIBUTES
+ )
+ attribute_map.update({
+ "id": create_id(
+ operations_element.getparent(),
+ options["name"],
+ options["interval"]
+ )
+ })
+ op_element = etree.SubElement(
+ operations_element,
+ "op",
+ attribute_map,
+ )
+ nvpair_attribute_map = dict(
+ (key, value) for key, value in options.items()
+ if key in OPERATION_NVPAIR_ATTRIBUTES
+ )
+
+ if nvpair_attribute_map:
+ append_new_instance_attributes(op_element, nvpair_attribute_map)
+
+ return op_element
+
+def get_resource_operations(resource_el, names=None):
+ """
+ Get operations of a given resource, optionally filtered by name
+ etree resource_el -- resource element
+ iterable names -- return only operations of these names if specified
+ """
+ return [
+ op_el
+ for op_el in resource_el.xpath("./operations/op")
+ if not names or op_el.attrib.get("name", "") in names
+ ]
+
+def disable(operation_element):
+ """
+ Disable the specified operation
+ etree operation_element -- the operation
+ """
+ operation_element.attrib["enabled"] = "false"
+
+def enable(operation_element):
+ """
+ Enable the specified operation
+ etree operation_element -- the operation
+ """
+ operation_element.attrib.pop("enabled", None)
+
+def is_enabled(operation_element):
+ """
+ Check if the specified operation is enabled
+ etree operation_element -- the operation
+ """
+ return is_true(operation_element.attrib.get("enabled", "true"))
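
A sketch of the interval handling above (illustrative, not from the diff; it assumes pcs is importable and that timeout_to_seconds parses "10s" as 10 seconds):

    from pcs.lib.cib.resource.operations import (
        get_interval_uniquer,
        validate_different_intervals,
    )

    # two monitor operations with the same effective interval yield one report
    report_items = validate_different_intervals([
        {"name": "monitor", "interval": "10s"},
        {"name": "monitor", "interval": "10"},
    ])
    print(len(report_items))  # 1

    # the uniquer bumps a colliding interval to the next free value in seconds
    uniq = get_interval_uniquer()
    print(uniq("monitor", "10s"))  # 10s
    print(uniq("monitor", "10"))   # 11
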
diff --git a/pcs/lib/cib/resource/primitive.py b/pcs/lib/cib/resource/primitive.py
new file mode 100644
index 0000000..664aad4
--- /dev/null
+++ b/pcs/lib/cib/resource/primitive.py
@@ -0,0 +1,134 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib import reports
+from pcs.lib.cib.nvpair import (
+ append_new_instance_attributes,
+ append_new_meta_attributes,
+)
+from pcs.lib.cib.resource.operations import(
+ prepare as prepare_operations,
+ create_operations,
+)
+from pcs.lib.cib.tools import does_id_exist
+from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker.values import validate_id
+
+
+TAG = "primitive"
+
+def is_primitive(resource_el):
+ return resource_el.tag == TAG
+
+def create(
+ report_processor, resources_section, resource_id, resource_agent,
+ raw_operation_list=None, meta_attributes=None, instance_attributes=None,
+ allow_invalid_operation=False,
+ allow_invalid_instance_attributes=False,
+ use_default_operations=True,
+):
+ """
+ Prepare all parts of primitive resource and append it into cib.
+
+ report_processor is a tool for warning/info/error reporting
+ etree.Element resources_section is place where new element will be appended
+ string resource_id is id of new resource
+ lib.resource_agent.CrmAgent resource_agent
+ list of dict raw_operation_list specifies operations of resource
+ dict meta_attributes specifies meta attributes of resource
+ dict instance_attributes specifies instance attributes of resource
+ bool allow_invalid_operation is flag for skipping validation of operations
+ bool allow_invalid_instance_attributes is flag for skipping validation of
+ instance_attributes
+ bool use_default_operations is flag for completion operations with default
+ actions specified in resource agent
+ """
+ if raw_operation_list is None:
+ raw_operation_list = []
+ if meta_attributes is None:
+ meta_attributes = {}
+ if instance_attributes is None:
+ instance_attributes = {}
+
+ if does_id_exist(resources_section, resource_id):
+ raise LibraryError(reports.id_already_exists(resource_id))
+ validate_id(resource_id, "resource name")
+
+ operation_list = prepare_operations(
+ report_processor,
+ raw_operation_list,
+ resource_agent.get_cib_default_actions(
+ necessary_only=not use_default_operations
+ ),
+ [operation["name"] for operation in resource_agent.get_actions()],
+ allow_invalid=allow_invalid_operation,
+ )
+
+ report_processor.process_list(
+ resource_agent.validate_parameters(
+ instance_attributes,
+ parameters_type="resource",
+ allow_invalid=allow_invalid_instance_attributes,
+ )
+ )
+
+ return append_new(
+ resources_section,
+ resource_id,
+ resource_agent.get_standard(),
+ resource_agent.get_provider(),
+ resource_agent.get_type(),
+ instance_attributes=instance_attributes,
+ meta_attributes=meta_attributes,
+ operation_list=operation_list
+ )
+
+def append_new(
+ resources_section, resource_id, standard, provider, agent_type,
+ instance_attributes=None,
+ meta_attributes=None,
+ operation_list=None
+):
+ """
+ Append a new primitive element to the resources_section.
+
+ etree.Element resources_section is place where new element will be appended
+ string resource_id is id of new resource
+ string standard is a standard of resource agent (e.g. ocf)
+ string agent_type is a type of resource agent (e.g. IPaddr2)
+ string provider is a provider of resource agent (e.g. heartbeat)
+ dict instance_attributes will be nvpairs inside instance_attributes element
+ dict meta_attributes will be nvpairs inside meta_attributes element
+ list operation_list contains dicts representing operations
+ (e.g. [{"name": "monitor"}, {"name": "start"}])
+ """
+ attributes = {
+ "id": resource_id,
+ "class": standard,
+ "type": agent_type,
+ }
+ if provider:
+ attributes["provider"] = provider
+ primitive_element = etree.SubElement(resources_section, TAG, attributes)
+
+ if instance_attributes:
+ append_new_instance_attributes(
+ primitive_element,
+ instance_attributes
+ )
+
+ if meta_attributes:
+ append_new_meta_attributes(primitive_element, meta_attributes)
+
+ create_operations(
+ primitive_element,
+ operation_list if operation_list else []
+ )
+
+ return primitive_element
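
A sketch of the low-level builder (illustrative; create() above is the entry point that also consults the resource agent, while append_new only assembles the XML; the generated nvset and op ids come from create_subelement_id, and the agent and attribute values below are arbitrary):

    from lxml import etree
    from pcs.lib.cib.resource.primitive import append_new

    resources = etree.fromstring("<resources/>")
    append_new(
        resources, "dummy", "ocf", "heartbeat", "Dummy",
        instance_attributes={"fake": "something"},
        meta_attributes={"target-role": "Stopped"},
        operation_list=[{"name": "monitor", "interval": "10s"}],
    )
    print(etree.tostring(resources, pretty_print=True).decode())
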
diff --git a/pcs/lib/cib/resource/remote_node.py b/pcs/lib/cib/resource/remote_node.py
new file mode 100644
index 0000000..36db850
--- /dev/null
+++ b/pcs/lib/cib/resource/remote_node.py
@@ -0,0 +1,219 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.common import report_codes
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+from pcs.lib.cib.resource import primitive
+from pcs.lib.node import(
+ NodeAddresses,
+ node_addresses_contain_host,
+ node_addresses_contain_name,
+)
+from pcs.lib.resource_agent import(
+ find_valid_resource_agent_by_name,
+ ResourceAgentName,
+)
+
+AGENT_NAME = ResourceAgentName("ocf", "pacemaker", "remote")
+
+def get_agent(report_processor, cmd_runner):
+ return find_valid_resource_agent_by_name(
+ report_processor,
+ cmd_runner,
+ AGENT_NAME.full_name,
+ )
+
+_IS_REMOTE_AGENT_XPATH_SNIPPET = """
+ @class="{0}" and @provider="{1}" and @type="{2}"
+""".format(AGENT_NAME.standard, AGENT_NAME.provider, AGENT_NAME.type)
+
+_HAS_SERVER_XPATH_SNIPPET = """
+ instance_attributes/nvpair[
+ @name="server"
+ and
+ string-length(@value) > 0
+ ]
+"""
+
+
+
+def find_node_list(resources_section):
+ node_list = [
+ NodeAddresses(
+ nvpair.attrib["value"],
+ name=nvpair.getparent().getparent().attrib["id"]
+ )
+ for nvpair in resources_section.xpath(
+ ".//primitive[{is_remote}]/{has_server}"
+ .format(
+ is_remote=_IS_REMOTE_AGENT_XPATH_SNIPPET,
+ has_server=_HAS_SERVER_XPATH_SNIPPET,
+ )
+ )
+ ]
+
+ node_list.extend([
+ NodeAddresses(primitive.attrib["id"], name=primitive.attrib["id"])
+ for primitive in resources_section.xpath(
+ ".//primitive[{is_remote} and not({has_server})]"
+ .format(
+ is_remote=_IS_REMOTE_AGENT_XPATH_SNIPPET,
+ has_server=_HAS_SERVER_XPATH_SNIPPET,
+ )
+ )
+ ])
+
+ return node_list
+
+def find_node_resources(resources_section, node_identifier):
+ """
+ Return a list of resource elements that match node_identifier
+
+ etree.Element resources_section is a search element
+ string node_identifier could be id of the resource or its instance attribute
+ "server"
+ """
+ return resources_section.xpath(
+ """
+ .//primitive[
+ {is_remote} and (
+ @id="{identifier}"
+ or
+ instance_attributes/nvpair[
+ @name="server"
+ and
+ @value="{identifier}"
+ ]
+ )
+ ]
+ """
+ .format(
+ is_remote=_IS_REMOTE_AGENT_XPATH_SNIPPET,
+ identifier=node_identifier
+ )
+ )
+
+def get_host(resource_element):
+ """
+ Return the first host from the resource element if it is there. Return None
+ if the host is not there.
+
+ etree.Element resource_element
+ """
+ if not (
+ resource_element.attrib.get("class", "") == AGENT_NAME.standard
+ and
+ resource_element.attrib.get("provider", "") == AGENT_NAME.provider
+ and
+ resource_element.attrib.get("type", "") == AGENT_NAME.type
+ ):
+ return None
+
+
+ host_list = resource_element.xpath(
+ "./{has_server}/@value".format(has_server=_HAS_SERVER_XPATH_SNIPPET)
+ )
+ if host_list:
+ return host_list[0]
+ return resource_element.attrib["id"]
+
+def _validate_server_not_used(agent, option_dict):
+ if "server" in option_dict:
+ return [reports.invalid_option(
+ ["server"],
+ sorted([
+ attr["name"] for attr in agent.get_parameters()
+ if attr["name"] != "server"
+ ]),
+ "resource",
+ )]
+ return []
+
+
+def validate_host_not_conflicts(nodes, node_name, instance_attributes):
+ host = instance_attributes.get("server", node_name)
+ if node_addresses_contain_host(nodes, host):
+ return [reports.id_already_exists(host)]
+ return []
+
+def validate_create(
+ nodes, resource_agent, host, node_name, instance_attributes
+):
+ """
+ Validate inputs for create
+
+ list of NodeAddresses nodes -- nodes already used
+ resource_agent -- agent of the future resource
+ string host -- address of the future node
+ string node_name -- name of the future node
+ dict instance_attributes -- data for future resource instance attributes
+ """
+ report_list = _validate_server_not_used(resource_agent, instance_attributes)
+
+ host_is_used = False
+ if node_addresses_contain_host(nodes, host):
+ report_list.append(reports.id_already_exists(host))
+ host_is_used = True
+
+ if not host_is_used or host != node_name:
+ if node_addresses_contain_name(nodes, node_name):
+ report_list.append(reports.id_already_exists(node_name))
+
+ return report_list
+
+def prepare_instance_atributes(instance_attributes, host):
+ enriched_instance_attributes = instance_attributes.copy()
+ enriched_instance_attributes["server"] = host
+ return enriched_instance_attributes
+
+def create(
+ report_processor, resource_agent, resources_section, host, node_name,
+ raw_operation_list=None, meta_attributes=None,
+ instance_attributes=None,
+ allow_invalid_operation=False,
+ allow_invalid_instance_attributes=False,
+ use_default_operations=True,
+):
+ """
+ Prepare all parts of the remote resource and append it to the CIB.
+
+ report_processor is a tool for warning/info/error reporting
+ resource_agent is the agent metadata of the new resource
+ etree.Element resources_section is the element the new resource is appended to
+ string host is the address of the remote node
+ string node_name is the name of the remote node and the id of the new resource
+ list of dict raw_operation_list specifies operations of the resource
+ dict meta_attributes specifies meta attributes of the resource
+ dict instance_attributes specifies instance attributes of the resource
+ bool allow_invalid_operation allows skipping validation of operations
+ bool allow_invalid_instance_attributes allows skipping validation of
+ instance_attributes
+ bool use_default_operations completes the operations with default actions
+ specified in the resource agent
+ """
+ all_instance_attributes = instance_attributes.copy()
+ if host != node_name:
+ all_instance_attributes.update({"server": host})
+ try:
+ return primitive.create(
+ report_processor,
+ resources_section,
+ node_name,
+ resource_agent,
+ raw_operation_list,
+ meta_attributes,
+ all_instance_attributes,
+ allow_invalid_operation,
+ allow_invalid_instance_attributes,
+ use_default_operations,
+ )
+ except LibraryError as e:
+ for report in e.args:
+ if report.code == report_codes.INVALID_OPTION:
+ report.info["allowed"] = [
+ value for value in report.info["allowed"]
+ if value != "server"
+ ]
+ raise e
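
As a quick orientation to the helpers added above, here is a minimal sketch of how
they might be exercised against a CIB fragment. The import path and the assumption
that the agent in question is ocf:pacemaker:remote are taken from context, not
confirmed by this diff.

    from lxml import etree
    # assumed module path for the helpers shown in the hunk above
    from pcs.lib.cib.resource import remote_node

    resources = etree.fromstring("""
        <resources>
            <primitive id="vm-node1" class="ocf" provider="pacemaker" type="remote">
                <instance_attributes id="vm-node1-ia">
                    <nvpair id="vm-node1-ia-server" name="server"
                        value="192.168.122.11"
                    />
                </instance_attributes>
            </primitive>
            <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
        </resources>
    """)

    # expecting one remote node, named after the primitive id and addressed
    # via its "server" instance attribute
    print(len(remote_node.find_node_list(resources)))                         # 1

    # the node resource can be looked up by resource id or by its "server" value
    print(len(remote_node.find_node_resources(resources, "vm-node1")))        # 1
    print(len(remote_node.find_node_resources(resources, "192.168.122.11")))  # 1

    # get_host returns the "server" value (falling back to the resource id);
    # for a non-remote primitive it returns None
    print(remote_node.get_host(resources[0]))   # 192.168.122.11
    print(remote_node.get_host(resources[1]))   # None
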
diff --git a/pcs/lib/cib/stonith.py b/pcs/lib/cib/stonith.py
new file mode 100644
index 0000000..d588c4d
--- /dev/null
+++ b/pcs/lib/cib/stonith.py
@@ -0,0 +1,15 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+# TODO replace by the new finding function
+def is_stonith_resource(resources_el, name):
+ return len(
+ resources_el.xpath(
+ "primitive[@id='{0}' and @class='stonith']".format(name)
+ )
+ ) > 0
+
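A minimal usage sketch for the helper above (the XML mirrors a CIB resources
section; nothing beyond what the function itself reads is assumed):

    from lxml import etree
    from pcs.lib.cib.stonith import is_stonith_resource

    resources_el = etree.fromstring("""
        <resources>
            <primitive id="fence-node1" class="stonith" type="fence_xvm"/>
            <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
        </resources>
    """)

    print(is_stonith_resource(resources_el, "fence-node1"))  # True
    print(is_stonith_resource(resources_el, "dummy"))        # False
    print(is_stonith_resource(resources_el, "missing"))      # False
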
diff --git a/pcs/test/test_lib_cib_acl.py b/pcs/lib/cib/test/test_acl.py
similarity index 80%
rename from pcs/test/test_lib_cib_acl.py
rename to pcs/lib/cib/test/test_acl.py
index 56b48eb..7171a32 100644
--- a/pcs/test/test_lib_cib_acl.py
+++ b/pcs/lib/cib/test/test_acl.py
@@ -9,7 +9,6 @@ from lxml import etree
from pcs.test.tools.assertions import (
assert_raise_library_error,
- assert_report_item_equal,
assert_xml_equal,
ExtendedAssertionsMixin,
)
@@ -19,10 +18,8 @@ from pcs.test.tools.pcs_unittest import mock, TestCase
from pcs.common import report_codes
from pcs.lib.cib import acl as lib
-from pcs.lib.errors import (
- LibraryError,
- ReportItemSeverity as severities,
-)
+from pcs.lib.cib.tools import get_acls
+from pcs.lib.errors import ReportItemSeverity as severities, LibraryError
class LibraryAclTest(TestCase):
def setUp(self):
@@ -31,6 +28,10 @@ class LibraryAclTest(TestCase):
)
self.cib = self.create_cib()
+ @property
+ def acls(self):
+ return get_acls(self.cib.tree)
+
def fixture_add_role(self, role_id):
self.cib.append_to_first_tag_name(
'configuration',
@@ -158,74 +159,10 @@ class ValidatePermissionsTest(LibraryAclTest):
)
-class FindRoleTest(LibraryAclTest, ExtendedAssertionsMixin):
- def test_success(self):
- xml = """
- <xml>
- <acl_role id="role-id"/>
- <role id="role-id"/>
- </xml>
- """
- assert_xml_equal(
- '<acl_role id="role-id"/>',
- etree.tostring(lib.find_role(etree.XML(xml), "role-id")).decode()
- )
-
- def test_not_exist(self):
- xml = """
- <xml>
- <role id="role-id"/>
- </xml>
- """
- self.assert_raises(
- lib.AclRoleNotFound,
- lambda: lib.find_role(etree.XML(xml), "role-id"),
- {"role_id": "role-id"}
- )
-
-
-class FindPermissionTest(LibraryAclTest):
- def test_success(self):
- xml = """
- <cib>
- <acls>
- <acl_permission id="permission-id"/>
- <acl_permission id="permission-id2"/>
- </acls>
- </cib>
- """
- assert_xml_equal(
- '<acl_permission id="permission-id"/>',
- etree.tostring(lib._find_permission(
- etree.XML(xml), "permission-id")
- ).decode()
- )
-
- def test_not_exist(self):
- xml = """
- <cib>
- <acls>
- <acl_permission id="permission-id2"/>
- </acls>
- </cib>
- """
- assert_raise_library_error(
- lambda: lib._find_permission(etree.XML(xml), "permission-id"),
- (
- severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "permission-id",
- "id_description": "permission",
- }
- )
- )
-
-
class CreateRoleTest(LibraryAclTest):
def test_create_for_new_role_id(self):
role_id = 'new-id'
- lib.create_role(self.cib.tree, role_id)
+ lib.create_role(self.acls, role_id)
self.assert_cib_equal(
self.create_cib().append_to_first_tag_name(
@@ -311,13 +248,19 @@ class RemoveRoleTest(LibraryAclTest, ExtendedAssertionsMixin):
assert_xml_equal(expected_xml, etree.tostring(self.tree).decode())
def test_id_not_exists(self):
- self.assert_raises(
- lib.AclRoleNotFound,
- lambda: lib.remove_role(self.tree, "id-of-role"),
- {"role_id": "id-of-role"}
+ assert_raise_library_error(
+ lambda: lib.remove_role(self.tree.find(".//acls"), "id-of-role"),
+ (
+ severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "context_type": "acls",
+ "context_id": "",
+ "id": "id-of-role",
+ },
+ ),
)
-
class AssignRoleTest(LibraryAclTest):
def setUp(self):
LibraryAclTest.setUp(self)
@@ -336,11 +279,8 @@ class AssignRoleTest(LibraryAclTest):
)
def test_success_target(self):
- target = self.cib.tree.find(
- ".//acl_target[@id='{0}']".format("target1")
- )
- role = self.cib.tree.find(".//acl_role[@id='{0}']".format("role1"))
- lib.assign_role(target, role)
+ target = self.cib.tree.find(".//acl_target[@id='target1']")
+ lib.assign_role(self.cib.tree, "role1", target)
self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
"configuration",
"""
@@ -357,9 +297,8 @@ class AssignRoleTest(LibraryAclTest):
))
def test_sucess_group(self):
- group = self.cib.tree.find(".//acl_group[@id='{0}']".format("group1"))
- role = self.cib.tree.find(".//acl_role[@id='{0}']".format("role1"))
- lib.assign_role(group, role)
+ group = self.cib.tree.find(".//acl_group[@id='group1']")
+ lib.assign_role(self.cib.tree, "role1", group)
self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
"configuration",
"""
@@ -377,12 +316,9 @@ class AssignRoleTest(LibraryAclTest):
))
def test_role_already_assigned(self):
- target = self.cib.tree.find(
- ".//acl_target[@id='{0}']".format("target1")
- )
- role = self.cib.tree.find(".//acl_role[@id='{0}']".format("role2"))
+ target = self.cib.tree.find(".//acl_target[@id='target1']")
assert_raise_library_error(
- lambda: lib.assign_role(target, role),
+ lambda: lib.assign_role(self.cib.tree, "role2", target),
(
severities.ERROR,
report_codes.CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET,
@@ -394,6 +330,27 @@ class AssignRoleTest(LibraryAclTest):
)
+@mock.patch("pcs.lib.cib.acl._assign_role")
+class AssignAllRoles(TestCase):
+ def test_success(self, assign_role):
+ assign_role.return_value = []
+ lib.assign_all_roles("acl_section", ["1", "2", "3"], "element")
+ assign_role.assert_has_calls([
+ mock.call("acl_section", "1", "element"),
+ mock.call("acl_section", "2", "element"),
+ mock.call("acl_section", "3", "element"),
+ ], any_order=True)
+
+ def test_fail_on_error_report(self, assign_role):
+ assign_role.return_value = ['report']
+ self.assertRaises(
+ LibraryError,
+ lambda:
+ lib.assign_all_roles("acl_section", ["1", "2", "3"], "element")
+ )
+
+
+
class UnassignRoleTest(LibraryAclTest):
def setUp(self):
LibraryAclTest.setUp(self)
@@ -508,64 +465,6 @@ class UnassignRoleTest(LibraryAclTest):
))
-class FindTargetTest(LibraryAclTest, ExtendedAssertionsMixin):
- def setUp(self):
- LibraryAclTest.setUp(self)
- self.cib.append_to_first_tag_name(
- "configuration", '<acl_target id="target1"/>'
- )
- self.cib.append_to_first_tag_name(
- "configuration",
- """
- <acls>
- <acl_target id="target1" description="test"/>
- </acls>
- """
- )
-
- def test_success(self):
- assert_xml_equal(
- '<acl_target id="target1" description="test"/>',
- etree.tostring(lib.find_target(self.cib.tree, "target1")).decode()
- )
-
- def test_not_found(self):
- self.assert_raises(
- lib.AclTargetNotFound,
- lambda: lib.find_target(self.cib.tree, "target2"),
- {"target_id": "target2"}
- )
-
-
-class FindGroupTest(LibraryAclTest, ExtendedAssertionsMixin):
- def setUp(self):
- LibraryAclTest.setUp(self)
- self.cib.append_to_first_tag_name(
- "configuration", '<acl_group id="group2"/>'
- )
- self.cib.append_to_first_tag_name(
- "configuration",
- """
- <acls>
- <acl_group id="group1" description="desc"/>
- </acls>
- """
- )
-
- def test_success(self):
- assert_xml_equal(
- '<acl_group id="group1" description="desc"/>',
- etree.tostring(lib.find_group(self.cib.tree, "group1")).decode()
- )
-
- def test_not_found(self):
- self.assert_raises(
- lib.AclGroupNotFound,
- lambda: lib.find_group(self.cib.tree, "group2"),
- {"group_id": "group2"}
- )
-
-
class AddPermissionsToRoleTest(LibraryAclTest):
def test_add_for_correct_permissions(self):
role_id = 'role1'
@@ -590,7 +489,7 @@ class AddPermissionsToRoleTest(LibraryAclTest):
class ProvideRoleTest(LibraryAclTest):
def test_add_role_for_nonexisting_id(self):
role_id = 'new-id'
- lib.provide_role(self.cib.tree, role_id)
+ lib.provide_role(self.acls, role_id)
self.assert_cib_equal(
self.create_cib().append_to_first_tag_name('configuration', '''
@@ -622,7 +521,7 @@ class CreateTargetTest(LibraryAclTest):
self.cib.append_to_first_tag_name("acls", '<acl_target id="target2"/>')
def test_success(self):
- lib.create_target(self.cib.tree, "target1")
+ lib.create_target(self.acls, "target1")
self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
"configuration",
"""
@@ -635,7 +534,7 @@ class CreateTargetTest(LibraryAclTest):
))
def test_target_id_is_not_unique_id(self):
- lib.create_target(self.cib.tree, "target3")
+ lib.create_target(self.acls, "target3")
self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
"configuration",
"""
@@ -649,7 +548,7 @@ class CreateTargetTest(LibraryAclTest):
def test_target_id_is_not_unique_target_id(self):
assert_raise_library_error(
- lambda: lib.create_target(self.cib.tree, "target2"),
+ lambda: lib.create_target(self.acls, "target2"),
(
severities.ERROR,
report_codes.CIB_ACL_TARGET_ALREADY_EXISTS,
@@ -664,7 +563,7 @@ class CreateGroupTest(LibraryAclTest):
self.fixture_add_role("group2")
def test_success(self):
- lib.create_group(self.cib.tree, "group1")
+ lib.create_group(self.acls, "group1")
self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
"configuration",
"""
@@ -677,7 +576,7 @@ class CreateGroupTest(LibraryAclTest):
def test_existing_id(self):
assert_raise_library_error(
- lambda: lib.create_group(self.cib.tree, "group2"),
+ lambda: lib.create_group(self.acls, "group2"),
(
severities.ERROR,
report_codes.ID_ALREADY_EXISTS,
@@ -705,11 +604,15 @@ class RemoveTargetTest(LibraryAclTest, ExtendedAssertionsMixin):
def test_not_existing(self):
assert_raise_library_error(
- lambda: lib.remove_target(self.cib.tree, "target2"),
+ lambda: lib.remove_target(self.acls, "target2"),
(
severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {"id": "target2"}
+ report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
+ {
+ "id": "target2",
+ "expected_types": ["acl_target"],
+ "current_type": "acl_role",
+ }
)
)
@@ -736,8 +639,12 @@ class RemoveGroupTest(LibraryAclTest, ExtendedAssertionsMixin):
lambda: lib.remove_group(self.cib.tree, "group2"),
(
severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {"id": "group2"}
+ report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
+ {
+ "id": "group2",
+ "expected_types": ["acl_group"],
+ "current_type": "acl_role",
+ }
)
)
@@ -814,10 +721,11 @@ class RemovePermissionTest(LibraryAclTest):
lambda: lib.remove_permission(self.tree, "role-id"),
(
severities.ERROR,
- report_codes.ID_NOT_FOUND,
+ report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
{
"id": "role-id",
- "id_description": "permission",
+ "expected_types": ["acl_permission"],
+ "current_type": "acl_role",
}
)
)
@@ -889,7 +797,7 @@ class GetRoleListTest(LibraryAclTest):
"permission_list": [],
}
]
- self.assertEqual(expected, lib.get_role_list(self.cib.tree))
+ self.assertEqual(expected, lib.get_role_list(self.acls))
class GetPermissionListTest(LibraryAclTest):
@@ -956,7 +864,7 @@ class GetPermissionListTest(LibraryAclTest):
self.assertEqual(expected, lib._get_permission_list(role_el))
-@mock.patch("pcs.lib.cib.acl._get_target_like_list_with_tag")
+@mock.patch("pcs.lib.cib.acl.get_target_like_list")
class GetTargetListTest(TestCase):
def test_success(self, mock_fn):
mock_fn.return_value = "returned data"
@@ -964,7 +872,7 @@ class GetTargetListTest(TestCase):
mock_fn.assert_called_once_with("tree", "acl_target")
-@mock.patch("pcs.lib.cib.acl._get_target_like_list_with_tag")
+@mock.patch("pcs.lib.cib.acl.get_target_like_list")
class GetGroupListTest(TestCase):
def test_success(self, mock_fn):
mock_fn.return_value = "returned data"
@@ -1005,7 +913,7 @@ class GetTargetLikeListWithTagTest(LibraryAclTest):
"role_list": ["role1", "role2", "role3"],
}
],
- lib._get_target_like_list_with_tag(self.cib.tree, "acl_target")
+ lib.get_target_like_list(self.acls, "acl_target")
)
def test_success_groups(self):
@@ -1020,7 +928,7 @@ class GetTargetLikeListWithTagTest(LibraryAclTest):
"role_list": [],
}
],
- lib._get_target_like_list_with_tag(self.cib.tree, "acl_group")
+ lib.get_target_like_list(self.acls, "acl_group")
)
@@ -1036,49 +944,73 @@ class GetRoleListOfTargetTest(LibraryAclTest):
)
-class AclErrorToReportItemTest(TestCase, ExtendedAssertionsMixin):
- def test_acl_target_not_found(self):
- assert_report_item_equal(
- lib.acl_error_to_report_item(lib.AclTargetNotFound("id")),
- (
- severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "id",
- "id_description": "user",
- }
- )
+@mock.patch("pcs.lib.cib.acl.find_group")
+@mock.patch("pcs.lib.cib.acl.find_target")
+class FindTargetOrGroup(TestCase):
+ def test_returns_target(self, find_target, find_group):
+ find_target.return_value = "target_element"
+ self.assertEqual(
+ lib.find_target_or_group("acl_section", "target_id"),
+ "target_element"
)
-
- def test_acl_group_not_found(self):
- assert_report_item_equal(
- lib.acl_error_to_report_item(lib.AclGroupNotFound("id")),
- (
- severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "id",
- "id_description": "group",
- }
- )
+ find_target.assert_called_once_with(
+ "acl_section",
+ "target_id",
+ none_if_id_unused=True
)
- def test_acl_role_not_found(self):
- assert_report_item_equal(
- lib.acl_error_to_report_item(lib.AclRoleNotFound("id")),
- (
- severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "id",
- "id_description": "role",
- }
- )
+ def test_returns_group_if_target_is_none(self, find_target, find_group):
+ find_target.return_value = None
+ find_group.return_value = "group_element"
+ self.assertEqual(
+ lib.find_target_or_group("acl_section", "group_id"),
+ "group_element"
+ )
+ find_target.assert_called_once_with(
+ "acl_section",
+ "group_id",
+ none_if_id_unused=True
+ )
+ find_group.assert_called_once_with(
+ "acl_section",
+ "group_id",
+ id_description="user/group"
)
- def test_unknown_exception(self):
- self.assert_raises(
- LibraryError,
- lambda: lib.acl_error_to_report_item(LibraryError())
+
+class Find(TestCase):
+ def test_refuses_bad_tag(self):
+ self.assertRaises(
+ AssertionError,
+ lambda: lib._find("bad_tag", "acl_section", "id")
)
+ @mock.patch("pcs.lib.cib.acl.find_element_by_tag_and_id")
+ def test_map_well_to_common_finder(self, common_finder):
+ common_finder.return_value = "element"
+ self.assertEqual("element", lib._find(
+ lib.TAG_GROUP, "acl_section", "group_id",
+ none_if_id_unused=True,
+ id_description="some description"
+ ))
+ common_finder.assert_called_once_with(
+ lib.TAG_GROUP,
+ "acl_section",
+ "group_id",
+ none_if_id_unused=True,
+ id_description="some description"
+ )
+
+ @mock.patch("pcs.lib.cib.acl.find_element_by_tag_and_id")
+ def test_map_well_to_common_finder_with_automatic_desc(self, common_finder):
+ common_finder.return_value = "element"
+ self.assertEqual("element", lib._find(
+ lib.TAG_GROUP, "acl_section", "group_id", none_if_id_unused=True
+ ))
+ common_finder.assert_called_once_with(
+ lib.TAG_GROUP,
+ "acl_section",
+ "group_id",
+ none_if_id_unused=True,
+ id_description=lib.TAG_DESCRIPTION_MAP[lib.TAG_GROUP]
+ )
diff --git a/pcs/lib/cib/test/test_alert.py b/pcs/lib/cib/test/test_alert.py
index 7739f2b..ca0bd2b 100644
--- a/pcs/lib/cib/test/test_alert.py
+++ b/pcs/lib/cib/test/test_alert.py
@@ -36,136 +36,6 @@ class UpdateOptionalAttributeTest(TestCase):
alert._update_optional_attribute(element, "attr", "")
self.assertTrue(element.get("attr") is None)
-
-class GetAlertByIdTest(TestCase):
- def test_found(self):
- xml = """
- <cib>
- <configuration>
- <alerts>
- <alert id="alert-1"/>
- <alert id="alert-2"/>
- </alerts>
- </configuration>
- </cib>
- """
- assert_xml_equal(
- '<alert id="alert-2"/>',
- etree.tostring(
- alert.get_alert_by_id(etree.XML(xml), "alert-2")
- ).decode()
- )
-
- def test_different_place(self):
- xml = """
- <cib>
- <configuration>
- <alerts>
- <alert id="alert-1"/>
- </alerts>
- <alert id="alert-2"/>
- </configuration>
- </cib>
- """
- assert_raise_library_error(
- lambda: alert.get_alert_by_id(etree.XML(xml), "alert-2"),
- (
- severities.ERROR,
- report_codes.CIB_ALERT_NOT_FOUND,
- {"alert": "alert-2"}
- )
- )
-
- def test_not_exist(self):
- xml = """
- <cib>
- <configuration>
- <alerts>
- <alert id="alert-1"/>
- </alerts>
- </configuration>
- </cib>
- """
- assert_raise_library_error(
- lambda: alert.get_alert_by_id(etree.XML(xml), "alert-2"),
- (
- severities.ERROR,
- report_codes.CIB_ALERT_NOT_FOUND,
- {"alert": "alert-2"}
- )
- )
-
-
-class GetRecipientByIdTest(TestCase):
- def setUp(self):
- self.xml = etree.XML(
- """
- <cib>
- <configuration>
- <alerts>
- <alert id="alert-1">
- <recipient id="rec-1" value="value1"/>
- <not_recipient id="rec-3" value="value3"/>
- <recipients>
- <recipient id="rec-4" value="value4"/>
- </recipients>
- </alert>
- <recipient id="rec-2" value="value2"/>
- </alerts>
- <alert id="alert-2"/>
- </configuration>
- </cib>
- """
- )
-
- def test_exist(self):
- assert_xml_equal(
- '<recipient id="rec-1" value="value1"/>',
- etree.tostring(
- alert.get_recipient_by_id(self.xml, "rec-1")
- ).decode()
- )
-
- def test_different_place(self):
- assert_raise_library_error(
- lambda: alert.get_recipient_by_id(self.xml, "rec-4"),
- (
- severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "rec-4",
- "id_description": "Recipient"
- }
- )
- )
-
- def test_not_in_alert(self):
- assert_raise_library_error(
- lambda: alert.get_recipient_by_id(self.xml, "rec-2"),
- (
- severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "rec-2",
- "id_description": "Recipient"
- }
- )
- )
-
- def test_not_recipient(self):
- assert_raise_library_error(
- lambda: alert.get_recipient_by_id(self.xml, "rec-3"),
- (
- severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "rec-3",
- "id_description": "Recipient"
- }
- )
- )
-
-
class EnsureRecipientValueIsUniqueTest(TestCase):
def setUp(self):
self.mock_reporter = MockLibraryReportProcessor()
@@ -472,8 +342,13 @@ class UpdateAlertTest(TestCase):
lambda: alert.update_alert(self.tree, "alert0", "/test"),
(
severities.ERROR,
- report_codes.CIB_ALERT_NOT_FOUND,
- {"alert": "alert0"}
+ report_codes.ID_NOT_FOUND,
+ {
+ "id": "alert0",
+ "context_type": "alerts",
+ "context_id": "",
+ "id_description": "alert"
+ }
)
)
@@ -513,8 +388,13 @@ class RemoveAlertTest(TestCase):
lambda: alert.remove_alert(self.tree, "not-existing-id"),
(
severities.ERROR,
- report_codes.CIB_ALERT_NOT_FOUND,
- {"alert": "not-existing-id"}
+ report_codes.ID_NOT_FOUND,
+ {
+ "id": "not-existing-id",
+ "context_type": "alerts",
+ "context_id": "",
+ "id_description": "alert"
+ }
)
)
@@ -668,8 +548,13 @@ class AddRecipientTest(TestCase):
),
(
severities.ERROR,
- report_codes.CIB_ALERT_NOT_FOUND,
- {"alert": "alert1"}
+ report_codes.ID_NOT_FOUND,
+ {
+ "id": "alert1",
+ "context_type": "alerts",
+ "context_id": "",
+ "id_description": "alert"
+ }
)
)
@@ -990,7 +875,7 @@ class UpdateRecipientTest(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "recipient",
- "id_description": "Recipient"
+ "id_description": "recipient"
}
)
)
@@ -1038,7 +923,9 @@ class RemoveRecipientTest(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "recipient",
- "id_description": "Recipient"
+ "context_type": "alerts",
+ "context_id": "",
+ "id_description": "recipient",
}
)
)
diff --git a/pcs/lib/cib/test/test_constraint.py b/pcs/lib/cib/test/test_constraint.py
index a4ee636..fe9cc42 100644
--- a/pcs/lib/cib/test/test_constraint.py
+++ b/pcs/lib/cib/test/test_constraint.py
@@ -31,7 +31,7 @@ def fixture_element(tag, id):
return element
@mock.patch("pcs.lib.cib.constraint.constraint.find_parent")
-@mock.patch("pcs.lib.cib.constraint.constraint.resource.find_by_id")
+@mock.patch("pcs.lib.cib.constraint.constraint.find_element_by_tag_and_id")
class FindValidResourceId(TestCase):
def setUp(self):
self.cib = "cib"
@@ -44,28 +44,47 @@ class FindValidResourceId(TestCase):
in_clone_allowed=False,
)
- def test_raises_when_element_not_found(self, mock_find_by_id, _):
- mock_find_by_id.return_value = None
- assert_raise_library_error(
- lambda: self.find(id="resourceA"),
- (
- severities.ERROR,
- report_codes.RESOURCE_DOES_NOT_EXIST,
- {"resource_id": "resourceA"}
- ),
+ def fixture_error_multiinstance(self, parent_type, parent_id):
+ return (
+ severities.ERROR,
+ report_codes.RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE,
+ {
+ "resource_id": "resourceA",
+ "parent_type": parent_type,
+ "parent_id": parent_id,
+ },
+ report_codes.FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE
+ )
+
+ def fixture_warning_multiinstance(self, parent_type, parent_id):
+ return (
+ severities.WARNING,
+ report_codes.RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE,
+ {
+ "resource_id": "resourceA",
+ "parent_type": parent_type,
+ "parent_id": parent_id,
+ },
+ None
)
def test_return_same_id_when_resource_is_clone(self, mock_find_by_id, _):
mock_find_by_id.return_value = fixture_element("clone", "resourceA")
self.assertEqual("resourceA", self.find(id="resourceA"))
+ def test_return_same_id_when_resource_is_master(self, mock_find_by_id, _):
+ mock_find_by_id.return_value = fixture_element("master", "resourceA")
+ self.assertEqual("resourceA", self.find(id="resourceA"))
+
+ def test_return_same_id_when_resource_is_bundle(self, mock_find_by_id, _):
+ mock_find_by_id.return_value = fixture_element("bundle", "resourceA")
+ self.assertEqual("resourceA", self.find(id="resourceA"))
- def test_return_same_id_when_is_primitive_but_not_in_clone(
+ def test_return_same_id_when_resource_is_standalone_primitive(
self, mock_find_by_id, mock_find_parent
):
mock_find_by_id.return_value = fixture_element("primitive", "resourceA")
mock_find_parent.return_value = None
-
self.assertEqual("resourceA", self.find(id="resourceA"))
def test_refuse_when_resource_is_in_clone(
@@ -73,19 +92,29 @@ class FindValidResourceId(TestCase):
):
mock_find_by_id.return_value = fixture_element("primitive", "resourceA")
mock_find_parent.return_value = fixture_element("clone", "clone_id")
+ assert_raise_library_error(
+ lambda: self.find(id="resourceA"),
+ self.fixture_error_multiinstance("clone", "clone_id"),
+ )
+
+ def test_refuse_when_resource_is_in_master(
+ self, mock_find_by_id, mock_find_parent
+ ):
+ mock_find_by_id.return_value = fixture_element("primitive", "resourceA")
+ mock_find_parent.return_value = fixture_element("master", "master_id")
+ assert_raise_library_error(
+ lambda: self.find(id="resourceA"),
+ self.fixture_error_multiinstance("master", "master_id"),
+ )
+ def test_refuse_when_resource_is_in_bundle(
+ self, mock_find_by_id, mock_find_parent
+ ):
+ mock_find_by_id.return_value = fixture_element("primitive", "resourceA")
+ mock_find_parent.return_value = fixture_element("bundle", "bundle_id")
assert_raise_library_error(
lambda: self.find(id="resourceA"),
- (
- severities.ERROR,
- report_codes.RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE,
- {
- "resource_id": "resourceA",
- "parent_type": "clone",
- "parent_id": "clone_id",
- },
- report_codes.FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE
- ),
+ self.fixture_error_multiinstance("bundle", "bundle_id"),
)
def test_return_clone_id_when_repair_allowed(
@@ -102,6 +131,34 @@ class FindValidResourceId(TestCase):
self.report_processor.report_item_list, []
)
+ def test_return_master_id_when_repair_allowed(
+ self, mock_find_by_id, mock_find_parent
+ ):
+ mock_find_by_id.return_value = fixture_element("primitive", "resourceA")
+ mock_find_parent.return_value = fixture_element("master", "master_id")
+
+ self.assertEqual(
+ "master_id",
+ self.find(can_repair_to_clone=True, id="resourceA")
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
+ def test_return_bundle_id_when_repair_allowed(
+ self, mock_find_by_id, mock_find_parent
+ ):
+ mock_find_by_id.return_value = fixture_element("primitive", "resourceA")
+ mock_find_parent.return_value = fixture_element("bundle", "bundle_id")
+
+ self.assertEqual(
+ "bundle_id",
+ self.find(can_repair_to_clone=True, id="resourceA")
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list, []
+ )
+
def test_return_resource_id_when_in_clone_allowed(
self, mock_find_by_id, mock_find_parent
):
@@ -112,15 +169,46 @@ class FindValidResourceId(TestCase):
"resourceA",
self.find(in_clone_allowed=True, id="resourceA")
)
- assert_report_item_list_equal(self.report_processor.report_item_list, [(
- severities.WARNING,
- report_codes.RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE,
- {
- "resource_id": "resourceA",
- "parent_type": "clone",
- "parent_id": "clone_id",
- },
- )])
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ self.fixture_warning_multiinstance("clone", "clone_id"),
+ ]
+ )
+
+ def test_return_resource_id_when_in_master_allowed(
+ self, mock_find_by_id, mock_find_parent
+ ):
+ mock_find_by_id.return_value = fixture_element("primitive", "resourceA")
+ mock_find_parent.return_value = fixture_element("master", "master_id")
+
+ self.assertEqual(
+ "resourceA",
+ self.find(in_clone_allowed=True, id="resourceA")
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ self.fixture_warning_multiinstance("master", "master_id"),
+ ]
+ )
+
+ def test_return_resource_id_when_in_bundle_allowed(
+ self, mock_find_by_id, mock_find_parent
+ ):
+ mock_find_by_id.return_value = fixture_element("primitive", "resourceA")
+ mock_find_parent.return_value = fixture_element("bundle", "bundle_id")
+
+ self.assertEqual(
+ "resourceA",
+ self.find(in_clone_allowed=True, id="resourceA")
+ )
+ assert_report_item_list_equal(
+ self.report_processor.report_item_list,
+ [
+ self.fixture_warning_multiinstance("bundle", "bundle_id"),
+ ]
+ )
class PrepareOptionsTest(TestCase):
def test_refuse_unknown_option(self):
@@ -132,7 +220,7 @@ class PrepareOptionsTest(TestCase):
severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "b",
+ "option_names": ["b"],
"option_type": None,
"allowed": ["a", "id"],
}
diff --git a/pcs/lib/cib/test/test_constraint_colocation.py b/pcs/lib/cib/test/test_constraint_colocation.py
index 6a85d8a..1b38253 100644
--- a/pcs/lib/cib/test/test_constraint_colocation.py
+++ b/pcs/lib/cib/test/test_constraint_colocation.py
@@ -86,7 +86,7 @@ class PrepareOptionsWithSetTest(TestCase):
severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "unknown",
+ "option_names": ["unknown"],
"option_type": None,
"allowed": [
"id",
diff --git a/pcs/lib/cib/test/test_constraint_order.py b/pcs/lib/cib/test/test_constraint_order.py
index 3cb33d1..90373b2 100644
--- a/pcs/lib/cib/test/test_constraint_order.py
+++ b/pcs/lib/cib/test/test_constraint_order.py
@@ -95,7 +95,7 @@ class PrepareOptionsWithSetTest(TestCase):
severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "unknown",
+ "option_names": ["unknown"],
"option_type": None,
"allowed": [ "id", "kind", "symmetrical"],
}
diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py
index b720b55..5a39388 100644
--- a/pcs/lib/cib/test/test_constraint_ticket.py
+++ b/pcs/lib/cib/test/test_constraint_ticket.py
@@ -72,7 +72,7 @@ class PrepareOptionsPlainTest(TestCase):
severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "unknown",
+ "option_names": ["unknown"],
"option_type": None,
"allowed": ["id", "loss-policy", "rsc", "rsc-role", "ticket"],
}
@@ -100,7 +100,7 @@ class PrepareOptionsPlainTest(TestCase):
severities.ERROR,
report_codes.REQUIRED_OPTION_IS_MISSING,
{
- "option_name": "ticket"
+ "option_names": ["ticket"]
}
),
)
@@ -114,7 +114,7 @@ class PrepareOptionsPlainTest(TestCase):
severities.ERROR,
report_codes.REQUIRED_OPTION_IS_MISSING,
{
- "option_name": "rsc",
+ "option_names": ["rsc"],
}
),
)
@@ -223,7 +223,7 @@ class PrepareOptionsWithSetTest(TestCase):
(
severities.ERROR,
report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "ticket"}
+ {"option_names": ["ticket"]}
)
)
@@ -237,7 +237,7 @@ class PrepareOptionsWithSetTest(TestCase):
(
severities.ERROR,
report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "ticket"}
+ {"option_names": ["ticket"]}
)
)
diff --git a/pcs/lib/cib/test/test_fencing_topology.py b/pcs/lib/cib/test/test_fencing_topology.py
new file mode 100644
index 0000000..52cf724
--- /dev/null
+++ b/pcs/lib/cib/test/test_fencing_topology.py
@@ -0,0 +1,984 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError, ReportItemSeverity as severity
+from pcs.lib.pacemaker.state import ClusterState
+from pcs.test.tools.assertions import (
+ assert_raise_library_error,
+ assert_report_item_list_equal,
+ assert_xml_equal,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.misc import create_patcher
+from pcs.test.tools.pcs_unittest import TestCase#, mock
+from pcs.test.tools.xml import etree_to_str
+
+from pcs.common.fencing_topology import (
+ TARGET_TYPE_NODE,
+ TARGET_TYPE_REGEXP,
+ TARGET_TYPE_ATTRIBUTE,
+)
+from pcs.lib.cib import fencing_topology as lib
+
+
+patch_lib = create_patcher("pcs.lib.cib.fencing_topology")
+
+
+class CibMixin(object):
+ def get_cib(self):
+ return etree.fromstring("""
+ <fencing-topology>
+ <fencing-level
+ id="fl1" index="1" devices="d1,d2" target="nodeA"
+ />
+ <fencing-level
+ id="fl2" index="2" devices="d3" target="nodeA"
+ />
+ <fencing-level
+ id="fl3" index="1" devices="d2,d1" target="nodeB"
+ />
+ <fencing-level
+ id="fl4" index="2" devices="d3" target="nodeB"
+ />
+ <fencing-level
+ id="fl5" index="1" devices="d3,d4" target-pattern="node\d+"
+ />
+ <fencing-level
+ id="fl6" index="2" devices="d1" target-pattern="node\d+"
+ />
+ <fencing-level
+ id="fl7" index="3" devices="d3,d4"
+ target-attribute="fencing" target-value="improved"
+ />
+ <fencing-level
+ id="fl8" index="4" devices="d5"
+ target-attribute="fencing" target-value="improved"
+ />
+ <fencing-level
+ id="fl9" index="3" devices="dR" target-pattern="node-R.*"
+ />
+ <fencing-level
+ id="fl10" index="4" devices="dR-special"
+ target-attribute="fencing" target-value="remote-special"
+ />
+ </fencing-topology>
+ """)
+
+
+class StatusNodesMixin(object):
+ def get_status(self):
+ return ClusterState("""
+ <crm_mon version="1.1.15">
+ <summary>
+ <current_dc present="true" />
+ <nodes_configured number="2" expected_votes="unknown" />
+ <resources_configured number="0" />
+ </summary>
+ <nodes>
+ <node name="nodeA" id="1" online="true" standby="false"
+ standby_onfail="false" maintenance="false"
+ pending="false" unclean="false" shutdown="false"
+ expected_up="true" is_dc="true" resources_running="0"
+ type="member"
+ />
+ <node name="nodeB" id="2" online="true" standby="false"
+ standby_onfail="false" maintenance="false"
+ pending="false" unclean="false" shutdown="false"
+ expected_up="true" is_dc="false" resources_running="0"
+ type="member"
+ />
+ </nodes>
+ </crm_mon>
+ """).node_section.nodes
+
+
+@patch_lib("_append_level_element")
+@patch_lib("_validate_level_target_devices_does_not_exist")
+@patch_lib("_validate_devices")
+@patch_lib("_validate_target")
+@patch_lib("_validate_level", return_value="valid_level")
+class AddLevel(TestCase):
+ def setUp(self):
+ self.reporter = MockLibraryReportProcessor()
+
+ def assert_validators_called(
+ self, mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
+ dupl_called=True
+ ):
+ mock_val_level.assert_called_once_with(self.reporter, "level")
+ mock_val_target.assert_called_once_with(
+ self.reporter, "cluster_status_nodes", "target_type",
+ "target_value", "force_node"
+ )
+ mock_val_devices.assert_called_once_with(
+ self.reporter, "resources_el", "devices", "force_device"
+ )
+ if dupl_called:
+ mock_val_dupl.assert_called_once_with(
+ self.reporter, "topology_el", "level", "target_type",
+ "target_value", "devices"
+ )
+ else:
+ mock_val_dupl.assert_not_called()
+
+ def assert_called_invalid(
+ self, mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
+ mock_append, dupl_called=True
+ ):
+ self.assertRaises(
+ LibraryError,
+ lambda: lib.add_level(
+ self.reporter, "topology_el", "resources_el", "level",
+ "target_type", "target_value", "devices",
+ "cluster_status_nodes", "force_device", "force_node"
+ )
+ )
+ self.assert_validators_called(
+ mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
+ dupl_called
+ )
+ mock_append.assert_not_called()
+
+ def test_success(
+ self, mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
+ mock_append
+ ):
+ lib.add_level(
+ self.reporter, "topology_el", "resources_el", "level",
+ "target_type", "target_value", "devices", "cluster_status_nodes",
+ "force_device", "force_node"
+ )
+ self.assert_validators_called(
+ mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl
+ )
+ mock_append.assert_called_once_with(
+ "topology_el", "valid_level", "target_type", "target_value",
+ "devices"
+ )
+
+ def test_invalid_level(
+ self, mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
+ mock_append
+ ):
+ mock_val_level.side_effect = lambda reporter, level: reporter.append(
+ reports.invalid_option_value("level", level, "a positive integer")
+ )
+ self.assert_called_invalid(
+ mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
+ mock_append, dupl_called=False
+ )
+
+ def test_invalid_target(
+ self, mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
+ mock_append
+ ):
+ mock_val_target.side_effect = (
+ lambda reporter, status_nodes, target_type, target_value, force:
+ reporter.append(
+ reports.node_not_found(target_value)
+ )
+ )
+ self.assert_called_invalid(
+ mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
+ mock_append, dupl_called=False
+ )
+
+ def test_invalid_devices(
+ self, mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
+ mock_append
+ ):
+ mock_val_devices.side_effect = (
+ lambda reporter, resources, devices, force:
+ reporter.append(
+ reports.stonith_resources_do_not_exist(["device"])
+ )
+ )
+ self.assert_called_invalid(
+ mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
+ mock_append, dupl_called=False
+ )
+
+ def test_already_exists(
+ self, mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
+ mock_append
+ ):
+ mock_val_dupl.side_effect = (
+ lambda reporter, tree, level, target_type, target_value, devices:
+ reporter.append(
+ reports.fencing_level_already_exists(
+ level, target_type, target_value, devices
+ )
+ )
+ )
+ self.assert_called_invalid(
+ mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
+ mock_append, dupl_called=True
+ )
+
+
+class RemoveAllLevels(TestCase, CibMixin):
+ def setUp(self):
+ self.tree = self.get_cib()
+
+ def test_success(self):
+ lib.remove_all_levels(self.tree)
+ assert_xml_equal(
+ "<fencing-topology />",
+ etree_to_str(self.tree)
+ )
+
+
+class RemoveLevelsByParams(TestCase, CibMixin):
+ def setUp(self):
+ self.tree = self.get_cib()
+ self.reporter = MockLibraryReportProcessor()
+
+ def get_remaining_ids(self):
+ return [el.get("id") for el in self.tree.findall("fencing-level")]
+
+ def test_level(self):
+ lib.remove_levels_by_params(
+ self.reporter, self.tree, level=2
+ )
+ self.assertEqual(
+ self.get_remaining_ids(),
+ ["fl1", "fl3", "fl5", "fl7", "fl8", "fl9", "fl10"]
+ )
+ assert_report_item_list_equal(self.reporter.report_item_list, [])
+
+ def test_target_node(self):
+ lib.remove_levels_by_params(
+ self.reporter, self.tree, target_type=TARGET_TYPE_NODE,
+ target_value="nodeA"
+ )
+ self.assertEqual(
+ self.get_remaining_ids(),
+ ["fl3", "fl4", "fl5", "fl6", "fl7", "fl8", "fl9", "fl10"]
+ )
+ assert_report_item_list_equal(self.reporter.report_item_list, [])
+
+ def test_target_pattern(self):
+ lib.remove_levels_by_params(
+ self.reporter, self.tree, target_type=TARGET_TYPE_REGEXP,
+ target_value="node\d+"
+ )
+ self.assertEqual(
+ self.get_remaining_ids(),
+ ["fl1", "fl2", "fl3", "fl4", "fl7", "fl8", "fl9", "fl10"]
+ )
+ assert_report_item_list_equal(self.reporter.report_item_list, [])
+
+ def test_target_attrib(self):
+ lib.remove_levels_by_params(
+ self.reporter, self.tree, target_type=TARGET_TYPE_ATTRIBUTE,
+ target_value=("fencing", "improved")
+ )
+ self.assertEqual(
+ self.get_remaining_ids(),
+ ["fl1", "fl2", "fl3", "fl4", "fl5", "fl6", "fl9", "fl10"]
+ )
+ assert_report_item_list_equal(self.reporter.report_item_list, [])
+
+ def test_one_device(self):
+ lib.remove_levels_by_params(
+ self.reporter, self.tree, devices=["d3"]
+ )
+ self.assertEqual(
+ self.get_remaining_ids(),
+ ["fl1", "fl3", "fl5", "fl6", "fl7", "fl8", "fl9", "fl10"]
+ )
+ assert_report_item_list_equal(self.reporter.report_item_list, [])
+
+ def test_more_devices(self):
+ lib.remove_levels_by_params(
+ self.reporter, self.tree, devices=["d2", "d1"]
+ )
+ self.assertEqual(
+ self.get_remaining_ids(),
+ ["fl1", "fl2", "fl4", "fl5", "fl6", "fl7", "fl8", "fl9", "fl10"]
+ )
+ assert_report_item_list_equal(self.reporter.report_item_list, [])
+
+ def test_combination(self):
+ lib.remove_levels_by_params(
+ self.reporter, self.tree, 2, TARGET_TYPE_NODE, "nodeB", ["d3"]
+ )
+ self.assertEqual(
+ self.get_remaining_ids(),
+ ["fl1", "fl2", "fl3", "fl5", "fl6", "fl7", "fl8", "fl9", "fl10"]
+ )
+ assert_report_item_list_equal(self.reporter.report_item_list, [])
+
+ def test_invalid_target(self):
+ assert_raise_library_error(
+ lambda: lib.remove_levels_by_params(
+ self.reporter, self.tree, target_type="bad_target",
+ target_value="nodeA"
+ ),
+ (
+ severity.ERROR,
+ report_codes.INVALID_OPTION_TYPE,
+ {
+ "option_name": "target",
+ "allowed_types": [
+ "node",
+ "regular expression",
+ "attribute_name=value"
+ ]
+ },
+ None
+ ),
+ )
+ self.assertEqual(
+ self.get_remaining_ids(),
+ [
+ "fl1", "fl2", "fl3", "fl4", "fl5", "fl6", "fl7", "fl8", "fl9",
+ "fl10"
+ ]
+ )
+
+ def test_no_such_level(self):
+ assert_raise_library_error(
+ lambda: lib.remove_levels_by_params(
+ self.reporter, self.tree, 9, TARGET_TYPE_NODE, "nodeB", ["d3"]
+ ),
+ (
+ severity.ERROR,
+ report_codes.CIB_FENCING_LEVEL_DOES_NOT_EXIST,
+ {
+ "devices": ["d3", ],
+ "target_type": TARGET_TYPE_NODE,
+ "target_value": "nodeB",
+ "level": 9,
+ },
+ None
+ ),
+ )
+ self.assertEqual(
+ self.get_remaining_ids(),
+ [
+ "fl1", "fl2", "fl3", "fl4", "fl5", "fl6", "fl7", "fl8", "fl9",
+ "fl10"
+ ]
+ )
+
+ def test_no_such_level_ignore_missing(self):
+ lib.remove_levels_by_params(
+ self.reporter, self.tree, 9, TARGET_TYPE_NODE, "nodeB", ["d3"], True
+ )
+ self.assertEqual(
+ self.get_remaining_ids(),
+ [
+ "fl1", "fl2", "fl3", "fl4", "fl5", "fl6", "fl7", "fl8", "fl9",
+ "fl10"
+ ]
+ )
+
+class RemoveDeviceFromAllLevels(TestCase, CibMixin):
+ def setUp(self):
+ self.tree = self.get_cib()
+
+ def test_success(self):
+ lib.remove_device_from_all_levels(self.tree, "d3")
+ assert_xml_equal(
+ """
+ <fencing-topology>
+ <fencing-level
+ id="fl1" index="1" devices="d1,d2" target="nodeA"
+ />
+ <fencing-level
+ id="fl3" index="1" devices="d2,d1" target="nodeB"
+ />
+ <fencing-level
+ id="fl5" index="1" devices="d4" target-pattern="node\d+"
+ />
+ <fencing-level
+ id="fl6" index="2" devices="d1" target-pattern="node\d+"
+ />
+ <fencing-level
+ id="fl7" index="3" devices="d4"
+ target-attribute="fencing" target-value="improved"
+ />
+ <fencing-level
+ id="fl8" index="4" devices="d5"
+ target-attribute="fencing" target-value="improved"
+ />
+ <fencing-level
+ id="fl9" index="3" devices="dR" target-pattern="node-R.*"
+ />
+ <fencing-level
+ id="fl10" index="4" devices="dR-special"
+ target-attribute="fencing" target-value="remote-special"
+ />
+ </fencing-topology>
+ """,
+ etree_to_str(self.tree)
+ )
+
+ def test_no_such_device(self):
+ original_xml = etree_to_str(self.tree)
+ lib.remove_device_from_all_levels(self.tree, "dX")
+ assert_xml_equal(original_xml, etree_to_str(self.tree))
+
+
+class Export(TestCase, CibMixin):
+ def test_empty(self):
+ self.assertEqual(
+ lib.export(etree.fromstring("<fencing-topology />")),
+ []
+ )
+
+ def test_success(self):
+ self.assertEqual(
+ lib.export(self.get_cib()),
+ [
+ {
+ "level": "1",
+ "target_type": "node",
+ "target_value": "nodeA",
+ "devices": ["d1", "d2"],
+ },
+ {
+ "level": "2",
+ "target_type": "node",
+ "target_value": "nodeA",
+ "devices": ["d3"],
+ },
+ {
+ "level": "1",
+ "target_type": "node",
+ "target_value": "nodeB",
+ "devices": ["d2", "d1"],
+ },
+ {
+ "level": "2",
+ "target_type": "node",
+ "target_value": "nodeB",
+ "devices": ["d3"],
+ },
+ {
+ "level": "1",
+ "target_type": "regexp",
+ "target_value": "node\d+",
+ "devices": ["d3", "d4"],
+ },
+ {
+ "level": "2",
+ "target_type": "regexp",
+ "target_value": "node\d+",
+ "devices": ["d1"],
+ },
+ {
+ "level": "3",
+ "target_type": "attribute",
+ "target_value": ("fencing", "improved"),
+ "devices": ["d3", "d4"],
+ },
+ {
+ "level": "4",
+ "target_type": "attribute",
+ "target_value": ("fencing", "improved"),
+ "devices": ["d5"],
+ },
+ {
+ "level": "3",
+ "target_type": "regexp",
+ "target_value": "node-R.*",
+ "devices": ["dR"],
+ },
+ {
+ "level": "4",
+ "target_type": "attribute",
+ "target_value": ("fencing", "remote-special"),
+ "devices": ["dR-special"],
+ }
+ ]
+ )
+
+
+class Verify(TestCase, CibMixin, StatusNodesMixin):
+ def fixture_resource(self, tree, name):
+ el = etree.SubElement(tree, "primitive", id=name, type="fence_dummy")
+ el.set("class", "stonith")
+
+ def test_empty(self):
+ resources = etree.fromstring("<resources />")
+ topology = etree.fromstring("<fencing-topology />")
+ reporter = MockLibraryReportProcessor()
+
+ lib.verify(reporter, topology, resources, self.get_status())
+
+ assert_report_item_list_equal(reporter.report_item_list, [])
+
+ def test_success(self):
+ resources = etree.fromstring("<resources />")
+ for name in ["d1", "d2", "d3", "d4", "d5", "dR", "dR-special"]:
+ self.fixture_resource(resources, name)
+ reporter = MockLibraryReportProcessor()
+
+ lib.verify(reporter, self.get_cib(), resources, self.get_status())
+
+ assert_report_item_list_equal(reporter.report_item_list, [])
+
+ def test_failures(self):
+ resources = etree.fromstring("<resources />")
+ reporter = MockLibraryReportProcessor()
+
+ lib.verify(reporter, self.get_cib(), resources, [])
+
+ report = [
+ (
+ severity.ERROR,
+ report_codes.STONITH_RESOURCES_DO_NOT_EXIST,
+ {
+ "stonith_ids": [
+ "d1", "d2", "d3", "d4", "d5", "dR", "dR-special"
+ ],
+ },
+ None
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_NOT_FOUND,
+ {
+ "node": "nodeA",
+ },
+ None
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_NOT_FOUND,
+ {
+ "node": "nodeB",
+ },
+ None
+ ),
+ ]
+ assert_report_item_list_equal(reporter.report_item_list, report)
+
+
+class ValidateLevel(TestCase):
+ def test_success(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_level(reporter, 1)
+ lib._validate_level(reporter, "1")
+ lib._validate_level(reporter, 9)
+ lib._validate_level(reporter, "9")
+ lib._validate_level(reporter, "05")
+ assert_report_item_list_equal(reporter.report_item_list, [])
+
+ def test_invalid(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_level(reporter, "")
+ lib._validate_level(reporter, 0)
+ lib._validate_level(reporter, "0")
+ lib._validate_level(reporter, -1)
+ lib._validate_level(reporter, "-1")
+ lib._validate_level(reporter, "1abc")
+ reports = []
+ for value in ["", 0, "0", -1, "-1", "1abc"]:
+ reports.append((
+ severity.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_value": value,
+ "option_name": "level",
+ "allowed_values": "a positive integer",
+ },
+ None
+ ))
+ assert_report_item_list_equal(reporter.report_item_list, reports)
+
+
+@patch_lib("_validate_target_valuewise")
+@patch_lib("_validate_target_typewise")
+class ValidateTarget(TestCase):
+ def test_delegate(self, validate_type, validate_value):
+ lib._validate_target("reporter", "status", "type", "value", "force")
+ validate_type.assert_called_once_with("reporter", "type")
+ validate_value.assert_called_once_with(
+ "reporter", "status", "type", "value", "force"
+ )
+
+
+class ValidateTargetTypewise(TestCase):
+ def test_success(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_target_typewise(reporter, TARGET_TYPE_NODE)
+ lib._validate_target_typewise(reporter, TARGET_TYPE_ATTRIBUTE)
+ lib._validate_target_typewise(reporter, TARGET_TYPE_REGEXP)
+ assert_report_item_list_equal(reporter.report_item_list, [])
+
+ def test_empty(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_target_typewise(reporter, "")
+ report = [(
+ severity.ERROR,
+ report_codes.INVALID_OPTION_TYPE,
+ {
+ "option_name": "target",
+ "allowed_types": [
+ "node",
+ "regular expression",
+ "attribute_name=value"
+ ],
+ },
+ None
+ )]
+ assert_report_item_list_equal(reporter.report_item_list, report)
+
+ def test_invalid(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_target_typewise(reporter, "bad_target")
+ report = [(
+ severity.ERROR,
+ report_codes.INVALID_OPTION_TYPE,
+ {
+ "option_name": "target",
+ "allowed_types": [
+ "node",
+ "regular expression",
+ "attribute_name=value"
+ ],
+ },
+ None
+ )]
+ assert_report_item_list_equal(reporter.report_item_list, report)
+
+
+class ValidateTargetValuewise(TestCase, StatusNodesMixin):
+ def setUp(self):
+ self.state = self.get_status()
+
+ def test_node_valid(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_target_valuewise(
+ reporter, self.state, TARGET_TYPE_NODE, "nodeA"
+ )
+ assert_report_item_list_equal(reporter.report_item_list, [])
+
+ def test_node_empty(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_target_valuewise(
+ reporter, self.state, TARGET_TYPE_NODE, ""
+ )
+ report = [(
+ severity.ERROR,
+ report_codes.NODE_NOT_FOUND,
+ {
+ "node": "",
+ },
+ report_codes.FORCE_NODE_DOES_NOT_EXIST
+ )]
+ assert_report_item_list_equal(reporter.report_item_list, report)
+
+ def test_node_invalid(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_target_valuewise(
+ reporter, self.state, TARGET_TYPE_NODE, "rh7-x"
+ )
+ report = [(
+ severity.ERROR,
+ report_codes.NODE_NOT_FOUND,
+ {
+ "node": "rh7-x",
+ },
+ report_codes.FORCE_NODE_DOES_NOT_EXIST
+ )]
+ assert_report_item_list_equal(reporter.report_item_list, report)
+
+ def test_node_invalid_force(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_target_valuewise(
+ reporter, self.state, TARGET_TYPE_NODE, "rh7-x", force_node=True
+ )
+ report = [(
+ severity.WARNING,
+ report_codes.NODE_NOT_FOUND,
+ {
+ "node": "rh7-x",
+ },
+ None
+ )]
+ assert_report_item_list_equal(reporter.report_item_list, report)
+
+ def test_node_invalid_not_forceable(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_target_valuewise(
+ reporter, self.state, TARGET_TYPE_NODE, "rh7-x", allow_force=False
+ )
+ report = [(
+ severity.ERROR,
+ report_codes.NODE_NOT_FOUND,
+ {
+ "node": "rh7-x",
+ },
+ None
+ )]
+ assert_report_item_list_equal(reporter.report_item_list, report)
+
+
+class ValidateDevices(TestCase):
+ def setUp(self):
+ self.resources_el = etree.fromstring("""
+ <resources>
+ <primitive id="dummy"
+ class="ocf" provider="pacemaker" type="Stateful"
+ />
+ <primitive id="stonith1" class="stonith" type="fence_xvm" />
+ <primitive id="stonith2" class="stonith" type="fence_apc" />
+ </resources>
+ """)
+
+ def test_success(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_devices(
+ reporter, self.resources_el, ["stonith1"]
+ )
+ lib._validate_devices(
+ reporter, self.resources_el, ["stonith1", "stonith2"]
+ )
+ assert_report_item_list_equal(reporter.report_item_list, [])
+
+ def test_empty(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_devices(reporter, self.resources_el, [])
+ report = [(
+ severity.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_type": None,
+ "option_names": ["stonith devices"],
+ },
+ None
+ )]
+ assert_report_item_list_equal(reporter.report_item_list, report)
+
+ def test_invalid(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_devices(reporter, self.resources_el, ["dummy", "fenceX"])
+ report = [(
+ severity.ERROR,
+ report_codes.STONITH_RESOURCES_DO_NOT_EXIST,
+ {
+ "stonith_ids": ["dummy", "fenceX"],
+ },
+ report_codes.FORCE_STONITH_RESOURCE_DOES_NOT_EXIST
+ )]
+ assert_report_item_list_equal(reporter.report_item_list, report)
+
+ def test_invalid_forced(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_devices(
+ reporter, self.resources_el, ["dummy", "fenceX"], force_device=True
+ )
+ report = [(
+ severity.WARNING,
+ report_codes.STONITH_RESOURCES_DO_NOT_EXIST,
+ {
+ "stonith_ids": ["dummy", "fenceX"],
+ },
+ None
+ )]
+ assert_report_item_list_equal(reporter.report_item_list, report)
+
+ def test_node_invalid_not_forceable(self):
+ reporter = MockLibraryReportProcessor()
+ lib._validate_devices(
+ reporter, self.resources_el, ["dummy", "fenceX"], allow_force=False
+ )
+ report = [(
+ severity.ERROR,
+ report_codes.STONITH_RESOURCES_DO_NOT_EXIST,
+ {
+ "stonith_ids": ["dummy", "fenceX"],
+ },
+ None
+ )]
+ assert_report_item_list_equal(reporter.report_item_list, report)
+
+
+@patch_lib("_find_level_elements")
+class ValidateLevelTargetDevicesDoesNotExist(TestCase):
+ def test_success(self, mock_find):
+ mock_find.return_value = []
+ reporter = MockLibraryReportProcessor()
+
+ lib._validate_level_target_devices_does_not_exist(
+ reporter, "tree", "level", "target_type", "target_value", "devices"
+ )
+
+ mock_find.assert_called_once_with(
+ "tree", "level", "target_type", "target_value", "devices"
+ )
+ assert_report_item_list_equal(reporter.report_item_list, [])
+
+ def test_error(self, mock_find):
+ mock_find.return_value = ["element"]
+ reporter = MockLibraryReportProcessor()
+
+ lib._validate_level_target_devices_does_not_exist(
+ reporter, "tree", "level", "target_type", "target_value", "devices"
+ )
+
+ mock_find.assert_called_once_with(
+ "tree", "level", "target_type", "target_value", "devices"
+ )
+ report = [(
+ severity.ERROR,
+ report_codes.CIB_FENCING_LEVEL_ALREADY_EXISTS,
+ {
+ "devices": "devices",
+ "target_type": "target_type",
+ "target_value": "target_value",
+ "level": "level",
+ },
+ None
+ )]
+ assert_report_item_list_equal(reporter.report_item_list, report)
+
+
+class AppendLevelElement(TestCase):
+ def setUp(self):
+ self.tree = etree.fromstring("<fencing-topology />")
+
+ def test_node_name(self):
+ lib._append_level_element(
+ self.tree, 1, TARGET_TYPE_NODE, "node1", ["d1"]
+ )
+ assert_xml_equal(
+ """
+ <fencing-topology>
+ <fencing-level
+ id="fl-node1-1"
+ devices="d1" index="1" target="node1"
+ />
+ </fencing-topology>
+ """,
+ etree_to_str(self.tree)
+ )
+
+ def test_node_pattern(self):
+ lib._append_level_element(
+ self.tree, "2", TARGET_TYPE_REGEXP, "node-\d+", ["d1", "d2"]
+ )
+ assert_xml_equal(
+ """
+ <fencing-topology>
+ <fencing-level
+ id="fl-node-d-2"
+ devices="d1,d2" index="2" target-pattern="node-\d+"
+ />
+ </fencing-topology>
+ """,
+ etree_to_str(self.tree)
+ )
+
+ def test_node_attribute(self):
+ lib._append_level_element(
+ self.tree, 3, TARGET_TYPE_ATTRIBUTE, ("name%@x", "val%@x"), ["d1"],
+ )
+ assert_xml_equal(
+ """
+ <fencing-topology>
+ <fencing-level
+ id="fl-namex-3"
+ devices="d1"
+ index="3"
+ target-attribute="name%@x" target-value="val%@x"
+ />
+ </fencing-topology>
+ """,
+ etree_to_str(self.tree)
+ )
+
+
+class FindLevelElements(TestCase, CibMixin):
+ def setUp(self):
+ self.tree = self.get_cib()
+
+ def get_ids(self, elements):
+ return [el.get("id") for el in elements]
+
+ def test_no_filter(self):
+ self.assertEqual(
+ self.get_ids(lib._find_level_elements(self.tree)),
+ [
+ "fl1", "fl2", "fl3", "fl4", "fl5", "fl6", "fl7", "fl8", "fl9",
+ "fl10"
+ ]
+ )
+
+ def test_no_such_level(self):
+ self.assertEqual(
+ self.get_ids(lib._find_level_elements(
+ self.tree, level=2, target_type=TARGET_TYPE_NODE,
+ target_value="nodeB", devices=["d5"]
+ )),
+ []
+ )
+
+ def test_level(self):
+ self.assertEqual(
+ self.get_ids(lib._find_level_elements(
+ self.tree, level=1
+ )),
+ ["fl1", "fl3", "fl5"]
+ )
+
+ def test_target_node(self):
+ self.assertEqual(
+ self.get_ids(lib._find_level_elements(
+ self.tree, target_type=TARGET_TYPE_NODE, target_value="nodeB"
+ )),
+ ["fl3", "fl4"]
+ )
+
+ def test_target_pattern(self):
+ self.assertEqual(
+ self.get_ids(lib._find_level_elements(
+ self.tree, target_type=TARGET_TYPE_REGEXP,
+ target_value="node-R.*"
+ )),
+ ["fl9"]
+ )
+
+ def test_target_attribute(self):
+ self.assertEqual(
+ self.get_ids(lib._find_level_elements(
+ self.tree, target_type=TARGET_TYPE_ATTRIBUTE,
+ target_value=("fencing", "improved")
+ )),
+ ["fl7", "fl8"]
+ )
+
+ def test_devices(self):
+ self.assertEqual(
+ self.get_ids(lib._find_level_elements(
+ self.tree, devices=["d3"]
+ )),
+ ["fl2", "fl4"]
+ )
+
+ self.assertEqual(
+ self.get_ids(lib._find_level_elements(
+ self.tree, devices=["d1", "d2"]
+ )),
+ ["fl1"]
+ )
+
+ def test_combination(self):
+ self.assertEqual(
+ self.get_ids(lib._find_level_elements(
+ self.tree, 2, TARGET_TYPE_NODE, "nodeB", ["d3"]
+ )),
+ ["fl4"]
+ )
diff --git a/pcs/lib/cib/test/test_node.py b/pcs/lib/cib/test/test_node.py
new file mode 100644
index 0000000..bd5a309
--- /dev/null
+++ b/pcs/lib/cib/test/test_node.py
@@ -0,0 +1,233 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severity
+from pcs.lib.pacemaker.state import ClusterState
+from pcs.test.tools.assertions import (
+ assert_raise_library_error,
+ assert_xml_equal,
+)
+from pcs.test.tools.pcs_unittest import TestCase, mock
+from pcs.test.tools.xml import etree_to_str
+
+from pcs.lib.cib import node
+
+
+@mock.patch("pcs.lib.cib.node._ensure_node_exists")
+class UpdateNodeInstanceAttrs(TestCase):
+ def setUp(self):
+ self.node1 = etree.fromstring("""
+ <node id="1" uname="rh73-node1"/>
+ """)
+ self.node2 = etree.fromstring("""
+ <node id="2" uname="rh73-node2">
+ <instance_attributes id="nodes-2">
+ <nvpair name="a" value="A" />
+ <nvpair name="b" value="B" />
+ <nvpair name="c" value="C" />
+ </instance_attributes>
+ </node>
+ """)
+ self.node3 = etree.fromstring("""
+ <node id="3" uname="rh73-node3" >
+ <instance_attributes id="nodes-3-a">
+ <nvpair name="a" value="A" />
+ </instance_attributes>
+ <instance_attributes id="nodes-3-b">
+ <nvpair name="b" value="B" />
+ </instance_attributes>
+ </node>
+ """)
+ self.cib = etree.fromstring("""
+ <cib>
+ <configuration>
+ <nodes>{0}{1}{2}</nodes>
+ </configuration>
+ </cib>
+ """.format(*[
+ etree_to_str(el) for el in [self.node1, self.node2, self.node3]
+ ]))
+ self.state = "node state list"
+
+ def test_empty_node(self, mock_get_node):
+ mock_get_node.return_value = self.node1
+ node.update_node_instance_attrs(
+ self.cib, "rh73-node1", {"x": "X"}, self.state
+ )
+ assert_xml_equal(
+ etree_to_str(self.node1),
+ """
+ <node id="1" uname="rh73-node1">
+ <instance_attributes id="nodes-1">
+ <nvpair id="nodes-1-x" name="x" value="X" />
+ </instance_attributes>
+ </node>
+ """
+ )
+
+ def test_existing_attrs(self, mock_get_node):
+ mock_get_node.return_value = self.node2
+ node.update_node_instance_attrs(
+ self.cib, "rh73-node2", {"a": "", "b": "b", "x": "X"}, self.state
+ )
+ assert_xml_equal(
+ etree_to_str(self.node2),
+ """
+ <node id="2" uname="rh73-node2">
+ <instance_attributes id="nodes-2">
+ <nvpair name="b" value="b" />
+ <nvpair name="c" value="C" />
+ <nvpair id="nodes-2-x" name="x" value="X" />
+ </instance_attributes>
+ </node>
+ """
+ )
+
+ def test_multiple_attrs_sets(self, mock_get_node):
+ mock_get_node.return_value = self.node3
+ node.update_node_instance_attrs(
+ self.cib, "rh73-node3", {"x": "X"}, self.state
+ )
+ assert_xml_equal(
+ etree_to_str(self.node3),
+ """
+ <node id="3" uname="rh73-node3" >
+ <instance_attributes id="nodes-3-a">
+ <nvpair name="a" value="A" />
+ <nvpair id="nodes-3-a-x" name="x" value="X" />
+ </instance_attributes>
+ <instance_attributes id="nodes-3-b">
+ <nvpair name="b" value="B" />
+ </instance_attributes>
+ </node>
+ """
+ )
+
+class EnsureNodeExists(TestCase):
+ def setUp(self):
+ self.node1 = etree.fromstring("""
+ <node id="1" uname="name-test1" type="member" />
+ """)
+ self.node2 = etree.fromstring("""
+ <node id="2" uname="name-test2" type="member" />
+ """)
+ self.nodes = etree.Element("nodes")
+ self.nodes.append(self.node1)
+
+ self.state = ClusterState("""
+ <crm_mon version="1.1.15">
+ <summary>
+ <current_dc present="true" />
+ <nodes_configured number="2" expected_votes="unknown" />
+ <resources_configured number="0" />
+ </summary>
+ <nodes>
+ <node name="name-test1" id="1" online="true" standby="false"
+ standby_onfail="false" maintenance="false"
+ pending="false" unclean="false" shutdown="false"
+ expected_up="true" is_dc="true" resources_running="0"
+ type="member"
+ />
+ <node name="name-test2" id="2" online="true" standby="false"
+ standby_onfail="false" maintenance="false"
+ pending="false" unclean="false" shutdown="false"
+ expected_up="true" is_dc="false" resources_running="0"
+ type="member"
+ />
+ </nodes>
+ </crm_mon>
+ """).node_section.nodes
+
+ def test_node_already_exists(self):
+ assert_xml_equal(
+ etree_to_str(node._ensure_node_exists(self.nodes, "name-test1")),
+ etree_to_str(self.node1)
+ )
+
+ def test_node_missing_no_state(self):
+ assert_raise_library_error(
+ lambda: node._ensure_node_exists(self.nodes, "name-missing"),
+ (
+ severity.ERROR,
+ report_codes.NODE_NOT_FOUND,
+ {"node": "name-missing"},
+ None
+ ),
+ )
+
+ def test_node_missing_not_in_state(self):
+ assert_raise_library_error(
+ lambda: node._ensure_node_exists(
+ self.nodes, "name-missing", self.state
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_NOT_FOUND,
+ {"node": "name-missing"},
+ None
+ ),
+ )
+
+ def test_node_missing_and_gets_created(self):
+ assert_xml_equal(
+ etree_to_str(
+ node._ensure_node_exists(self.nodes, "name-test2", self.state)
+ ),
+ etree_to_str(self.node2)
+ )
+
+class GetNodeByUname(TestCase):
+ def setUp(self):
+ self.node1 = etree.fromstring("""
+ <node id="id-test1" uname="name-test1" />
+ """)
+ self.node2 = etree.fromstring("""
+ <node id="id-test2" uname="name-test2" />
+ """)
+ self.nodes = etree.Element("nodes")
+ self.nodes.append(self.node1)
+ self.nodes.append(self.node2)
+
+ def test_found(self):
+ assert_xml_equal(
+ etree_to_str(node._get_node_by_uname(self.nodes, "name-test1")),
+ """<node id="id-test1" uname="name-test1" />"""
+ )
+
+ def test_not_found(self):
+ self.assertTrue(
+ node._get_node_by_uname(self.nodes, "id-test1") is None
+ )
+
+class CreateNode(TestCase):
+ def setUp(self):
+ self.nodes = etree.Element("nodes")
+
+ def test_minimal(self):
+ node._create_node(self.nodes, "id-test", "name-test")
+ assert_xml_equal(
+ """
+ <nodes>
+ <node id="id-test" uname="name-test" />
+ </nodes>
+ """,
+ etree_to_str(self.nodes)
+ )
+
+ def test_with_type(self):
+ node._create_node(self.nodes, "id-test", "name-test", "type-test")
+ assert_xml_equal(
+ """
+ <nodes>
+ <node id="id-test" uname="name-test" type="type-test" />
+ </nodes>
+ """,
+ etree_to_str(self.nodes)
+ )
diff --git a/pcs/lib/cib/test/test_nvpair.py b/pcs/lib/cib/test/test_nvpair.py
index 0d18a7e..9b9d9b9 100644
--- a/pcs/lib/cib/test/test_nvpair.py
+++ b/pcs/lib/cib/test/test_nvpair.py
@@ -10,7 +10,20 @@ from lxml import etree
from pcs.lib.cib import nvpair
from pcs.test.tools.assertions import assert_xml_equal
from pcs.test.tools.pcs_unittest import TestCase, mock
+from pcs.test.tools.xml import etree_to_str
+class AppendNewNvpair(TestCase):
+ def test_append_new_nvpair_to_given_element(self):
+ nvset_element = etree.fromstring('<nvset id="a"/>')
+ nvpair._append_new_nvpair(nvset_element, "b", "c")
+ assert_xml_equal(
+ etree_to_str(nvset_element),
+ """
+ <nvset id="a">
+ <nvpair id="a-b" name="b" value="c"></nvpair>
+ </nvset>
+ """
+ )
class UpdateNvsetTest(TestCase):
@mock.patch(
@@ -38,7 +51,7 @@ class UpdateNvsetTest(TestCase):
<nvpair id="4" name="g" value="h"/>
</instance_attributes>
""",
- etree.tostring(nvset_element).decode()
+ etree_to_str(nvset_element)
)
def test_empty_value_has_no_effect(self):
xml = """
@@ -50,7 +63,25 @@ class UpdateNvsetTest(TestCase):
"""
nvset_element = etree.fromstring(xml)
nvpair.update_nvset(nvset_element, {})
- assert_xml_equal(xml, etree.tostring(nvset_element).decode())
+ assert_xml_equal(xml, etree_to_str(nvset_element))
+
+ def test_remove_empty_nvset(self):
+ xml_pre = """
+ <resource>
+ <instance_attributes id="iattrs">
+ <nvpair id="1" name="a" value="b"/>
+ </instance_attributes>
+ </resource>
+ """
+ xml_post = """
+ <resource>
+ </resource>
+ """
+ xml = etree.fromstring(xml_pre)
+ nvset_element = xml.find("instance_attributes")
+ nvpair.update_nvset(nvset_element, {"a": ""})
+ assert_xml_equal(xml_post, etree_to_str(xml))
+
class SetNvpairInNvsetTest(TestCase):
def setUp(self):
@@ -75,7 +106,7 @@ class SetNvpairInNvsetTest(TestCase):
<notnvpair id="nvset-test" name="test" value="0"/>
</nvset>
""",
- etree.tostring(self.nvset).decode()
+ etree_to_str(self.nvset)
)
def test_add(self):
@@ -89,7 +120,7 @@ class SetNvpairInNvsetTest(TestCase):
<nvpair id="nvset-test-1" name="test" value="0"/>
</nvset>
""",
- etree.tostring(self.nvset).decode()
+ etree_to_str(self.nvset)
)
def test_remove(self):
@@ -101,7 +132,7 @@ class SetNvpairInNvsetTest(TestCase):
<notnvpair id="nvset-test" name="test" value="0"/>
</nvset>
""",
- etree.tostring(self.nvset).decode()
+ etree_to_str(self.nvset)
)
def test_remove_not_existing(self):
@@ -114,11 +145,29 @@ class SetNvpairInNvsetTest(TestCase):
<notnvpair id="nvset-test" name="test" value="0"/>
</nvset>
""",
- etree.tostring(self.nvset).decode()
+ etree_to_str(self.nvset)
)
+class AppendNewNvsetTest(TestCase):
+ def test_append_new_nvset_to_given_element(self):
+ context_element = etree.fromstring('<context id="a"/>')
+ nvpair.append_new_nvset("instance_attributes", context_element, {
+ "a": "b",
+ "c": "d",
+ })
+ assert_xml_equal(
+ """
+ <context id="a">
+ <instance_attributes id="a-instance_attributes">
+ <nvpair id="a-instance_attributes-a" name="a" value="b"/>
+ <nvpair id="a-instance_attributes-c" name="c" value="d"/>
+ </instance_attributes>
+ </context>
+ """,
+ etree_to_str(context_element)
+ )
-class ArrangeSomeNvsetTest(TestCase):
+class ArrangeFirstNvsetTest(TestCase):
def setUp(self):
self.root = etree.Element("root", id="root")
self.nvset = etree.SubElement(self.root, "nvset", id="nvset")
@@ -142,7 +191,7 @@ class ArrangeSomeNvsetTest(TestCase):
<notnvpair id="nvset-test" name="test" value="0"/>
</nvset>
""",
- etree.tostring(self.nvset).decode()
+ etree_to_str(self.nvset)
)
def test_update_existing_nvset(self):
@@ -161,7 +210,7 @@ class ArrangeSomeNvsetTest(TestCase):
<nvpair id="nvset-test-1" name="test" value="0"/>
</nvset>
""",
- etree.tostring(self.nvset).decode()
+ etree_to_str(self.nvset)
)
def test_create_new_nvset_if_does_not_exist(self):
@@ -183,7 +232,7 @@ class ArrangeSomeNvsetTest(TestCase):
</nvset>
</root>
""",
- etree.tostring(root).decode()
+ etree_to_str(root)
)
@@ -218,3 +267,111 @@ class GetNvsetTest(TestCase):
],
nvpair.get_nvset(nvset)
)
+
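+# nvpair.get_value(tag_name, context, name, default) appears to look up the
+# value of the nvpair called "name" inside the first <tag_name> set under
+# "context", falling back to "default" when the pair or the set is missing
+# (behaviour inferred from the assertions below).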
+class GetValue(TestCase):
+ def assert_find_value(self, tag_name, name, value, xml, default=None):
+ self.assertEqual(
+ value,
+ nvpair.get_value(tag_name, etree.fromstring(xml), name, default)
+ )
+
+ def test_return_value_when_name_exists(self):
+ self.assert_find_value(
+ "meta_attributes",
+ "SOME-NAME",
+ "some-value",
+ """
+ <context>
+ <meta_attributes>
+ <nvpair name="SOME-NAME" value="some-value" />
+ <nvpair name="OTHER-NAME" value="other-value" />
+ </meta_attributes>
+ </context>
+ """,
+ )
+
+ def test_return_none_when_name_not_exists(self):
+ self.assert_find_value(
+ "instance_attributes",
+ "SOME-NAME",
+ value=None,
+ xml="""
+ <context>
+ <instance_attributes>
+ <nvpair name="another-name" value="some-value" />
+ </instance_attributes>
+ </context>
+ """,
+ )
+
+ def test_return_default_when_name_not_exists(self):
+ self.assert_find_value(
+ "instance_attributes",
+ "SOME-NAME",
+ value="DEFAULT",
+ xml="""
+ <context>
+ <instance_attributes>
+ <nvpair name="another-name" value="some-value" />
+ </instance_attributes>
+ </context>
+ """,
+ default="DEFAULT",
+ )
+
+ def test_return_none_when_no_nvpair(self):
+ self.assert_find_value(
+ "instance_attributes",
+ "SOME-NAME",
+ value=None,
+ xml="""
+ <context>
+ <instance_attributes />
+ </context>
+ """,
+ )
+
+ def test_return_none_when_no_nvset(self):
+ self.assert_find_value(
+ "instance_attributes",
+ "SOME-NAME",
+ value=None,
+ xml="""
+ <context>
+ </context>
+ """,
+ )
+
+class HasMetaAttribute(TestCase):
+ def test_return_false_if_does_not_have_such_attribute(self):
+ resource_element = etree.fromstring("""<primitive/>""")
+ self.assertFalse(
+ nvpair.has_meta_attribute(resource_element, "attr_name")
+ )
+
+ def test_return_true_if_such_meta_attribute_exists(self):
+ resource_element = etree.fromstring("""
+ <primitive>
+ <meta_attributes>
+ <nvpair id="a" name="attr_name" value="value"/>
+ <nvpair id="b" name="other_name" value="other-value"/>
+ </meta_attributes>
+ </primitive>
+ """)
+ self.assertTrue(
+ nvpair.has_meta_attribute(resource_element, "attr_name")
+ )
+
+ def test_return_false_if_meta_attribute_exists_but_in_nested_element(self):
+ resource_element = etree.fromstring("""
+ <group>
+ <primitive>
+ <meta_attributes>
+ <nvpair id="a" name="attr_name" value="value"/>
+ </meta_attributes>
+ </primitive>
+ </group>
+ """)
+ self.assertFalse(
+ nvpair.has_meta_attribute(resource_element, "attr_name")
+ )
diff --git a/pcs/lib/cib/test/test_resource.py b/pcs/lib/cib/test/test_resource.py
deleted file mode 100644
index c1e21a0..0000000
--- a/pcs/lib/cib/test/test_resource.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from __future__ import (
- absolute_import,
- division,
- print_function,
- unicode_literals,
-)
-
-from pcs.test.tools.pcs_unittest import TestCase
-from lxml import etree
-from pcs.lib.cib.resource import find_by_id
-
-class FindByIdTest(TestCase):
- def test_find_correct_tag(self):
- tree = etree.XML("""
- <root>
- <rsc_set id="A" />
- <primitive id="A" />
- </root>
- """)
- element = find_by_id(tree, "A")
- self.assertEqual(element.tag, "primitive")
diff --git a/pcs/lib/cib/test/test_resource_bundle.py b/pcs/lib/cib/test/test_resource_bundle.py
new file mode 100644
index 0000000..37b1d8e
--- /dev/null
+++ b/pcs/lib/cib/test/test_resource_bundle.py
@@ -0,0 +1,42 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib.cib.resource import bundle
+from pcs.test.tools.pcs_unittest import TestCase
+
+# pcs.lib.cib.resource.bundle is covered by:
+# - pcs.lib.commands.test.resource.test_bundle_create
+# - pcs.lib.commands.test.resource.test_bundle_update
+# - pcs.lib.commands.test.resource.test_resource_create
+
+class IsBundle(TestCase):
+ def test_is_bundle(self):
+ self.assertTrue(bundle.is_bundle(etree.fromstring("<bundle/>")))
+ self.assertFalse(bundle.is_bundle(etree.fromstring("<clone/>")))
+ self.assertFalse(bundle.is_bundle(etree.fromstring("<group/>")))
+
+
+class GetInnerResource(TestCase):
+ def assert_inner_resource(self, resource_id, xml):
+ self.assertEqual(
+ resource_id,
+ bundle.get_inner_resource(etree.fromstring(xml)).get("id", "")
+ )
+
+ def test_primitive(self):
+ self.assert_inner_resource(
+ "A",
+ """
+ <bundle id="B">
+ <meta_attributes />
+ <primitive id="A" />
+ <meta_attributes />
+ </bundle>
+ """
+ )
diff --git a/pcs/lib/cib/test/test_resource_clone.py b/pcs/lib/cib/test/test_resource_clone.py
new file mode 100644
index 0000000..dcdbb9a
--- /dev/null
+++ b/pcs/lib/cib/test/test_resource_clone.py
@@ -0,0 +1,109 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib.cib.resource import clone
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.assertions import assert_xml_equal
+
+class AppendNewCommon(TestCase):
+ def setUp(self):
+ self.cib = etree.fromstring("""
+ <cib>
+ <resources>
+ <primitive id="R"/>
+ </resources>
+ </cib>
+ """)
+ self.resources = self.cib.find(".//resources")
+ self.primitive = self.cib.find(".//primitive")
+
+ def assert_clone_effect(self, options, xml):
+ clone.append_new(
+ clone.TAG_CLONE,
+ self.resources,
+ self.primitive,
+ options
+ )
+ assert_xml_equal(etree.tostring(self.cib).decode(), xml)
+
+ def test_add_without_options(self):
+ self.assert_clone_effect({}, """
+ <cib>
+ <resources>
+ <clone id="R-clone">
+ <primitive id="R"/>
+ </clone>
+ </resources>
+ </cib>
+ """)
+
+ def test_add_with_options(self):
+ self.assert_clone_effect({"a": "b"}, """
+ <cib>
+ <resources>
+ <clone id="R-clone">
+ <primitive id="R"/>
+ <meta_attributes id="R-clone-meta_attributes">
+ <nvpair id="R-clone-meta_attributes-a"
+ name="a" value="b"
+ />
+ </meta_attributes>
+ </clone>
+ </resources>
+ </cib>
+ """)
+
+
+class IsAnyClone(TestCase):
+ def test_is_clone(self):
+ self.assertTrue(clone.is_clone(etree.fromstring("<clone/>")))
+ self.assertFalse(clone.is_clone(etree.fromstring("<master/>")))
+ self.assertFalse(clone.is_clone(etree.fromstring("<group/>")))
+
+ def test_is_master(self):
+ self.assertTrue(clone.is_master(etree.fromstring("<master/>")))
+ self.assertFalse(clone.is_master(etree.fromstring("<clone/>")))
+ self.assertFalse(clone.is_master(etree.fromstring("<group/>")))
+
+ def test_is_any_clone(self):
+ self.assertTrue(clone.is_any_clone(etree.fromstring("<clone/>")))
+ self.assertTrue(clone.is_any_clone(etree.fromstring("<master/>")))
+ self.assertFalse(clone.is_any_clone(etree.fromstring("<group/>")))
+
+
+class GetInnerResource(TestCase):
+ def assert_inner_resource(self, resource_id, xml):
+ self.assertEqual(
+ resource_id,
+ clone.get_inner_resource(etree.fromstring(xml)).get("id", "")
+ )
+
+ def test_primitive(self):
+ self.assert_inner_resource(
+ "A",
+ """
+ <clone id="A-clone">
+ <meta_attributes />
+ <primitive id="A" />
+ <meta_attributes />
+ </clone>
+ """
+ )
+
+ def test_group(self):
+ self.assert_inner_resource(
+ "A",
+ """
+ <clone id="A-clone">
+ <meta_attributes />
+ <group id="A" />
+ <meta_attributes />
+ </clone>
+ """
+ )
diff --git a/pcs/lib/cib/test/test_resource_common.py b/pcs/lib/cib/test/test_resource_common.py
new file mode 100644
index 0000000..52c2329
--- /dev/null
+++ b/pcs/lib/cib/test/test_resource_common.py
@@ -0,0 +1,570 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib.cib.resource import common
+from pcs.test.tools.assertions import assert_xml_equal
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.xml import etree_to_str
+
+
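+# Shared fixture for the find_* test cases below: A is a plain primitive,
+# B and C are a cloned and a mastered primitive, D is a group, E and F are a
+# cloned and a mastered group, G is an empty bundle and H is a bundle holding
+# a primitive.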
+fixture_cib = etree.fromstring("""
+ <resources>
+ <primitive id="A" />
+ <clone id="B-clone">
+ <primitive id="B" />
+ </clone>
+ <master id="C-master">
+ <primitive id="C" />
+ </master>
+ <group id="D">
+ <primitive id="D1" />
+ <primitive id="D2" />
+ </group>
+ <clone id="E-clone">
+ <group id="E">
+ <primitive id="E1" />
+ <primitive id="E2" />
+ </group>
+ </clone>
+ <master id="F-master">
+ <group id="F">
+ <primitive id="F1" />
+ <primitive id="F2" />
+ </group>
+ </master>
+ <bundle id="G-bundle" />
+ <bundle id="H-bundle">
+ <primitive id="H" />
+ </bundle>
+ </resources>
+""")
+
+
+class AreMetaDisabled(TestCase):
+ def test_detect_is_disabled(self):
+ self.assertTrue(common.are_meta_disabled({"target-role": "Stopped"}))
+ self.assertTrue(common.are_meta_disabled({"target-role": "stopped"}))
+
+ def test_detect_is_not_disabled(self):
+ self.assertFalse(common.are_meta_disabled({}))
+ self.assertFalse(common.are_meta_disabled({"target-role": "any"}))
+
+
+class IsCloneDeactivatedByMeta(TestCase):
+ def assert_is_disabled(self, meta_attributes):
+ self.assertTrue(common.is_clone_deactivated_by_meta(meta_attributes))
+
+ def assert_is_not_disabled(self, meta_attributes):
+ self.assertFalse(common.is_clone_deactivated_by_meta(meta_attributes))
+
+ def test_detect_is_disabled(self):
+ self.assert_is_disabled({"target-role": "Stopped"})
+ self.assert_is_disabled({"target-role": "stopped"})
+ self.assert_is_disabled({"clone-max": "0"})
+ self.assert_is_disabled({"clone-max": "00"})
+ self.assert_is_disabled({"clone-max": 0})
+ self.assert_is_disabled({"clone-node-max": "0"})
+ self.assert_is_disabled({"clone-node-max": "abc1"})
+
+ def test_detect_is_not_disabled(self):
+ self.assert_is_not_disabled({})
+ self.assert_is_not_disabled({"target-role": "any"})
+ self.assert_is_not_disabled({"clone-max": "1"})
+ self.assert_is_not_disabled({"clone-max": "01"})
+ self.assert_is_not_disabled({"clone-max": 1})
+ self.assert_is_not_disabled({"clone-node-max": "1"})
+ self.assert_is_not_disabled({"clone-node-max": 1})
+ self.assert_is_not_disabled({"clone-node-max": "1abc"})
+ self.assert_is_not_disabled({"clone-node-max": "1.1"})
+
+
+class FindPrimitives(TestCase):
+ def assert_find_resources(self, input_resource_id, output_resource_ids):
+ self.assertEqual(
+ output_resource_ids,
+ [
+ element.get("id", "")
+ for element in
+ common.find_primitives(
+ fixture_cib.find(
+ './/*[@id="{0}"]'.format(input_resource_id)
+ )
+ )
+ ]
+ )
+
+ def test_primitive(self):
+ self.assert_find_resources("A", ["A"])
+
+ def test_primitive_in_clone(self):
+ self.assert_find_resources("B", ["B"])
+
+ def test_primitive_in_master(self):
+ self.assert_find_resources("C", ["C"])
+
+ def test_primitive_in_group(self):
+ self.assert_find_resources("D1", ["D1"])
+ self.assert_find_resources("D2", ["D2"])
+ self.assert_find_resources("E1", ["E1"])
+ self.assert_find_resources("E2", ["E2"])
+ self.assert_find_resources("F1", ["F1"])
+ self.assert_find_resources("F2", ["F2"])
+
+ def test_primitive_in_bundle(self):
+ self.assert_find_resources("H", ["H"])
+
+ def test_group(self):
+ self.assert_find_resources("D", ["D1", "D2"])
+
+ def test_group_in_clone(self):
+ self.assert_find_resources("E", ["E1", "E2"])
+
+ def test_group_in_master(self):
+ self.assert_find_resources("F", ["F1", "F2"])
+
+ def test_cloned_primitive(self):
+ self.assert_find_resources("B-clone", ["B"])
+
+ def test_cloned_group(self):
+ self.assert_find_resources("E-clone", ["E1", "E2"])
+
+ def test_mastered_primitive(self):
+ self.assert_find_resources("C-master", ["C"])
+
+ def test_mastered_group(self):
+ self.assert_find_resources("F-master", ["F1", "F2"])
+
+ def test_bundle_empty(self):
+ self.assert_find_resources("G-bundle", [])
+
+ def test_bundle_with_primitive(self):
+ self.assert_find_resources("H-bundle", ["H"])
+
+
+class FindResourcesToEnable(TestCase):
+ def assert_find_resources(self, input_resource_id, output_resource_ids):
+ self.assertEqual(
+ output_resource_ids,
+ [
+ element.get("id", "")
+ for element in
+ common.find_resources_to_enable(
+ fixture_cib.find(
+ './/*[@id="{0}"]'.format(input_resource_id)
+ )
+ )
+ ]
+ )
+
+ def test_primitive(self):
+ self.assert_find_resources("A", ["A"])
+
+ def test_primitive_in_clone(self):
+ self.assert_find_resources("B", ["B", "B-clone"])
+
+ def test_primitive_in_master(self):
+ self.assert_find_resources("C", ["C", "C-master"])
+
+ def test_primitive_in_group(self):
+ self.assert_find_resources("D1", ["D1"])
+ self.assert_find_resources("D2", ["D2"])
+ self.assert_find_resources("E1", ["E1"])
+ self.assert_find_resources("E2", ["E2"])
+ self.assert_find_resources("F1", ["F1"])
+ self.assert_find_resources("F2", ["F2"])
+
+ def test_primitive_in_bundle(self):
+ self.assert_find_resources("H", ["H"])
+
+ def test_group(self):
+ self.assert_find_resources("D", ["D"])
+
+ def test_group_in_clone(self):
+ self.assert_find_resources("E", ["E", "E-clone"])
+
+ def test_group_in_master(self):
+ self.assert_find_resources("F", ["F", "F-master"])
+
+ def test_cloned_primitive(self):
+ self.assert_find_resources("B-clone", ["B-clone", "B"])
+
+ def test_cloned_group(self):
+ self.assert_find_resources("E-clone", ["E-clone", "E"])
+
+ def test_mastered_primitive(self):
+ self.assert_find_resources("C-master", ["C-master", "C"])
+
+ def test_mastered_group(self):
+ self.assert_find_resources("F-master", ["F-master", "F"])
+
+ def test_bundle_empty(self):
+ self.assert_find_resources("G-bundle", [])
+
+ def test_bundle_with_primitive(self):
+ self.assert_find_resources("H-bundle", [])
+
+
+class Enable(TestCase):
+ def assert_enabled(self, pre, post):
+ resource = etree.fromstring(pre)
+ common.enable(resource)
+ assert_xml_equal(post, etree_to_str(resource))
+
+ def test_disabled(self):
+ self.assert_enabled(
+ """
+ <resource>
+ <meta_attributes>
+ <nvpair name="target-role" value="something" />
+ </meta_attributes>
+ </resource>
+ """,
+ """
+ <resource>
+ </resource>
+ """
+ )
+
+ def test_enabled(self):
+ self.assert_enabled(
+ """
+ <resource>
+ </resource>
+ """,
+ """
+ <resource>
+ </resource>
+ """
+ )
+
+ def test_only_first_meta(self):
+ # this captures the current behavior
+ # once pcs supports more instance and meta attributes for each resource,
+ # this test should be reconsidered
+ self.assert_enabled(
+ """
+ <resource>
+ <meta_attributes id="meta1">
+ <nvpair name="target-role" value="something" />
+ </meta_attributes>
+ <meta_attributes id="meta2">
+ <nvpair name="target-role" value="something" />
+ </meta_attributes>
+ </resource>
+ """,
+ """
+ <resource>
+ <meta_attributes id="meta2">
+ <nvpair name="target-role" value="something" />
+ </meta_attributes>
+ </resource>
+ """
+ )
+
+
+class Disable(TestCase):
+ def assert_disabled(self, pre, post):
+ resource = etree.fromstring(pre)
+ common.disable(resource)
+ assert_xml_equal(post, etree_to_str(resource))
+
+ def test_disabled(self):
+ xml = """
+ <resource id="R">
+ <meta_attributes id="R-meta_attributes">
+ <nvpair id="R-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </resource>
+ """
+ self.assert_disabled(xml, xml)
+
+ def test_enabled(self):
+ self.assert_disabled(
+ """
+ <resource id="R">
+ </resource>
+ """,
+ """
+ <resource id="R">
+ <meta_attributes id="R-meta_attributes">
+ <nvpair id="R-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </resource>
+ """
+ )
+
+ def test_only_first_meta(self):
+ # this captures the current behavior
+ # once pcs supports more instance and meta attributes for each resource,
+ # this test should be reconsidered
+ self.assert_disabled(
+ """
+ <resource id="R">
+ <meta_attributes id="R-meta_attributes">
+ </meta_attributes>
+ <meta_attributes id="R-meta_attributes-2">
+ </meta_attributes>
+ </resource>
+ """,
+ """
+ <resource id="R">
+ <meta_attributes id="R-meta_attributes">
+ <nvpair id="R-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <meta_attributes id="R-meta_attributes-2">
+ </meta_attributes>
+ </resource>
+ """
+ )
+
+
+class FindResourcesToManage(TestCase):
+ def assert_find_resources(self, input_resource_id, output_resource_ids):
+ self.assertEqual(
+ output_resource_ids,
+ [
+ element.get("id", "")
+ for element in
+ common.find_resources_to_manage(
+ fixture_cib.find(
+ './/*[@id="{0}"]'.format(input_resource_id)
+ )
+ )
+ ]
+ )
+
+ def test_primitive(self):
+ self.assert_find_resources("A", ["A"])
+
+ def test_primitive_in_clone(self):
+ self.assert_find_resources("B", ["B", "B-clone"])
+
+ def test_primitive_in_master(self):
+ self.assert_find_resources("C", ["C", "C-master"])
+
+ def test_primitive_in_group(self):
+ self.assert_find_resources("D1", ["D1", "D"])
+ self.assert_find_resources("D2", ["D2", "D"])
+ self.assert_find_resources("E1", ["E1", "E-clone", "E"])
+ self.assert_find_resources("E2", ["E2", "E-clone", "E"])
+ self.assert_find_resources("F1", ["F1", "F-master", "F"])
+ self.assert_find_resources("F2", ["F2", "F-master", "F"])
+
+ def test_primitive_in_bundle(self):
+ self.assert_find_resources("H", ["H"])
+
+ def test_group(self):
+ self.assert_find_resources("D", ["D", "D1", "D2"])
+
+ def test_group_in_clone(self):
+ self.assert_find_resources("E", ["E", "E-clone", "E1", "E2"])
+
+ def test_group_in_master(self):
+ self.assert_find_resources("F", ["F", "F-master", "F1", "F2"])
+
+ def test_cloned_primitive(self):
+ self.assert_find_resources("B-clone", ["B-clone", "B"])
+
+ def test_cloned_group(self):
+ self.assert_find_resources("E-clone", ["E-clone", "E", "E1", "E2"])
+
+ def test_mastered_primitive(self):
+ self.assert_find_resources("C-master", ["C-master", "C"])
+
+ def test_mastered_group(self):
+ self.assert_find_resources("F-master", ["F-master", "F", "F1", "F2"])
+
+ def test_bundle_empty(self):
+ self.assert_find_resources("G-bundle", [])
+
+ def test_bundle_with_primitive(self):
+ self.assert_find_resources("H-bundle", [])
+
+
+class FindResourcesToUnmanage(TestCase):
+ def assert_find_resources(self, input_resource_id, output_resource_ids):
+ self.assertEqual(
+ output_resource_ids,
+ [
+ element.get("id", "")
+ for element in
+ common.find_resources_to_unmanage(
+ fixture_cib.find(
+ './/*[@id="{0}"]'.format(input_resource_id)
+ )
+ )
+ ]
+ )
+
+ def test_primitive(self):
+ self.assert_find_resources("A", ["A"])
+
+ def test_primitive_in_clone(self):
+ self.assert_find_resources("B", ["B"])
+
+ def test_primitive_in_master(self):
+ self.assert_find_resources("C", ["C"])
+
+ def test_primitive_in_group(self):
+ self.assert_find_resources("D1", ["D1"])
+ self.assert_find_resources("D2", ["D2"])
+ self.assert_find_resources("E1", ["E1"])
+ self.assert_find_resources("E2", ["E2"])
+ self.assert_find_resources("F1", ["F1"])
+ self.assert_find_resources("F2", ["F2"])
+
+ def test_primitive_in_bundle(self):
+ self.assert_find_resources("H", ["H"])
+
+ def test_group(self):
+ self.assert_find_resources("D", ["D1", "D2"])
+
+ def test_group_in_clone(self):
+ self.assert_find_resources("E", ["E1", "E2"])
+
+ def test_group_in_master(self):
+ self.assert_find_resources("F", ["F1", "F2"])
+
+ def test_cloned_primitive(self):
+ self.assert_find_resources("B-clone", ["B"])
+
+ def test_cloned_group(self):
+ self.assert_find_resources("E-clone", ["E1", "E2"])
+
+ def test_mastered_primitive(self):
+ self.assert_find_resources("C-master", ["C"])
+
+ def test_mastered_group(self):
+ self.assert_find_resources("F-master", ["F1", "F2"])
+
+ def test_bundle_empty(self):
+ self.assert_find_resources("G-bundle", [])
+
+ def test_bundle_with_primitive(self):
+ self.assert_find_resources("H-bundle", [])
+
+
+class Manage(TestCase):
+ def assert_managed(self, pre, post):
+ resource = etree.fromstring(pre)
+ common.manage(resource)
+ assert_xml_equal(post, etree_to_str(resource))
+
+ def test_unmanaged(self):
+ self.assert_managed(
+ """
+ <resource>
+ <meta_attributes>
+ <nvpair name="is-managed" value="something" />
+ </meta_attributes>
+ </resource>
+ """,
+ """
+ <resource>
+ </resource>
+ """
+ )
+
+ def test_managed(self):
+ self.assert_managed(
+ """
+ <resource>
+ </resource>
+ """,
+ """
+ <resource>
+ </resource>
+ """
+ )
+
+ def test_only_first_meta(self):
+ # this captures the current behavior
+ # once pcs supports more instance and meta attributes for each resource,
+ # this test should be reconsidered
+ self.assert_managed(
+ """
+ <resource>
+ <meta_attributes id="meta1">
+ <nvpair name="is-managed" value="something" />
+ </meta_attributes>
+ <meta_attributes id="meta2">
+ <nvpair name="is-managed" value="something" />
+ </meta_attributes>
+ </resource>
+ """,
+ """
+ <resource>
+ <meta_attributes id="meta2">
+ <nvpair name="is-managed" value="something" />
+ </meta_attributes>
+ </resource>
+ """
+ )
+
+
+class Unmanage(TestCase):
+ def assert_unmanaged(self, pre, post):
+ resource = etree.fromstring(pre)
+ common.unmanage(resource)
+ assert_xml_equal(post, etree_to_str(resource))
+
+ def test_unmanaged(self):
+ xml = """
+ <resource id="R">
+ <meta_attributes id="R-meta_attributes">
+ <nvpair id="R-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </resource>
+ """
+ self.assert_unmanaged(xml, xml)
+
+ def test_managed(self):
+ self.assert_unmanaged(
+ """
+ <resource id="R">
+ </resource>
+ """,
+ """
+ <resource id="R">
+ <meta_attributes id="R-meta_attributes">
+ <nvpair id="R-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </resource>
+ """
+ )
+
+ def test_only_first_meta(self):
+ # this captures the current behavior
+ # once pcs supports more instance and meta attributes for each resource,
+ # this test should be reconsidered
+ self.assert_unmanaged(
+ """
+ <resource id="R">
+ <meta_attributes id="R-meta_attributes">
+ </meta_attributes>
+ <meta_attributes id="R-meta_attributes-2">
+ </meta_attributes>
+ </resource>
+ """,
+ """
+ <resource id="R">
+ <meta_attributes id="R-meta_attributes">
+ <nvpair id="R-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <meta_attributes id="R-meta_attributes-2">
+ </meta_attributes>
+ </resource>
+ """
+ )
diff --git a/pcs/lib/cib/test/test_resource_group.py b/pcs/lib/cib/test/test_resource_group.py
new file mode 100644
index 0000000..e128295
--- /dev/null
+++ b/pcs/lib/cib/test/test_resource_group.py
@@ -0,0 +1,163 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.cib.resource import group
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import assert_raise_library_error, assert_xml_equal
+from pcs.test.tools.pcs_unittest import TestCase, mock
+
+
+class IsGroup(TestCase):
+ def test_is_group(self):
+ self.assertTrue(group.is_group(etree.fromstring("<group/>")))
+ self.assertFalse(group.is_group(etree.fromstring("<clone/>")))
+ self.assertFalse(group.is_group(etree.fromstring("<master/>")))
+
+
+ at mock.patch("pcs.lib.cib.resource.group.find_element_by_tag_and_id")
+class ProvideGroup(TestCase):
+ def setUp(self):
+ self.cib = etree.fromstring(
+ '<cib><resources><group id="g"/></resources></cib>'
+ )
+ self.group_element = self.cib.find('.//group')
+ self.resources_section = self.cib.find('.//resources')
+
+ def test_search_in_whole_tree(self, find_element_by_tag_and_id):
+ def find_group(*args, **kwargs):
+ return self.group_element
+
+ find_element_by_tag_and_id.side_effect = find_group
+
+ self.assertEqual(
+ self.group_element,
+ group.provide_group(self.resources_section, "g")
+ )
+
+ def test_create_group_when_not_exists(self, find_element_by_tag_and_id):
+ find_element_by_tag_and_id.return_value = None
+ group_element = group.provide_group(self.resources_section, "g2")
+ self.assertEqual('group', group_element.tag)
+ self.assertEqual('g2', group_element.attrib["id"])
+
+class PlaceResource(TestCase):
+ def setUp(self):
+ self.group_element = etree.fromstring("""
+ <group id="g">
+ <primitive id="a"/>
+ <primitive id="b"/>
+ </group>
+ """)
+ self.primitive_element = etree.Element("primitive", {"id": "c"})
+
+ def assert_final_order(
+ self, id_list=None, adjacent_resource_id=None, put_after_adjacent=False
+ ):
+ group.place_resource(
+ self.group_element,
+ self.primitive_element,
+ adjacent_resource_id,
+ put_after_adjacent
+ )
+ assert_xml_equal(
+ etree.tostring(self.group_element).decode(),
+ """
+ <group id="g">
+ <primitive id="{0}"/>
+ <primitive id="{1}"/>
+ <primitive id="{2}"/>
+ </group>
+ """.format(*id_list)
+ )
+
+ def test_append_at_the_end_when_adjacent_is_not_specified(self):
+ self.assert_final_order(["a", "b", "c"])
+
+ def test_insert_before_adjacent(self):
+ self.assert_final_order(["c", "a", "b"], "a")
+
+ def test_insert_after_adjacent(self):
+ self.assert_final_order(["a", "c", "b"], "a", put_after_adjacent=True)
+
+ def test_insert_after_adjacent_which_is_last(self):
+ self.assert_final_order(["a", "b", "c"], "b", put_after_adjacent=True)
+
+ def test_refuse_to_put_next_to_the_same_resource_id(self):
+ assert_raise_library_error(
+ lambda: group.place_resource(
+ self.group_element,
+ self.primitive_element,
+ adjacent_resource_id="c",
+ ),
+ (
+ severities.ERROR,
+ report_codes.RESOURCE_CANNOT_BE_NEXT_TO_ITSELF_IN_GROUP,
+ {
+ "group_id": "g",
+ "resource_id": "c",
+ },
+ ),
+ )
+
+ def test_raises_when_adjacent_resource_not_in_group(self):
+ assert_raise_library_error(
+ lambda: group.place_resource(
+ self.group_element,
+ self.primitive_element,
+ adjacent_resource_id="r",
+ ),
+ (
+ severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "id": "r",
+ "id_description": "resource",
+ "context_type": "group",
+ "context_id": "g",
+ },
+ ),
+ )
+
+
+class GetInnerResource(TestCase):
+ def assert_inner_resource(self, resource_id, xml):
+ self.assertEqual(
+ resource_id,
+ [
+ element.attrib.get("id", "")
+ for element in group.get_inner_resources(etree.fromstring(xml))
+ ]
+ )
+
+ def test_one(self):
+ self.assert_inner_resource(
+ ["A"],
+ """
+ <group id="G">
+ <meta_attributes />
+ <primitive id="A" />
+ <meta_attributes />
+ </group>
+ """
+ )
+
+ def test_more(self):
+ self.assert_inner_resource(
+ ["A", "C", "B"],
+ """
+ <group id="G">
+ <meta_attributes />
+ <primitive id="A" />
+ <primitive id="C" />
+ <primitive id="B" />
+ <meta_attributes />
+ </group>
+ """
+ )
diff --git a/pcs/lib/cib/test/test_resource_guest_node.py b/pcs/lib/cib/test/test_resource_guest_node.py
new file mode 100644
index 0000000..d9f0b72
--- /dev/null
+++ b/pcs/lib/cib/test/test_resource_guest_node.py
@@ -0,0 +1,444 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.cib.resource import guest_node
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import(
+ assert_xml_equal,
+ assert_report_item_list_equal,
+)
+from pcs.test.tools.misc import create_setup_patch_mixin
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.lib.node import NodeAddresses
+
+
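+# create_setup_patch_mixin presumably builds a mixin whose setup_patch()
+# patches an attribute of the guest_node module and returns the created mock
+# (an assumption based on its usage in FindNodeList below).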
+SetupPatchMixin = create_setup_patch_mixin(guest_node)
+
+class ValidateHostConflicts(TestCase):
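+ # The CIB fixture below provides one conflict of each kind: an existing
+ # resource id (CONFLICT), an existing guest node name (GUEST_CONFLICT),
+ # an existing remote node address (REMOTE_CONFLICT) and an existing guest
+ # node address (GUEST_ADDR_CONFLICT).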
+ def validate(self, node_name, options):
+ tree = etree.fromstring("""
+ <cib>
+ <configuration>
+ <resources>
+ <primitive id="CONFLICT"/>
+ <primitive id="A">
+ <meta_attributes>
+ <nvpair name="remote-node"
+ value="GUEST_CONFLICT"
+ />
+ </meta_attributes>
+ </primitive>
+ <primitive id="B" class="ocf" provider="pacemaker"
+ type="remote"
+ >
+ <instance_attributes>
+ <nvpair name="server" value="REMOTE_CONFLICT"/>
+ </instance_attributes>
+ </primitive>
+ <primitive id="C">
+ <meta_attributes>
+ <nvpair name="remote-node" value="some"/>
+ <nvpair name="remote-addr"
+ value="GUEST_ADDR_CONFLICT"
+ />
+ </meta_attributes>
+ </primitive>
+ </resources>
+ </configuration>
+ </cib>
+ """)
+ nodes = [
+ NodeAddresses("RING0", "RING1", name="R1"),
+ NodeAddresses("REMOTE_CONFLICT", name="B"),
+ NodeAddresses("GUEST_CONFLICT", name="GUEST_CONFLICT"),
+ NodeAddresses("GUEST_ADDR_CONFLICT", name="some"),
+ ]
+ return guest_node.validate_conflicts(tree, nodes, node_name, options)
+
+ def assert_already_exists_error(
+ self, conflict_name, node_name, options=None
+ ):
+ assert_report_item_list_equal(
+ self.validate(node_name, options if options else {}),
+ [
+ (
+ severities.ERROR,
+ report_codes.ID_ALREADY_EXISTS,
+ {
+ "id": conflict_name,
+ },
+ None
+ ),
+ ]
+ )
+
+
+ def test_report_conflict_with_id(self):
+ self.assert_already_exists_error("CONFLICT", "CONFLICT")
+
+ def test_report_conflict_guest_node(self):
+ self.assert_already_exists_error("GUEST_CONFLICT", "GUEST_CONFLICT")
+
+ def test_report_conflict_guest_addr(self):
+ self.assert_already_exists_error(
+ "GUEST_ADDR_CONFLICT",
+ "GUEST_ADDR_CONFLICT",
+ )
+
+ def test_report_conflict_guest_addr_by_addr(self):
+ self.assert_already_exists_error(
+ "GUEST_ADDR_CONFLICT",
+ "GUEST_ADDR_CONFLICT",
+ )
+
+ def test_no_conflict_guest_node_when_addr_is_different(self):
+ self.assertEqual([], self.validate("GUEST_ADDR_CONFLICT", {
+ "remote-addr": "different",
+ }))
+
+ def test_report_conflict_remote_node(self):
+ self.assert_already_exists_error("REMOTE_CONFLICT", "REMOTE_CONFLICT")
+
+ def test_no_conflict_remote_node_when_addr_is_different(self):
+ self.assertEqual([], self.validate("REMOTE_CONFLICT", {
+ "remote-addr": "different",
+ }))
+
+ def test_report_conflict_remote_node_by_addr(self):
+ self.assert_already_exists_error("REMOTE_CONFLICT", "different", {
+ "remote-addr": "REMOTE_CONFLICT",
+ })
+
+class ValidateOptions(TestCase):
+ def validate(self, options, name="some_name"):
+ return guest_node.validate_set_as_guest(
+ etree.fromstring('<cib/>'),
+ [NodeAddresses(
+ "EXISTING-HOST-RING0",
+ "EXISTING-HOST-RING0",
+ name="EXISTING-HOST-NAME"
+ )],
+ name,
+ options
+ )
+
+ def test_no_report_on_valid(self):
+ self.assertEqual(
+ [],
+ self.validate({}, "node1")
+ )
+
+ def test_report_invalid_option(self):
+ assert_report_item_list_equal(
+ self.validate({"invalid": "invalid"}, "node1"),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_type": "guest",
+ "option_names": ["invalid"],
+ "allowed": sorted(guest_node.GUEST_OPTIONS),
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_report_invalid_interval(self):
+ assert_report_item_list_equal(
+ self.validate({"remote-connect-timeout": "invalid"}, "node1"),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "remote-connect-timeout",
+ "option_value": "invalid",
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_report_invalid_node_name(self):
+ assert_report_item_list_equal(
+ self.validate({}, "EXISTING-HOST-NAME"),
+ [
+ (
+ severities.ERROR,
+ report_codes.ID_ALREADY_EXISTS,
+ {
+ "id": "EXISTING-HOST-NAME",
+ },
+ None
+ ),
+ ]
+ )
+
+
+class ValidateInNotGuest(TestCase):
+ # guest_node.is_guest_node is tested here as well
+ def test_no_report_on_non_guest(self):
+ self.assertEqual(
+ [],
+ guest_node.validate_is_not_guest(etree.fromstring("<primitive/>"))
+ )
+
+ def test_report_when_is_guest(self):
+ assert_report_item_list_equal(
+ guest_node.validate_is_not_guest(etree.fromstring("""
+ <primitive id="resource_id">
+ <meta_attributes>
+ <nvpair name="remote-node" value="node1" />
+ </meta_attributes>
+ </primitive>
+ """)),
+ [
+ (
+ severities.ERROR,
+ report_codes.RESOURCE_IS_GUEST_NODE_ALREADY,
+ {
+ "resource_id": "resource_id",
+ },
+ None
+ ),
+ ]
+ )
+
+class SetAsGuest(TestCase):
+ def test_set_guest_meta_correctly(self):
+ resource_element = etree.fromstring('<primitive id="A"/>')
+ guest_node.set_as_guest(resource_element, "node1", connect_timeout="10")
+ assert_xml_equal(
+ etree.tostring(resource_element).decode(),
+ """
+ <primitive id="A">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-remote-connect-timeout"
+ name="remote-connect-timeout" value="10"
+ />
+ <nvpair id="A-meta_attributes-remote-node"
+ name="remote-node" value="node1"
+ />
+ </meta_attributes>
+ </primitive>
+ """
+ )
+
+class UnsetGuest(TestCase):
+ def test_unset_all_guest_attributes(self):
+ resource_element = etree.fromstring("""
+ <primitive id="A">
+ <meta_attributes id="B">
+ <nvpair id="C" name="remote-node" value="node1"/>
+ <nvpair id="D" name="remote-port" value="2222"/>
+ <nvpair id="E" name="remote-addr" value="node3"/>
+ <nvpair id="F" name="remote-connect-timeout" value="10"/>
+ <nvpair id="G" name="a" value="b"/>
+ </meta_attributes>
+ </primitive>
+ """)
+ guest_node.unset_guest(resource_element)
+ assert_xml_equal(
+ etree.tostring(resource_element).decode(),
+ """
+ <primitive id="A">
+ <meta_attributes id="B">
+ <nvpair id="G" name="a" value="b"/>
+ </meta_attributes>
+ </primitive>
+ """
+ )
+
+ def test_unset_all_guest_attributes_and_empty_meta_tag(self):
+ resource_element = etree.fromstring("""
+ <primitive id="A">
+ <meta_attributes id="B">
+ <nvpair id="C" name="remote-node" value="node1"/>
+ <nvpair id="D" name="remote-port" value="2222"/>
+ <nvpair id="E" name="remote-addr" value="node3"/>
+ <nvpair id="F" name="remote-connect-timeout" value="10"/>
+ </meta_attributes>
+ </primitive>
+ """)
+ guest_node.unset_guest(resource_element)
+ assert_xml_equal(
+ etree.tostring(resource_element).decode(),
+ '<primitive id="A"/>'
+ )
+
+
+class FindNodeList(TestCase, SetupPatchMixin):
+ def assert_find_meta_attributes(self, xml, meta_attributes_xml_list):
+ get_node = self.setup_patch("get_node", return_value=None)
+
+ self.assertEqual(
+ [None] * len(meta_attributes_xml_list),
+ guest_node.find_node_list(etree.fromstring(xml))
+ )
+
+ for i, call in enumerate(get_node.mock_calls):
+ assert_xml_equal(
+ meta_attributes_xml_list[i],
+ etree.tostring(call[1][0]).decode()
+ )
+
+ def test_get_no_nodes_when_no_primitives(self):
+ self.assert_find_meta_attributes("<resources/>", [])
+
+ def test_get_no_nodes_when_no_meta_remote_node(self):
+ self.assert_find_meta_attributes(
+ """
+ <resources>
+ <primitive>
+ <meta_attributes>
+ <nvpair name="remote-addr" value="G1"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ """,
+ []
+ )
+
+ def test_get_multiple_nodes(self):
+ self.assert_find_meta_attributes(
+ """
+ <resources>
+ <primitive>
+ <meta_attributes>
+ <nvpair name="remote-node" value="G1"/>
+ <nvpair name="remote-addr" value="G1addr"/>
+ </meta_attributes>
+ </primitive>
+ <primitive>
+ <meta_attributes>
+ <nvpair name="remote-node" value="G2"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ """,
+ [
+ """
+ <meta_attributes>
+ <nvpair name="remote-node" value="G1"/>
+ <nvpair name="remote-addr" value="G1addr"/>
+ </meta_attributes>
+ """,
+ """
+ <meta_attributes>
+ <nvpair name="remote-node" value="G2"/>
+ </meta_attributes>
+ """,
+ ]
+ )
+
+class GetNode(TestCase):
+ def assert_node(self, xml, expected_node):
+ node = guest_node.get_node(etree.fromstring(xml))
+ self.assertEqual(expected_node, (node.ring0, node.name))
+
+ def test_return_none_when_is_not_guest_node(self):
+ self.assertIsNone(guest_node.get_node(etree.fromstring(
+ """
+ <meta_attributes>
+ <nvpair name="remote-addr" value="G1"/>
+ </meta_attributes>
+ """
+ )))
+
+ def test_return_same_host_and_name_when_remote_node_only(self):
+ self.assert_node(
+ """
+ <meta_attributes>
+ <nvpair name="remote-node" value="G1"/>
+ </meta_attributes>
+ """,
+ ("G1", "G1")
+ )
+
+ def test_return_different_host_and_name_when_remote_addr_there(self):
+ self.assert_node(
+ """
+ <meta_attributes>
+ <nvpair name="remote-node" value="G1"/>
+ <nvpair name="remote-addr" value="G1addr"/>
+ </meta_attributes>
+ """,
+ ("G1addr", "G1")
+ )
+
+class GetHost(TestCase):
+ def assert_find_host(self, host, xml):
+ self.assertEqual(host, guest_node.get_host(etree.fromstring(xml)))
+
+ def test_return_host_from_remote_addr(self):
+ self.assert_find_host("HOST", """
+ <primitive>
+ <meta_attributes>
+ <nvpair name="remote-node" value="NODE" />
+ <nvpair name="remote-addr" value="HOST" />
+ </meta_attributes>
+ </primitive>
+ """)
+
+ def test_return_host_from_remote_node(self):
+ self.assert_find_host("HOST", """
+ <primitive>
+ <meta_attributes>
+ <nvpair name="remote-node" value="HOST" />
+ </meta_attributes>
+ </primitive>
+ """)
+
+ def test_return_none(self):
+ self.assert_find_host(None, """
+ <primitive>
+ <meta_attributes>
+ <nvpair name="any" value="HOST" />
+ </meta_attributes>
+ </primitive>
+ """)
+
+class FindNodeResources(TestCase):
+ def assert_return_resources(self, identifier):
+ resources_section = etree.fromstring("""
+ <resources>
+ <primitive id="RESOURCE_ID">
+ <meta_attributes>
+ <nvpair name="remote-node" value="NODE_NAME" />
+ <nvpair name="remote-addr" value="NODE_HOST" />
+ </meta_attributes>
+ </primitive>
+ </resources>
+ """)
+ self.assertEqual(
+ "RESOURCE_ID",
+ guest_node.find_node_resources(resources_section, identifier)[0]
+ .attrib["id"]
+ )
+
+ def test_return_resources_by_resource_id(self):
+ self.assert_return_resources("RESOURCE_ID")
+
+ def test_return_resources_by_node_name(self):
+ self.assert_return_resources("NODE_NAME")
+
+ def test_return_resources_by_node_host(self):
+ self.assert_return_resources("NODE_HOST")
+
+ def test_no_result_when_no_guest_nodes(self):
+ resources_section = etree.fromstring(
+ '<resources><primitive id="RESOURCE_ID"/></resources>'
+ )
+ self.assertEqual([], guest_node.find_node_resources(
+ resources_section,
+ "RESOURCE_ID"
+ ))
diff --git a/pcs/lib/cib/test/test_resource_operations.py b/pcs/lib/cib/test/test_resource_operations.py
new file mode 100644
index 0000000..de2b507
--- /dev/null
+++ b/pcs/lib/cib/test/test_resource_operations.py
@@ -0,0 +1,392 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from functools import partial
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.cib.resource import operations
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.lib.validate import ValuePair
+from pcs.test.tools.assertions import assert_report_item_list_equal
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.misc import create_patcher
+from pcs.test.tools.pcs_unittest import TestCase, mock
+
+
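+# create_patcher appears to return a mock.patch helper bound to the given
+# module path, so patch_operations("foo") patches
+# pcs.lib.cib.resource.operations.foo (an assumption based on its usage below).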
+patch_operations = create_patcher("pcs.lib.cib.resource.operations")
+
+ at patch_operations("get_remaining_defaults")
+ at patch_operations("complete_all_intervals")
+ at patch_operations("validate_different_intervals")
+ at patch_operations("validate_operation_list")
+ at patch_operations("normalized_to_operations")
+ at patch_operations("operations_to_normalized")
+class Prepare(TestCase):
+ def test_prepare(
+ self, operations_to_normalized, normalized_to_operations,
+ validate_operation_list, validate_different_intervals,
+ complete_all_intervals, get_remaining_defaults
+ ):
+ validate_operation_list.return_value = ["options_report"]
+ validate_different_intervals.return_value = [
+ "different_interval_report"
+ ]
+ operations_to_normalized.return_value = [
+ {"name": ValuePair("Start", "start")},
+ {"name": ValuePair("Monitor", "monitor")},
+ ]
+ normalized_to_operations.return_value = [
+ {"name": "start"},
+ {"name": "monitor"},
+ ]
+
+ report_processor = mock.MagicMock()
+ raw_operation_list = [
+ {"name": "Start"},
+ {"name": "Monitor"},
+ ]
+ default_operation_list = [
+ {"name": "stop"},
+ ]
+ allowed_operation_name_list = ["start", "stop", "monitor"]
+ allow_invalid = True
+
+ operations.prepare(
+ report_processor,
+ raw_operation_list,
+ default_operation_list,
+ allowed_operation_name_list,
+ allow_invalid,
+ )
+
+ operations_to_normalized.assert_called_once_with(raw_operation_list)
+ normalized_to_operations.assert_called_once_with(
+ operations_to_normalized.return_value
+ )
+ validate_operation_list.assert_called_once_with(
+ operations_to_normalized.return_value,
+ allowed_operation_name_list,
+ allow_invalid
+ )
+ validate_different_intervals.assert_called_once_with(
+ normalized_to_operations.return_value
+ )
+ complete_all_intervals.assert_called_once_with(
+ normalized_to_operations.return_value
+ )
+ get_remaining_defaults.assert_called_once_with(
+ report_processor,
+ normalized_to_operations.return_value,
+ default_operation_list
+ )
+ report_processor.process_list.assert_called_once_with([
+ "options_report",
+ "different_interval_report",
+ ])
+
+
+class ValidateDifferentIntervals(TestCase):
+ def test_return_empty_reports_on_empty_list(self):
+ operations.validate_different_intervals([])
+
+ def test_return_empty_reports_on_operations_without_duplication(self):
+ operations.validate_different_intervals([
+ {"name": "monitor", "interval": "10s"},
+ {"name": "monitor", "interval": "5s"},
+ {"name": "start", "interval": "5s"},
+ ])
+
+ def test_return_report_on_duplicated_intervals(self):
+ assert_report_item_list_equal(
+ operations.validate_different_intervals([
+ {"name": "monitor", "interval": "3600s"},
+ {"name": "monitor", "interval": "60m"},
+ {"name": "monitor", "interval": "1h"},
+ {"name": "monitor", "interval": "60s"},
+ {"name": "monitor", "interval": "1m"},
+ {"name": "monitor", "interval": "5s"},
+ ]),
+ [(
+ severities.ERROR,
+ report_codes.RESOURCE_OPERATION_INTERVAL_DUPLICATION,
+ {
+ "duplications": {
+ "monitor": [
+ ["3600s", "60m", "1h"],
+ ["60s", "1m"],
+ ],
+ },
+ },
+ )]
+ )
+
+class MakeUniqueIntervals(TestCase):
+ def setUp(self):
+ self.report_processor = MockLibraryReportProcessor()
+ self.run = partial(
+ operations.make_unique_intervals,
+ self.report_processor
+ )
+
+ def test_return_copy_input_when_no_interval_duplication(self):
+ operation_list = [
+ {"name": "monitor", "interval": "10s"},
+ {"name": "monitor", "interval": "5s"},
+ {"name": "monitor", },
+ {"name": "monitor", "interval": ""},
+ {"name": "start", "interval": "5s"},
+ ]
+ self.assertEqual(operation_list, self.run(operation_list))
+
+ def test_adopt_duplicate_values(self):
+ self.assertEqual(
+ self.run([
+ {"name": "monitor", "interval": "60s"},
+ {"name": "monitor", "interval": "1m"},
+ {"name": "monitor", "interval": "5s"},
+ {"name": "monitor", "interval": "6s"},
+ {"name": "monitor", "interval": "5s"},
+ {"name": "start", "interval": "5s"},
+ ]),
+ [
+ {"name": "monitor", "interval": "60s"},
+ {"name": "monitor", "interval": "61"},
+ {"name": "monitor", "interval": "5s"},
+ {"name": "monitor", "interval": "6s"},
+ {"name": "monitor", "interval": "7"},
+ {"name": "start", "interval": "5s"},
+ ]
+ )
+
+ assert_report_item_list_equal(self.report_processor.report_item_list, [
+ (
+ severities.WARNING,
+ report_codes.RESOURCE_OPERATION_INTERVAL_ADAPTED,
+ {
+ "operation_name": "monitor",
+ "original_interval": "1m",
+ "adapted_interval": "61",
+ },
+ ),
+ (
+ severities.WARNING,
+ report_codes.RESOURCE_OPERATION_INTERVAL_ADAPTED,
+ {
+ "operation_name": "monitor",
+ "original_interval": "5s",
+ "adapted_interval": "7",
+ },
+ ),
+ ])
+
+ def test_keep_duplicate_values_when_not_a_valid_interval(self):
+ self.assertEqual(
+ self.run([
+ {"name": "monitor", "interval": "some"},
+ {"name": "monitor", "interval": "some"},
+ ]),
+ [
+ {"name": "monitor", "interval": "some"},
+ {"name": "monitor", "interval": "some"},
+ ]
+ )
+
+
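+# operations.normalize(key, value) appears to canonicalize operation attribute
+# values: "role" values are capitalized ("master" -> "Master") while values of
+# "requires", "on-fail" and "record-pending" are lowercased, as shown below.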
+class Normalize(TestCase):
+ def test_return_operation_with_the_same_values(self):
+ operation = {
+ "name": "monitor",
+ "role": "Master",
+ "timeout": "10",
+ }
+
+ self.assertEqual(operation, dict([
+ (key, operations.normalize(key, value))
+ for key, value in operation.items()
+ ]))
+
+ def test_return_operation_with_normalized_values(self):
+ self.assertEqual(
+ {
+ "name": "monitor",
+ "role": "Master",
+ "timeout": "10",
+ "requires": "nothing",
+ "on-fail": "ignore",
+ "record-pending": "true",
+ "enabled": "1",
+ },
+ dict([(key, operations.normalize(key, value)) for key, value in {
+ "name": "monitor",
+ "role": "master",
+ "timeout": "10",
+ "requires": "Nothing",
+ "on-fail": "Ignore",
+ "record-pending": "True",
+ "enabled": "1",
+ }.items()])
+ )
+
+class ValidateOperation(TestCase):
+ def assert_operation_produces_report(self, operation, report_list):
+ assert_report_item_list_equal(
+ operations.validate_operation_list(
+ [operation],
+ ["monitor"],
+ ),
+ report_list
+ )
+
+ def test_return_empty_report_on_valid_operation(self):
+ self.assert_operation_produces_report(
+ {
+ "name": "monitor",
+ "role": "Master"
+ },
+ []
+ )
+
+ def test_validate_all_individual_options(self):
+ self.assertEqual(
+ ["REQUIRES REPORT", "ROLE REPORT"],
+ sorted(operations.validate_operation({"name": "monitor"}, [
+ mock.Mock(return_value=["ROLE REPORT"]),
+ mock.Mock(return_value=["REQUIRES REPORT"]),
+ ]))
+ )
+
+ def test_return_error_when_unknown_operation_attribute(self):
+ self.assert_operation_produces_report(
+ {
+ "name": "monitor",
+ "unknown": "invalid",
+ },
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["unknown"],
+ "option_type": "resource operation",
+ "allowed": sorted(operations.ATTRIBUTES),
+ },
+ None
+ ),
+ ],
+ )
+
+ def test_return_error_when_missing_key_name(self):
+ self.assert_operation_produces_report(
+ {
+ "role": "Master"
+ },
+ [
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_names": ["name"],
+ "option_type": "resource operation",
+ },
+ None
+ ),
+ ],
+ )
+
+ def test_return_error_when_both_interval_origin_and_start_delay(self):
+ self.assert_operation_produces_report(
+ {
+ "name": "monitor",
+ "interval-origin": "a",
+ "start-delay": "b",
+ },
+ [
+ (
+ severities.ERROR,
+ report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
+ {
+ "option_names": ["interval-origin", "start-delay"],
+ "option_type": "resource operation",
+ },
+ None
+ ),
+ ],
+ )
+
+class GetRemainingDefaults(TestCase):
+ @mock.patch("pcs.lib.cib.resource.operations.make_unique_intervals")
+ def test_returns_remaining_operations(self, make_unique_intervals):
+ make_unique_intervals.side_effect = (
+ lambda report_processor, operations: operations
+ )
+ self.assertEqual(
+ operations.get_remaining_defaults(
+ report_processor=None,
+ operation_list=[{"name": "monitor"}],
+ default_operation_list=[{"name": "monitor"}, {"name": "start"}]
+ ),
+ [{"name": "start"}]
+ )
+
+
+class GetResourceOperations(TestCase):
+ resource_el = etree.fromstring("""
+ <primitive class="ocf" id="dummy" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="dummy-start" interval="0s" name="start" timeout="20"/>
+ <op id="dummy-stop" interval="0s" name="stop" timeout="20"/>
+ <op id="dummy-monitor-m" interval="10" name="monitor"
+ role="Master" timeout="20"/>
+ <op id="dummy-monitor-s" interval="11" name="monitor"
+ role="Slave" timeout="20"/>
+ </operations>
+ </primitive>
+ """)
+ resource_noop_el = etree.fromstring("""
+ <primitive class="ocf" id="dummy" provider="pacemaker" type="Stateful">
+ </primitive>
+ """)
+
+ def assert_op_list(self, op_list, expected_ids):
+ self.assertEqual(
+ [op.attrib.get("id") for op in op_list],
+ expected_ids
+ )
+
+ def test_all_operations(self):
+ self.assert_op_list(
+ operations.get_resource_operations(self.resource_el),
+ ["dummy-start", "dummy-stop", "dummy-monitor-m", "dummy-monitor-s"]
+ )
+
+ def test_filter_operations(self):
+ self.assert_op_list(
+ operations.get_resource_operations(self.resource_el, ["start"]),
+ ["dummy-start"]
+ )
+
+ def test_filter_more_operations(self):
+ self.assert_op_list(
+ operations.get_resource_operations(
+ self.resource_el,
+ ["monitor", "stop"]
+ ),
+ ["dummy-stop", "dummy-monitor-m", "dummy-monitor-s"]
+ )
+
+ def test_filter_none(self):
+ self.assert_op_list(
+ operations.get_resource_operations(self.resource_el, ["promote"]),
+ []
+ )
+
+ def test_no_operations(self):
+ self.assert_op_list(
+ operations.get_resource_operations(self.resource_noop_el),
+ []
+ )
diff --git a/pcs/lib/cib/test/test_resource_primitive.py b/pcs/lib/cib/test/test_resource_primitive.py
new file mode 100644
index 0000000..ed6ee63
--- /dev/null
+++ b/pcs/lib/cib/test/test_resource_primitive.py
@@ -0,0 +1,96 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from functools import partial
+
+from lxml import etree
+
+from pcs.lib.cib.resource import primitive
+from pcs.test.tools.pcs_unittest import TestCase, mock
+
+ at mock.patch("pcs.lib.cib.resource.primitive.append_new_instance_attributes")
+ at mock.patch("pcs.lib.cib.resource.primitive.append_new_meta_attributes")
+ at mock.patch("pcs.lib.cib.resource.primitive.create_operations")
+class AppendNew(TestCase):
+ def setUp(self):
+ self.resources_section = etree.fromstring("<resources/>")
+
+ self.instance_attributes = {"a": "b"}
+ self.meta_attributes = {"c": "d"}
+ self.operation_list = [{"name": "monitoring"}]
+
+ self.run = partial(
+ primitive.append_new,
+ self.resources_section,
+ instance_attributes=self.instance_attributes,
+ meta_attributes=self.meta_attributes,
+ operation_list=self.operation_list,
+ )
+
+ def check_mocks(
+ self,
+ primitive_element,
+ create_operations,
+ append_new_meta_attributes,
+ append_new_instance_attributes,
+ ):
+ create_operations.assert_called_once_with(
+ primitive_element,
+ self.operation_list
+ )
+ append_new_meta_attributes.assert_called_once_with(
+ primitive_element,
+ self.meta_attributes
+ )
+ append_new_instance_attributes.assert_called_once_with(
+ primitive_element,
+ self.instance_attributes
+ )
+
+ def test_append_without_provider(
+ self,
+ create_operations,
+ append_new_meta_attributes,
+ append_new_instance_attributes,
+ ):
+ primitive_element = self.run("RESOURCE_ID", "OCF", None, "DUMMY")
+ self.assertEqual(
+ primitive_element,
+ self.resources_section.find(".//primitive")
+ )
+ self.assertEqual(primitive_element.attrib["class"], "OCF")
+ self.assertEqual(primitive_element.attrib["type"], "DUMMY")
+ self.assertNotIn("provider", primitive_element.attrib)
+
+ self.check_mocks(
+ primitive_element,
+ create_operations,
+ append_new_meta_attributes,
+ append_new_instance_attributes,
+ )
+
+ def test_append_with_provider(
+ self,
+ create_operations,
+ append_new_meta_attributes,
+ append_new_instance_attributes,
+ ):
+ primitive_element = self.run("RESOURCE_ID", "OCF", "HEARTBEAT", "DUMMY")
+ self.assertEqual(
+ primitive_element,
+ self.resources_section.find(".//primitive")
+ )
+ self.assertEqual(primitive_element.attrib["class"], "OCF")
+ self.assertEqual(primitive_element.attrib["type"], "DUMMY")
+ self.assertEqual(primitive_element.attrib["provider"], "HEARTBEAT")
+
+ self.check_mocks(
+ primitive_element,
+ create_operations,
+ append_new_meta_attributes,
+ append_new_instance_attributes,
+ )
diff --git a/pcs/lib/cib/test/test_resource_remote_node.py b/pcs/lib/cib/test/test_resource_remote_node.py
new file mode 100644
index 0000000..dd3569b
--- /dev/null
+++ b/pcs/lib/cib/test/test_resource_remote_node.py
@@ -0,0 +1,287 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.cib.resource import remote_node
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.lib.node import NodeAddresses
+from pcs.test.tools.assertions import assert_report_item_list_equal
+from pcs.test.tools.pcs_unittest import TestCase, mock
+
+
+class FindNodeList(TestCase):
+ def assert_nodes_equals(self, xml, expected_nodes):
+ self.assertEqual(
+ expected_nodes,
+ [
+ (node.ring0, node.name)
+ for node in remote_node.find_node_list(etree.fromstring(xml))
+ ]
+ )
+ def test_find_multiple_nodes(self):
+ self.assert_nodes_equals(
+ """
+ <resources>
+ <primitive class="ocf" id="R1"
+ provider="pacemaker" type="remote"
+ >
+ <instance_attributes>
+ <nvpair name="server" value="H1"/>
+ </instance_attributes>
+ </primitive>
+ <primitive class="ocf" id="R2"
+ provider="pacemaker" type="remote"
+ >
+ <instance_attributes id="first-attribs">
+ <nvpair name="server" value="H2"/>
+ </instance_attributes>
+ </primitive>
+ </resources>
+ """,
+ [
+ ("H1", "R1"),
+ ("H2", "R2"),
+ ]
+ )
+
+ def test_find_no_nodes(self):
+ self.assert_nodes_equals(
+ """
+ <resources>
+ <primitive class="ocf" id="D" provider="heartbeat" type="dummy">
+ <meta_attributes>
+ <nvpair name="server" value="H1"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ """,
+ []
+ )
+
+ def test_find_nodes_without_server(self):
+ self.assert_nodes_equals(
+ """
+ <resources>
+ <primitive class="ocf" id="R1"
+ provider="pacemaker" type="remote"
+ >
+ </primitive>
+ </resources>
+ """,
+ [
+ ("R1", "R1"),
+ ]
+ )
+
+ def test_find_nodes_with_empty_server(self):
+ #it does not work, but the node "R1" is visible as a remote node in the
+ #status
+ self.assert_nodes_equals(
+ """
+ <resources>
+ <primitive class="ocf" id="R1"
+ provider="pacemaker" type="remote"
+ >
+ <instance_attributes id="first-attribs">
+ <nvpair name="server" value=""/>
+ </instance_attributes>
+ </primitive>
+ </resources>
+ """,
+ [
+ ("R1", "R1"),
+ ]
+ )
+
+
+class FindNodeResources(TestCase):
+ def assert_resources_equals(self, node_identifier, xml, resource_id_list):
+ self.assertEqual(
+ resource_id_list,
+ [
+ resource_element.attrib["id"]
+ for resource_element in remote_node.find_node_resources(
+ etree.fromstring(xml),
+ node_identifier
+ )
+ ]
+ )
+
+ def test_find_all_resources(self):
+ self.assert_resources_equals(
+ "HOST",
+ """<resources>
+ <primitive class="ocf" id="R1"
+ provider="pacemaker" type="remote"
+ >
+ <instance_attributes>
+ <nvpair name="server" value="HOST"/>
+ </instance_attributes>
+ </primitive>
+ <primitive class="ocf" id="R2"
+ provider="pacemaker" type="remote"
+ >
+ <instance_attributes id="first-attribs">
+ <nvpair name="server" value="HOST"/>
+ </instance_attributes>
+ </primitive>
+ </resources>""",
+ ["R1", "R2"]
+ )
+
+ def test_find_by_resource_id(self):
+ self.assert_resources_equals(
+ "HOST",
+ """<resources>
+ <primitive class="ocf" id="HOST"
+ provider="pacemaker" type="remote"
+ />
+ </resources>""",
+ ["HOST"]
+ )
+
+ def test_ignore_non_remote_primitives(self):
+ self.assert_resources_equals(
+ "HOST",
+ """<resources>
+ <primitive class="ocf" id="HOST"
+ provider="heartbeat" type="Dummy"
+ />
+ </resources>""",
+ []
+ )
+
+
+class GetHost(TestCase):
+ def test_return_host_when_there(self):
+ self.assertEqual(
+ "HOST",
+ remote_node.get_host(etree.fromstring("""
+ <primitive class="ocf" id="R" provider="pacemaker" type="remote"
+ >
+ <instance_attributes>
+ <nvpair name="server" value="HOST"/>
+ </instance_attributes>
+ </primitive>
+ """))
+ )
+
+ def test_return_none_when_host_not_found(self):
+ self.assertIsNone(remote_node.get_host(etree.fromstring("""
+ <primitive class="ocf" id="R" provider="heartbeat" type="dummy"/>
+ """)))
+
+ def test_return_none_when_primitive_is_without_agent(self):
+ case_list = [
+ '<primitive id="R"/>',
+ '<primitive id="R" class="ocf"/>',
+ '<primitive id="R" class="ocf" provider="pacemaker"/>',
+ ]
+ for case in case_list:
+ self.assertIsNone(
+ remote_node.get_host(etree.fromstring(case)),
+ "None was not returned for '{0}'".format(case)
+ )
+
+ def test_return_host_from_resource_id(self):
+ self.assertEqual(
+ "R",
+ remote_node.get_host(etree.fromstring("""
+ <primitive class="ocf" id="R" provider="pacemaker"
+ type="remote"
+ />
+ """))
+ )
+
+class Validate(TestCase):
+ def validate(
+ self, instance_attributes=None, node_name="NODE-NAME", host="node-host"
+ ):
+ nodes = [
+ NodeAddresses("RING0", "RING1", name="R"),
+ ]
+ resource_agent = mock.MagicMock()
+ return remote_node.validate_create(
+ nodes,
+ resource_agent,
+ host,
+ node_name,
+ instance_attributes if instance_attributes else {},
+ )
+
+ def test_report_conflict_node_name(self):
+ assert_report_item_list_equal(
+ self.validate(
+ node_name="R",
+ host="host",
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.ID_ALREADY_EXISTS,
+ {
+ "id": "R",
+ },
+ None
+ )
+ ]
+ )
+
+ def test_report_conflict_node_host(self):
+ assert_report_item_list_equal(
+ self.validate(
+ host="RING0",
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.ID_ALREADY_EXISTS,
+ {
+ "id": "RING0",
+ },
+ None
+ )
+ ]
+ )
+
+ def test_report_conflict_node_host_ring1(self):
+ assert_report_item_list_equal(
+ self.validate(
+ host="RING1",
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.ID_ALREADY_EXISTS,
+ {
+ "id": "RING1",
+ },
+ None
+ )
+ ]
+ )
+
+ def test_report_used_disallowed_server(self):
+ assert_report_item_list_equal(
+ self.validate(
+ instance_attributes={"server": "A"}
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ 'option_type': 'resource',
+ 'option_names': ['server'],
+ 'allowed': [],
+ },
+ None
+ )
+ ]
+ )
diff --git a/pcs/lib/cib/test/test_resource_set.py b/pcs/lib/cib/test/test_resource_set.py
index e4fd8e4..05f5831 100644
--- a/pcs/lib/cib/test/test_resource_set.py
+++ b/pcs/lib/cib/test/test_resource_set.py
@@ -41,7 +41,7 @@ class PrepareSetTest(TestCase):
severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "invalid_name",
+ "option_names": ["invalid_name"],
"option_type": None,
"allowed": ["action", "require-all", "role", "sequential"],
}),
diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/lib/cib/test/test_tools.py
similarity index 52%
rename from pcs/test/test_lib_cib_tools.py
rename to pcs/lib/cib/test/test_tools.py
index 8c85b5b..9db8dae 100644
--- a/pcs/test/test_lib_cib_tools.py
+++ b/pcs/lib/cib/test/test_tools.py
@@ -5,29 +5,29 @@ from __future__ import (
unicode_literals,
)
-from pcs.test.tools.pcs_unittest import TestCase
+from functools import partial
-from os.path import join
from lxml import etree
+from pcs.test.tools.pcs_unittest import TestCase
from pcs.test.tools.assertions import (
assert_raise_library_error,
- assert_xml_equal,
+ assert_report_item_list_equal,
)
from pcs.test.tools.misc import get_test_resource as rc
from pcs.test.tools.pcs_unittest import mock
from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
-from pcs import settings
from pcs.common import report_codes
-from pcs.lib.external import CommandRunner
from pcs.lib.errors import ReportItemSeverity as severities
from pcs.lib.cib import tools as lib
class CibToolsTest(TestCase):
def setUp(self):
- self.create_cib = get_xml_manipulation_creator_from_file(rc("cib-empty.xml"))
+ self.create_cib = get_xml_manipulation_creator_from_file(
+ rc("cib-empty.xml")
+ )
self.cib = self.create_cib()
def fixture_add_primitive_with_id(self, element_id):
@@ -37,6 +37,102 @@ class CibToolsTest(TestCase):
.format(element_id)
)
+
+class IdProviderTest(CibToolsTest):
+ def setUp(self):
+ super(IdProviderTest, self).setUp()
+ self.provider = lib.IdProvider(self.cib.tree)
+
+ def fixture_report(self, id):
+ return (
+ severities.ERROR,
+ report_codes.ID_ALREADY_EXISTS,
+ {
+ "id": id,
+ },
+ None
+ )
+
+
+class IdProviderBook(IdProviderTest):
+ def test_nonexisting_id(self):
+ assert_report_item_list_equal(
+ self.provider.book_ids("myId"),
+ []
+ )
+
+ def test_existing_id(self):
+ self.fixture_add_primitive_with_id("myId")
+ assert_report_item_list_equal(
+ self.provider.book_ids("myId"),
+ [
+ self.fixture_report("myId"),
+ ]
+ )
+
+ def test_double_book(self):
+ assert_report_item_list_equal(
+ self.provider.book_ids("myId"),
+ []
+ )
+ assert_report_item_list_equal(
+ self.provider.book_ids("myId"),
+ [
+ self.fixture_report("myId"),
+ ]
+ )
+
+ def test_more_ids(self):
+ assert_report_item_list_equal(
+ self.provider.book_ids("myId1", "myId2"),
+ []
+ )
+ assert_report_item_list_equal(
+ self.provider.book_ids("myId1", "myId2"),
+ [
+ self.fixture_report("myId1"),
+ self.fixture_report("myId2"),
+ ]
+ )
+
+ def test_complex(self):
+ # test ids existing in the cib, double booked, available
+ # test reports not repeated
+ self.fixture_add_primitive_with_id("myId1")
+ self.fixture_add_primitive_with_id("myId2")
+ assert_report_item_list_equal(
+ self.provider.book_ids(
+ "myId1", "myId2", "myId3", "myId2", "myId3", "myId4", "myId3"
+ ),
+ [
+ self.fixture_report("myId1"),
+ self.fixture_report("myId2"),
+ self.fixture_report("myId3"),
+ ]
+ )
+
+
+class IdProviderAllocate(IdProviderTest):
+ def test_nonexisting_id(self):
+ self.assertEqual("myId", self.provider.allocate_id("myId"))
+
+ def test_existing_id(self):
+ self.fixture_add_primitive_with_id("myId")
+ self.assertEqual("myId-1", self.provider.allocate_id("myId"))
+
+ def test_allocate_books(self):
+ self.assertEqual("myId", self.provider.allocate_id("myId"))
+ self.assertEqual("myId-1", self.provider.allocate_id("myId"))
+
+ def test_booked_ids(self):
+ self.fixture_add_primitive_with_id("myId")
+ assert_report_item_list_equal(
+ self.provider.book_ids("myId-1"),
+ []
+ )
+ self.assertEqual("myId-2", self.provider.allocate_id("myId"))
+
+
class DoesIdExistTest(CibToolsTest):
def test_existing_id(self):
self.fixture_add_primitive_with_id("myId")
@@ -51,21 +147,18 @@ class DoesIdExistTest(CibToolsTest):
self.assertFalse(lib.does_id_exist(self.cib.tree, "my Id"))
def test_ignore_status_section(self):
- self.cib.append_to_first_tag_name(
- "status",
- """\
-<elem1 id="status-1">
- <elem1a id="status-1a">
- <elem1aa id="status-1aa"/>
- <elem1ab id="status-1ab"/>
- </elem1a>
- <elem1b id="status-1b">
- <elem1ba id="status-1ba"/>
- <elem1bb id="status-1bb"/>
- </elem1b>
-</elem1>
-"""
- )
+ self.cib.append_to_first_tag_name("status", """
+ <elem1 id="status-1">
+ <elem1a id="status-1a">
+ <elem1aa id="status-1aa"/>
+ <elem1ab id="status-1ab"/>
+ </elem1a>
+ <elem1b id="status-1b">
+ <elem1ba id="status-1ba"/>
+ <elem1bb id="status-1bb"/>
+ </elem1b>
+ </elem1>
+ """)
self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1"))
self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1a"))
self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1aa"))
@@ -100,6 +193,32 @@ class DoesIdExistTest(CibToolsTest):
self.assertFalse(lib.does_id_exist(self.cib.tree, "role1"))
self.assertFalse(lib.does_id_exist(self.cib.tree, "role2"))
+ def test_ignore_sections_directly_under_cib(self):
+ #this is a side effect of the current implementation but is not a problem
+ #since the id attribute is not allowed for elements directly under cib
+ tree = etree.fromstring('<cib><direct id="a"/></cib>')
+ self.assertFalse(lib.does_id_exist(tree, "a"))
+
+ def test_find_id_when_cib_is_not_root_element(self):
+ #for example when we have only a part of the xml
+ tree = etree.fromstring('<root><direct id="a"/></root>')
+ self.assertTrue(lib.does_id_exist(tree, "a"))
+
+ def test_find_remote_node_pacemaker_internal_id(self):
+ tree = etree.fromstring("""
+ <cib>
+ <configuration>
+ <resources>
+ <primitive id="b">
+ <meta_attributes>
+ <nvpair name="remote-node" value="a"/>
+ </meta_attributes>
+ </primitive>
+ </resources>
+ </configuration>
+ </cib>
+ """)
+ self.assertTrue(lib.does_id_exist(tree, "a"))
class FindUniqueIdTest(CibToolsTest):
def test_already_unique(self):
@@ -123,6 +242,13 @@ class FindUniqueIdTest(CibToolsTest):
self.fixture_add_primitive_with_id("myId-3")
self.assertEqual("myId-2", lib.find_unique_id(self.cib.tree, "myId"))
+ def test_reserved_ids(self):
+ self.fixture_add_primitive_with_id("myId-1")
+ self.assertEqual(
+ "myId-3",
+ lib.find_unique_id(self.cib.tree, "myId", ["myId", "myId-2"])
+ )
+
class CreateNvsetIdTest(TestCase):
def test_create_plain_id_when_no_confilicting_id_there(self):
context = etree.fromstring('<cib><a id="b"/></cib>')
@@ -203,12 +329,29 @@ class GetResourcesTest(CibToolsTest):
),
)
+class GetNodes(CibToolsTest):
+ def test_success_if_exists(self):
+ self.assertEqual(
+ "nodes",
+ lib.get_nodes(self.cib.tree).tag
+ )
-class GetAclsTest(CibToolsTest):
- def setUp(self):
- self.create_cib = get_xml_manipulation_creator_from_file(rc("cib-empty-1.2.xml"))
- self.cib = self.create_cib()
+ def test_raise_if_missing(self):
+ for section in self.cib.tree.findall(".//configuration/nodes"):
+ section.getparent().remove(section)
+ assert_raise_library_error(
+ lambda: lib.get_nodes(self.cib.tree),
+ (
+ severities.ERROR,
+ report_codes.CIB_CANNOT_FIND_MANDATORY_SECTION,
+ {
+ "section": "configuration/nodes",
+ },
+ None
+ ),
+ )
+class GetAclsTest(CibToolsTest):
def test_success_if_exists(self):
self.cib.append_to_first_tag_name(
"configuration",
@@ -224,6 +367,23 @@ class GetAclsTest(CibToolsTest):
self.assertEqual("acls", acls.tag)
self.assertEqual("configuration", acls.getparent().tag)
+class GetFencingTopology(CibToolsTest):
+ def test_success_if_exists(self):
+ self.cib.append_to_first_tag_name(
+ "configuration",
+ "<fencing-topology />"
+ )
+ self.assertEqual(
+ "fencing-topology",
+ lib.get_fencing_topology(self.cib.tree).tag
+ )
+
+ def test_success_if_missing(self):
+ ft = lib.get_fencing_topology(self.cib.tree)
+ self.assertEqual("fencing-topology", ft.tag)
+ self.assertEqual("configuration", ft.getparent().tag)
+
+
@mock.patch('pcs.lib.cib.tools.does_id_exist')
class ValidateIdDoesNotExistsTest(TestCase):
def test_success_when_id_does_not_exists(self, does_id_exists):
@@ -244,75 +404,6 @@ class ValidateIdDoesNotExistsTest(TestCase):
does_id_exists.assert_called_once_with("tree", "some-id")
-class GetSubElementTest(TestCase):
- def setUp(self):
- self.root = etree.Element("root")
- self.sub = etree.SubElement(self.root, "sub_element")
-
- def test_sub_element_exists(self):
- self.assertEqual(
- self.sub, lib.get_sub_element(self.root, "sub_element")
- )
-
- def test_new_no_id(self):
- assert_xml_equal(
- '<new_element/>',
- etree.tostring(
- lib.get_sub_element(self.root, "new_element")
- ).decode()
- )
- assert_xml_equal(
- """
- <root>
- <sub_element/>
- <new_element/>
- </root>
- """,
- etree.tostring(self.root).decode()
- )
-
- def test_new_with_id(self):
- assert_xml_equal(
- '<new_element id="new_id"/>',
- etree.tostring(
- lib.get_sub_element(self.root, "new_element", "new_id")
- ).decode()
- )
- assert_xml_equal(
- """
- <root>
- <sub_element/>
- <new_element id="new_id"/>
- </root>
- """,
- etree.tostring(self.root).decode()
- )
-
- def test_new_first(self):
- lib.get_sub_element(self.root, "new_element", "new_id", 0)
- assert_xml_equal(
- """
- <root>
- <new_element id="new_id"/>
- <sub_element/>
- </root>
- """,
- etree.tostring(self.root).decode()
- )
-
- def test_new_last(self):
- lib.get_sub_element(self.root, "new_element", "new_id", None)
- assert_xml_equal(
- """
- <root>
- <sub_element/>
- <new_element id="new_id"/>
- </root>
- """,
- etree.tostring(self.root).decode()
- )
-
-
class GetPacemakerVersionByWhichCibWasValidatedTest(TestCase):
def test_missing_attribute(self):
assert_raise_library_error(
@@ -355,197 +446,113 @@ class GetPacemakerVersionByWhichCibWasValidatedTest(TestCase):
)
-@mock.patch("pcs.lib.cib.tools.upgrade_cib")
-class EnsureCibVersionTest(TestCase):
- def setUp(self):
- self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
- self.cib = etree.XML('<cib validate-with="pacemaker-2.3.4"/>')
-
- def test_same_version(self, mock_upgrade_cib):
- self.assertTrue(
- lib.ensure_cib_version(
- self.mock_runner, self.cib, (2, 3, 4)
- ) is None
+find_group = partial(lib.find_element_by_tag_and_id, "group")
+class FindTagWithId(TestCase):
+ def test_returns_element_when_exists(self):
+ tree = etree.fromstring(
+ '<cib><resources><group id="a"/></resources></cib>'
)
- self.assertEqual(0, mock_upgrade_cib.run.call_count)
+ element = find_group(tree.find(".//resources"), "a")
+ self.assertEqual("group", element.tag)
+ self.assertEqual("a", element.attrib["id"])
- def test_higher_version(self, mock_upgrade_cib):
- self.assertTrue(
- lib.ensure_cib_version(
- self.mock_runner, self.cib, (2, 3, 3)
- ) is None
+ def test_returns_element_when_exists_one_of_tags(self):
+ tree = etree.fromstring("""
+ <cib>
+ <resources>
+ <group id="a"/>
+ <primitive id="b"/>
+ </resources>
+ </cib>
+ """)
+ element = lib.find_element_by_tag_and_id(
+ ["group", "primitive"],
+ tree.find(".//resources"),
+ "a"
)
- self.assertEqual(0, mock_upgrade_cib.call_count)
+ self.assertEqual("group", element.tag)
+ self.assertEqual("a", element.attrib["id"])
- def test_upgraded_same_version(self, mock_upgrade_cib):
- upgraded_cib = etree.XML('<cib validate-with="pacemaker-2.3.5"/>')
- mock_upgrade_cib.return_value = upgraded_cib
- self.assertEqual(
- upgraded_cib,
- lib.ensure_cib_version(
- self.mock_runner, self.cib, (2, 3, 5)
- )
+ def test_raises_when_is_under_another_tag(self):
+ tree = etree.fromstring(
+ '<cib><resources><primitive id="a"/></resources></cib>'
)
- mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner)
- def test_upgraded_higher_version(self, mock_upgrade_cib):
- upgraded_cib = etree.XML('<cib validate-with="pacemaker-2.3.6"/>')
- mock_upgrade_cib.return_value = upgraded_cib
- self.assertEqual(
- upgraded_cib,
- lib.ensure_cib_version(
- self.mock_runner, self.cib, (2, 3, 5)
- )
+ assert_raise_library_error(
+ lambda: find_group(tree.find(".//resources"), "a"),
+ (
+ severities.ERROR,
+ report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
+ {
+ "id": "a",
+ "expected_types": ["group"],
+ "current_type": "primitive",
+ },
+ ),
)
- mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner)
- def test_upgraded_lower_version(self, mock_upgrade_cib):
- mock_upgrade_cib.return_value = self.cib
+ def test_raises_when_is_under_another_context(self):
+ tree = etree.fromstring("""
+ <cib>
+ <resources>
+ <group id="g1"><primitive id="a"/></group>
+ <group id="g2"><primitive id="b"/></group>
+ </resources>
+ </cib>
+ """)
assert_raise_library_error(
- lambda: lib.ensure_cib_version(
- self.mock_runner, self.cib, (2, 3, 5)
+ lambda: lib.find_element_by_tag_and_id(
+ "primitive",
+ tree.find('.//resources/group[@id="g2"]'),
+ "a"
),
(
severities.ERROR,
- report_codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION,
+ report_codes.OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT,
{
- "required_version": "2.3.5",
- "current_version": "2.3.4"
- }
- )
+ "type": "primitive",
+ "id": "a",
+ "expected_context_type": "group",
+ "expected_context_id": "g2",
+ },
+ ),
)
- mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner)
-
-@mock.patch("tempfile.NamedTemporaryFile")
-class UpgradeCibTest(TestCase):
- def setUp(self):
- self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
-
- def test_success(self, mock_named_file):
- mock_file = mock.MagicMock()
- mock_file.name = "mock_file_name"
- mock_file.read.return_value = "<cib/>"
- mock_named_file.return_value = mock_file
- self.mock_runner.run.return_value = ("", "", 0)
- assert_xml_equal(
- "<cib/>",
- etree.tostring(
- lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner)
- ).decode()
- )
- mock_named_file.assert_called_once_with("w+", suffix=".pcs")
- mock_file.write.assert_called_once_with("<old_cib/>")
- mock_file.flush.assert_called_once_with()
- self.mock_runner.run.assert_called_once_with(
- [
- join(settings.pacemaker_binaries, "cibadmin"),
- "--upgrade",
- "--force"
- ],
- env_extend={"CIB_file": "mock_file_name"}
- )
- mock_file.seek.assert_called_once_with(0)
- mock_file.read.assert_called_once_with()
-
- def test_upgrade_failed(self, mock_named_file):
- mock_file = mock.MagicMock()
- mock_file.name = "mock_file_name"
- mock_named_file.return_value = mock_file
- self.mock_runner.run.return_value = ("some info", "some error", 1)
+ def test_raises_when_id_does_not_exist(self):
+ tree = etree.fromstring('<cib><resources/></cib>')
assert_raise_library_error(
- lambda: lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner),
+ lambda: find_group(tree.find('.//resources'), "a"),
(
severities.ERROR,
- report_codes.CIB_UPGRADE_FAILED,
+ report_codes.ID_NOT_FOUND,
{
- "reason": "some error\nsome info",
- }
- )
+ "id": "a",
+ "id_description": "group",
+ "context_type": "resources",
+ "context_id": "",
+ },
+ ),
)
- mock_named_file.assert_called_once_with("w+", suffix=".pcs")
- mock_file.write.assert_called_once_with("<old_cib/>")
- mock_file.flush.assert_called_once_with()
- self.mock_runner.run.assert_called_once_with(
- [
- join(settings.pacemaker_binaries, "cibadmin"),
- "--upgrade",
- "--force"
- ],
- env_extend={"CIB_file": "mock_file_name"}
- )
-
- def test_unable_to_parse_upgraded_cib(self, mock_named_file):
- mock_file = mock.MagicMock()
- mock_file.name = "mock_file_name"
- mock_file.read.return_value = "not xml"
- mock_named_file.return_value = mock_file
- self.mock_runner.run.return_value = ("", "", 0)
assert_raise_library_error(
- lambda: lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner),
+ lambda: find_group(
+ tree.find('.//resources'),
+ "a",
+ id_description="resource group"
+ ),
(
severities.ERROR,
- report_codes.CIB_UPGRADE_FAILED,
+ report_codes.ID_NOT_FOUND,
{
- "reason":
- "Start tag expected, '<' not found, line 1, column 1",
- }
- )
- )
- mock_named_file.assert_called_once_with("w+", suffix=".pcs")
- mock_file.write.assert_called_once_with("<old_cib/>")
- mock_file.flush.assert_called_once_with()
- self.mock_runner.run.assert_called_once_with(
- [
- join(settings.pacemaker_binaries, "cibadmin"),
- "--upgrade",
- "--force"
- ],
- env_extend={"CIB_file": "mock_file_name"}
- )
- mock_file.seek.assert_called_once_with(0)
- mock_file.read.assert_called_once_with()
-
-
-class EtreeElementAttributesToDictTest(TestCase):
- def setUp(self):
- self.el = etree.Element(
- "test_element",
- {
- "id": "test_id",
- "description": "some description",
- "attribute": "value",
- }
- )
-
- def test_only_existing(self):
- self.assertEqual(
- {
- "id": "test_id",
- "attribute": "value",
- },
- lib.etree_element_attibutes_to_dict(self.el, ["id", "attribute"])
- )
-
- def test_only_not_existing(self):
- self.assertEqual(
- {
- "_id": None,
- "not_existing": None,
- },
- lib.etree_element_attibutes_to_dict(
- self.el, ["_id", "not_existing"]
- )
+ "id": "a",
+ "id_description": "resource group",
+ },
+ ),
)
- def test_mix(self):
- self.assertEqual(
- {
- "id": "test_id",
- "attribute": "value",
- "not_existing": None,
- },
- lib.etree_element_attibutes_to_dict(
- self.el, ["id", "not_existing", "attribute"]
- )
- )
+ def test_returns_none_if_id_does_not_exist(self):
+ tree = etree.fromstring('<cib><resources/></cib>')
+ self.assertIsNone(find_group(
+ tree.find('.//resources'),
+ "a",
+ none_if_id_unused=True
+ ))
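The IdProvider tests above exercise two complementary operations: book_ids() validates caller-supplied ids and reserves them (returning reports on conflicts), while allocate_id() always succeeds by appending a numeric suffix until an unused id is found. A minimal usage sketch, illustrative only and based solely on the new tools.py code introduced below:

    from lxml import etree
    from pcs.lib.cib.tools import IdProvider

    cib = etree.fromstring(
        '<cib><configuration><resources/></configuration></cib>'
    )
    provider = IdProvider(cib)
    # reserve ids requested by the user; a non-empty result means a conflict
    conflict_reports = provider.book_ids("my-resource", "my-resource-meta")
    # generate an id for an element created internally; booked ids are skipped
    generated_id = provider.allocate_id("my-resource")  # -> "my-resource-1"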
diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
index 06a9671..2308a42 100644
--- a/pcs/lib/cib/tools.py
+++ b/pcs/lib/cib/tools.py
@@ -5,16 +5,54 @@ from __future__ import (
unicode_literals,
)
-import os
import re
-import tempfile
-from lxml import etree
-from pcs import settings
-from pcs.common.tools import join_multilines
+from pcs.common.tools import is_string
from pcs.lib import reports
from pcs.lib.errors import LibraryError
-from pcs.lib.pacemaker_values import validate_id
+from pcs.lib.pacemaker.values import validate_id
+from pcs.lib.xml_tools import (
+ get_root,
+ get_sub_element,
+)
+
+class IdProvider(object):
+ """
+ Book ids for future use in the CIB and generate new ids accordingly
+ """
+ def __init__(self, cib_element):
+ """
+ etree cib_element -- any element of the xml to be checked against
+ """
+ self._cib = get_root(cib_element)
+ self._booked_ids = set()
+
+ def allocate_id(self, proposed_id):
+ """
+ Generate a new unique id based on the proposal and keep track of it
+ string proposed_id -- requested id
+ """
+ final_id = find_unique_id(self._cib, proposed_id, self._booked_ids)
+ self._booked_ids.add(final_id)
+ return final_id
+
+ def book_ids(self, *id_list):
+ """
+ Check if the ids are not already used and reserve them for future use
+ strings *id_list -- ids
+ """
+ reported_ids = set()
+ report_list = []
+ for id in id_list:
+ if id in reported_ids:
+ continue
+ if id in self._booked_ids or does_id_exist(self._cib, id):
+ report_list.append(reports.id_already_exists(id))
+ reported_ids.add(id)
+ continue
+ self._booked_ids.add(id)
+ return report_list
+
def does_id_exist(tree, check_id):
"""
@@ -22,16 +60,40 @@ def does_id_exist(tree, check_id):
tree cib etree node
check_id id to check
"""
- # ElementTree has getroot, Elemet has getroottree
- root = tree.getroot() if hasattr(tree, "getroot") else tree.getroottree()
+
# do not search in /cib/status, it may contain references to previously
# existing and deleted resources and thus preventing creating them again
- existing = root.xpath(
+
+ #pacemaker creates an implicit resource for the pacemaker_remote connection,
+ #which will be named the same as the value of the remote-node attribute of
+ #the explicit resource. So the value of the nvpair named "remote-node" is
+ #considered to be an id
+ existing = get_root(tree).xpath("""
(
- '(/cib/*[name()!="status"]|/*[name()!="cib"])' +
- '//*[name()!="acl_target" and name()!="role" and @id="{0}"]'
- ).format(check_id)
- )
+ /cib/*[name()!="status"]
+ |
+ /*[name()!="cib"]
+ )
+ //*[
+ (
+ name()!="acl_target"
+ and
+ name()!="role"
+ and
+ @id="{0}"
+ ) or (
+ name()="primitive"
+ and
+ meta_attributes[
+ nvpair[
+ @name="remote-node"
+ and
+ @value="{0}"
+ ]
+ ]
+ )
+ ]
+ """.format(check_id))
return len(existing) > 0
def validate_id_does_not_exist(tree, id):
@@ -41,20 +103,80 @@ def validate_id_does_not_exist(tree, id):
if does_id_exist(tree, id):
raise LibraryError(reports.id_already_exists(id))
-def find_unique_id(tree, check_id):
+def find_unique_id(tree, check_id, reserved_ids=None):
"""
Returns check_id if it doesn't exist in the dom, otherwise it adds
an integer to the end of the id and increments it until a unique id is found
- tree cib etree node
- check_id id to check
+ etree tree -- cib etree node
+ string check_id -- id to check
+ iterable reserved_ids -- ids to treat as already used
"""
+ if not reserved_ids:
+ reserved_ids = set()
counter = 1
temp_id = check_id
- while does_id_exist(tree, temp_id):
+ while temp_id in reserved_ids or does_id_exist(tree, temp_id):
temp_id = "{0}-{1}".format(check_id, counter)
counter += 1
return temp_id
+def find_element_by_tag_and_id(
+ tag, context_element, element_id, none_if_id_unused=False, id_description=""
+):
+ """
+ Return the element with the given tag and element_id under context_element.
+ When the element does not exist, raise LibraryError, or return None if
+ none_if_id_unused is set.
+
+ etree.Element(Tree) context_element is the part of the tree to scan
+ string|list tag is the expected tag (or list of tags) of the searched element
+ string element_id is the id of the searched element
+ bool none_if_id_unused if the element is not found then return None if True
+ or raise a LibraryError if False
+ string id_description optional description for the id
+ """
+ tag_list = [tag] if is_string(tag) else tag
+ element_list = context_element.xpath(
+ './/*[({0}) and @id="{1}"]'.format(
+ " or ".join(["self::{0}".format(one_tag) for one_tag in tag_list]),
+ element_id
+ )
+ )
+
+ if element_list:
+ return element_list[0]
+
+ element = get_root(context_element).find(
+ './/*[@id="{0}"]'.format(element_id)
+ )
+
+ if element is not None:
+ raise LibraryError(
+ reports.id_belongs_to_unexpected_type(
+ element_id,
+ expected_types=tag_list,
+ current_type=element.tag
+ ) if element.tag not in tag_list
+ else reports.object_with_id_in_unexpected_context(
+ element.tag,
+ element_id,
+ context_element.tag,
+ context_element.attrib.get("id", "")
+ )
+ )
+
+ if none_if_id_unused:
+ return None
+
+ raise LibraryError(
+ reports.id_not_found(
+ element_id,
+ id_description if id_description else "/".join(tag_list),
+ context_element.tag,
+ context_element.attrib.get("id", "")
+ )
+ )
+
def create_subelement_id(context_element, suffix):
return find_unique_id(
context_element,
@@ -87,11 +209,7 @@ def get_acls(tree):
Return 'acls' element from tree, create a new one if missing
tree cib etree node
"""
- acls = tree.find(".//acls")
- if acls is None:
- acls = etree.SubElement(get_configuration(tree), "acls")
- return acls
-
+ return get_sub_element(get_configuration(tree), "acls")
def get_alerts(tree):
"""
@@ -100,7 +218,6 @@ def get_alerts(tree):
"""
return get_sub_element(get_configuration(tree), "alerts")
-
def get_constraints(tree):
"""
Return 'constraint' element from tree
@@ -108,47 +225,26 @@ def get_constraints(tree):
"""
return _get_mandatory_section(tree, "configuration/constraints")
-def get_resources(tree):
+def get_fencing_topology(tree):
"""
- Return 'resources' element from tree
- tree cib etree node
+ Return the 'fencing-topology' element from the tree
+ tree -- cib etree node
"""
- return _get_mandatory_section(tree, "configuration/resources")
-
-def find_parent(element, tag_names):
- candidate = element
- while True:
- if candidate is None or candidate.tag in tag_names:
- return candidate
- candidate = candidate.getparent()
-
-def export_attributes(element):
- return dict((key, value) for key, value in element.attrib.items())
-
+ return get_sub_element(get_configuration(tree), "fencing-topology")
-def get_sub_element(element, sub_element_tag, new_id=None, new_index=None):
+def get_nodes(tree):
"""
- Returns the FIRST sub-element sub_element_tag of element. It will create new
- element if such doesn't exist yet. Id of new element will be new_if if
- it's not None. new_index specify where will be new element added, if None
- it will be appended.
-
- element -- parent element
- sub_element_tag -- tag of wanted element
- new_id -- id of new element
- new_index -- index for new element
+ Return 'nodes' element from the tree
+ tree cib etree node
"""
- sub_element = element.find("./{0}".format(sub_element_tag))
- if sub_element is None:
- sub_element = etree.Element(sub_element_tag)
- if new_id:
- sub_element.set("id", new_id)
- if new_index is None:
- element.append(sub_element)
- else:
- element.insert(new_index, sub_element)
- return sub_element
+ return _get_mandatory_section(tree, "configuration/nodes")
+def get_resources(tree):
+ """
+ Return the 'resources' element from the tree
+ tree -- cib etree node
+ """
+ return _get_mandatory_section(tree, "configuration/resources")
def get_pacemaker_version_by_which_cib_was_validated(cib):
"""
@@ -173,86 +269,3 @@ def get_pacemaker_version_by_which_cib_was_validated(cib):
int(match.group("minor")),
int(match.group("rev") or 0)
)
-
-
-def upgrade_cib(cib, runner):
- """
- Upgrade CIB to the latest schema of installed pacemaker. Returns upgraded
- CIB as string.
- Raises LibraryError on any failure.
-
- cib -- cib etree
- runner -- CommandRunner
- """
- temp_file = None
- try:
- temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs")
- temp_file.write(etree.tostring(cib).decode())
- temp_file.flush()
- stdout, stderr, retval = runner.run(
- [
- os.path.join(settings.pacemaker_binaries, "cibadmin"),
- "--upgrade",
- "--force"
- ],
- env_extend={"CIB_file": temp_file.name}
- )
-
- if retval != 0:
- temp_file.close()
- raise LibraryError(
- reports.cib_upgrade_failed(join_multilines([stderr, stdout]))
- )
-
- temp_file.seek(0)
- return etree.fromstring(temp_file.read())
- except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e:
- raise LibraryError(reports.cib_upgrade_failed(str(e)))
- finally:
- if temp_file:
- temp_file.close()
-
-
-def ensure_cib_version(runner, cib, version):
- """
- This method ensures that specified cib is verified by pacemaker with
- version 'version' or newer. If cib doesn't correspond to this version,
- method will try to upgrade cib.
- Returns cib which was verified by pacemaker version 'version' or later.
- Raises LibraryError on any failure.
-
- runner -- CommandRunner
- cib -- cib tree
- version -- tuple of integers (<major>, <minor>, <revision>)
- """
- current_version = get_pacemaker_version_by_which_cib_was_validated(
- cib
- )
- if current_version >= version:
- return None
-
- upgraded_cib = upgrade_cib(cib, runner)
- current_version = get_pacemaker_version_by_which_cib_was_validated(
- upgraded_cib
- )
-
- if current_version >= version:
- return upgraded_cib
-
- raise LibraryError(reports.unable_to_upgrade_cib_to_required_version(
- current_version, version
- ))
-
-
-def etree_element_attibutes_to_dict(etree_el, required_key_list):
- """
- Returns all attributes of etree_el from required_key_list in dictionary,
- where keys are attributes and values are values of attributes or None if
- it's not present.
-
- etree_el -- etree element from which attributes should be extracted
- required_key_list -- list of strings, attributes names which should be
- extracted
- """
- return dict([(key, etree_el.get(key)) for key in required_key_list])
-
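The rewritten find_element_by_tag_and_id above distinguishes three outcomes: the element is found, an element with that id exists but under a different tag or context (a LibraryError carrying a specific report code), or the id is unused (LibraryError, or None when none_if_id_unused is set). A rough sketch of these cases, illustrative only, mirroring the new tests:

    from lxml import etree
    from pcs.lib.cib.tools import find_element_by_tag_and_id
    from pcs.lib.errors import LibraryError

    resources = etree.fromstring(
        '<resources><group id="g1"><primitive id="p1"/></group></resources>'
    )

    # found: the matching element is returned
    group_el = find_element_by_tag_and_id("group", resources, "g1")

    # the id exists but belongs to another tag: LibraryError
    # (ID_BELONGS_TO_UNEXPECTED_TYPE)
    try:
        find_element_by_tag_and_id("primitive", resources, "g1")
    except LibraryError:
        pass

    # the id is not used anywhere: return None instead of raising
    missing = find_element_by_tag_and_id(
        "group", resources, "g9", none_if_id_unused=True
    )
    assert missing is None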
diff --git a/pcs/lib/cluster_conf_facade.py b/pcs/lib/cluster_conf_facade.py
index 5ebc0e8..5f49c0f 100644
--- a/pcs/lib/cluster_conf_facade.py
+++ b/pcs/lib/cluster_conf_facade.py
@@ -7,6 +7,7 @@ from __future__ import (
from lxml import etree
+from pcs.common.tools import xml_fromstring
from pcs.lib import reports
from pcs.lib.errors import LibraryError
from pcs.lib.node import NodeAddresses, NodeAddressesList
@@ -24,7 +25,7 @@ class ClusterConfFacade(object):
config_string -- cluster.conf file content as string
"""
try:
- return cls(etree.fromstring(config_string))
+ return cls(xml_fromstring(config_string))
except (etree.XMLSyntaxError, etree.DocumentInvalid) as e:
raise LibraryError(reports.cluster_conf_invalid_format(str(e)))
@@ -56,4 +57,3 @@ class ClusterConfFacade(object):
id=node.get("nodeid")
))
return result
-
diff --git a/pcs/lib/commands/acl.py b/pcs/lib/commands/acl.py
index 276f0b3..3c368de 100644
--- a/pcs/lib/commands/acl.py
+++ b/pcs/lib/commands/acl.py
@@ -5,13 +5,19 @@ from __future__ import (
unicode_literals,
)
-from pcs.lib import reports
+from contextlib import contextmanager
+
from pcs.lib.cib import acl
-from pcs.lib.errors import LibraryError
+from pcs.lib.cib.tools import get_acls
REQUIRED_CIB_VERSION = (2, 0, 0)
+@contextmanager
+def cib_acl_section(env):
+ cib = env.get_cib(REQUIRED_CIB_VERSION)
+ yield get_acls(cib)
+ env.push_cib(cib)
def create_role(lib_env, role_id, permission_info_list, description):
"""
@@ -24,16 +30,12 @@ def create_role(lib_env, role_id, permission_info_list, description):
(<read|write|deny>, <xpath|id>, <any string>)
description -- text description for role
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
-
- if permission_info_list:
- acl.validate_permissions(cib, permission_info_list)
- role_el = acl.create_role(cib, role_id, description)
- if permission_info_list:
- acl.add_permissions_to_role(role_el, permission_info_list)
-
- lib_env.push_cib(cib)
-
+ with cib_acl_section(lib_env) as acl_section:
+ if permission_info_list:
+ acl.validate_permissions(acl_section, permission_info_list)
+ role_el = acl.create_role(acl_section, role_id, description)
+ if permission_info_list:
+ acl.add_permissions_to_role(role_el, permission_info_list)
def remove_role(lib_env, role_id, autodelete_users_groups=False):
"""
@@ -45,56 +47,26 @@ def remove_role(lib_env, role_id, autodelete_users_groups=False):
autodelete_users_groups -- if True targets and groups which are empty after
removal will be removed
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- try:
- acl.remove_role(cib, role_id, autodelete_users_groups)
- except acl.AclRoleNotFound as e:
- raise LibraryError(acl.acl_error_to_report_item(e))
- lib_env.push_cib(cib)
-
+ with cib_acl_section(lib_env) as acl_section:
+ acl.remove_role(acl_section, role_id, autodelete_users_groups)
def assign_role_not_specific(lib_env, role_id, target_or_group_id):
"""
- Assign role wth id role_id to target or group with id target_or_group_id.
- Target element has bigger pririty so if there are target and group with same
- id only target element will be affected by this function.
+ Assign role with id role_id to target or group with id target_or_group_id.
+ Target element has bigger priority so if there are target and group with
+ the same id only target element will be affected by this function.
Raises LibraryError on any failure.
lib_env -- LibraryEnviroment
- role_id -- id of role which should be assigne to target/group
+ role_id -- id of role which should be assigned to target/group
target_or_group_id -- id of target/group element
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- try:
+ with cib_acl_section(lib_env) as acl_section:
acl.assign_role(
- _get_target_or_group(cib, target_or_group_id),
- acl.find_role(cib, role_id)
+ acl_section,
+ role_id,
+ acl.find_target_or_group(acl_section, target_or_group_id),
)
- except acl.AclError as e:
- raise LibraryError(acl.acl_error_to_report_item(e))
- lib_env.push_cib(cib)
-
-
-def _get_target_or_group(cib, target_or_group_id):
- """
- Returns acl_target or acl_group element with id target_or_group_id. Target
- element has bigger pririty so if there are target and group with same id
- only target element will be affected by this function.
- Raises LibraryError if there is no target or group element with
- specified id.
-
- cib -- cib etree node
- target_or_group_id -- id of target/group element which should be returned
- """
- try:
- return acl.find_target(cib, target_or_group_id)
- except acl.AclTargetNotFound:
- try:
- return acl.find_group(cib, target_or_group_id)
- except acl.AclGroupNotFound:
- raise LibraryError(
- reports.id_not_found(target_or_group_id, "user/group")
- )
def assign_role_to_target(lib_env, role_id, target_id):
"""
@@ -105,15 +77,12 @@ def assign_role_to_target(lib_env, role_id, target_id):
role_id -- id of acl_role element which should be assigned to target
target_id -- id of acl_target element to which role should be assigned
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- try:
+ with cib_acl_section(lib_env) as acl_section:
acl.assign_role(
- acl.find_target(cib, target_id), acl.find_role(cib, role_id)
+ acl_section,
+ role_id,
+ acl.find_target(acl_section, target_id),
)
- except acl.AclError as e:
- raise LibraryError(acl.acl_error_to_report_item(e))
- lib_env.push_cib(cib)
-
def assign_role_to_group(lib_env, role_id, group_id):
"""
@@ -124,23 +93,20 @@ def assign_role_to_group(lib_env, role_id, group_id):
role_id -- id of acl_role element which should be assigned to group
group_id -- id of acl_group element to which role should be assigned
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- try:
+ with cib_acl_section(lib_env) as acl_section:
acl.assign_role(
- acl.find_group(cib, group_id), acl.find_role(cib, role_id)
+ acl_section,
+ role_id,
+ acl.find_group(acl_section, group_id),
)
- except acl.AclError as e:
- raise LibraryError(acl.acl_error_to_report_item(e))
- lib_env.push_cib(cib)
-
def unassign_role_not_specific(
lib_env, role_id, target_or_group_id, autodelete_target_group=False
):
"""
Unassign role with role_id from target/group with id target_or_group_id.
- Target element has bigger pririty so if there are target and group with same
- id only target element will be affected by this function.
+ Target element has bigger priority so if there are target and group with
+ the same id only target element will be affected by this function.
Raises LibraryError on any failure.
lib_env -- LibraryEnvironment
@@ -149,14 +115,12 @@ def unassign_role_not_specific(
autodelete_target_group -- if True remove target/group element if has no
more role assigned
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- acl.unassign_role(
- _get_target_or_group(cib, target_or_group_id),
- role_id,
- autodelete_target_group
- )
- lib_env.push_cib(cib)
-
+ with cib_acl_section(lib_env) as acl_section:
+ acl.unassign_role(
+ acl.find_target_or_group(acl_section, target_or_group_id),
+ role_id,
+ autodelete_target_group
+ )
def unassign_role_from_target(
lib_env, role_id, target_id, autodelete_target=False
@@ -171,17 +135,12 @@ def unassign_role_from_target(
autodelete_target -- if True remove target element if has no more role
assigned
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- try:
+ with cib_acl_section(lib_env) as acl_section:
acl.unassign_role(
- acl.find_target(cib, target_id),
+ acl.find_target(acl_section, target_id),
role_id,
autodelete_target
)
- except acl.AclError as e:
- raise LibraryError(acl.acl_error_to_report_item(e))
- lib_env.push_cib(cib)
-
def unassign_role_from_group(
lib_env, role_id, group_id, autodelete_group=False
@@ -196,36 +155,12 @@ def unassign_role_from_group(
autodelete_target -- if True remove group element if has no more role
assigned
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- try:
+ with cib_acl_section(lib_env) as acl_section:
acl.unassign_role(
- acl.find_group(cib, group_id),
+ acl.find_group(acl_section, group_id),
role_id,
autodelete_group
)
- except acl.AclError as e:
- raise LibraryError(acl.acl_error_to_report_item(e))
- lib_env.push_cib(cib)
-
-
-def _assign_roles_to_element(cib, element, role_id_list):
- """
- Assign roles from role_id_list to element.
- Raises LibraryError on any failure.
-
- cib -- cib etree node
- element -- element to which specified roles should be assigned
- role_id_list -- list of role id
- """
- report_list = []
- for role_id in role_id_list:
- try:
- acl.assign_role(element, acl.find_role(cib, role_id))
- except acl.AclError as e:
- report_list.append(acl.acl_error_to_report_item(e))
- if report_list:
- raise LibraryError(*report_list)
-
def create_target(lib_env, target_id, role_list):
"""
@@ -236,10 +171,12 @@ def create_target(lib_env, target_id, role_list):
target_id -- id of new target
role_list -- list of roles to assign to new target
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- _assign_roles_to_element(cib, acl.create_target(cib, target_id), role_list)
- lib_env.push_cib(cib)
-
+ with cib_acl_section(lib_env) as acl_section:
+ acl.assign_all_roles(
+ acl_section,
+ role_list,
+ acl.create_target(acl_section, target_id)
+ )
def create_group(lib_env, group_id, role_list):
"""
@@ -250,10 +187,12 @@ def create_group(lib_env, group_id, role_list):
group_id -- id of new group
role_list -- list of roles to assign to new group
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- _assign_roles_to_element(cib, acl.create_group(cib, group_id), role_list)
- lib_env.push_cib(cib)
-
+ with cib_acl_section(lib_env) as acl_section:
+ acl.assign_all_roles(
+ acl_section,
+ role_list,
+ acl.create_group(acl_section, group_id)
+ )
def remove_target(lib_env, target_id):
"""
@@ -263,10 +202,8 @@ def remove_target(lib_env, target_id):
lib_env -- LibraryEnvironment
target_id -- id of taget which should be removed
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- acl.remove_target(cib, target_id)
- lib_env.push_cib(cib)
-
+ with cib_acl_section(lib_env) as acl_section:
+ acl.remove_target(acl_section, target_id)
def remove_group(lib_env, group_id):
"""
@@ -276,10 +213,8 @@ def remove_group(lib_env, group_id):
lib_env -- LibraryEnvironment
group_id -- id of group which should be removed
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- acl.remove_group(cib, group_id)
- lib_env.push_cib(cib)
-
+ with cib_acl_section(lib_env) as acl_section:
+ acl.remove_group(acl_section, group_id)
def add_permission(lib_env, role_id, permission_info_list):
"""
@@ -292,13 +227,12 @@ def add_permission(lib_env, role_id, permission_info_list):
permission_info_list -- list of permissons, items of list should be tuples:
(<read|write|deny>, <xpath|id>, <any string>)
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- acl.validate_permissions(cib, permission_info_list)
- acl.add_permissions_to_role(
- acl.provide_role(cib, role_id), permission_info_list
- )
- lib_env.push_cib(cib)
-
+ with cib_acl_section(lib_env) as acl_section:
+ acl.validate_permissions(acl_section, permission_info_list)
+ acl.add_permissions_to_role(
+ acl.provide_role(acl_section, role_id),
+ permission_info_list
+ )
def remove_permission(lib_env, permission_id):
"""
@@ -308,14 +242,12 @@ def remove_permission(lib_env, permission_id):
lib_env -- LibraryEnvironment
permission_id -- id of permission element which should be removed
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
- acl.remove_permission(cib, permission_id)
- lib_env.push_cib(cib)
-
+ with cib_acl_section(lib_env) as acl_section:
+ acl.remove_permission(acl_section, permission_id)
def get_config(lib_env):
"""
- Returns ACL configuration in disctionary. Fromat of output:
+ Returns ACL configuration in dictionary. Format of output:
{
"target_list": <list of targets>,
"group_list": <list og groups>,
@@ -324,10 +256,9 @@ def get_config(lib_env):
lib_env -- LibraryEnvironment
"""
- cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+ acl_section = get_acls(lib_env.get_cib(REQUIRED_CIB_VERSION))
return {
- "target_list": acl.get_target_list(cib),
- "group_list": acl.get_group_list(cib),
- "role_list": acl.get_role_list(cib),
+ "target_list": acl.get_target_list(acl_section),
+ "group_list": acl.get_group_list(acl_section),
+ "role_list": acl.get_role_list(acl_section),
}
-
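The acl command refactor above replaces the repeated get_cib()/push_cib() pairs with a single context manager, so each command body only touches the acls section. A minimal sketch of the pattern in isolation (names here are illustrative, not pcs API):

    from contextlib import contextmanager

    @contextmanager
    def cib_section(env, get_section, required_version=(2, 0, 0)):
        # load the CIB once, hand out only the section the command needs,
        # and push the modified CIB back when the block finishes cleanly
        cib = env.get_cib(required_version)
        yield get_section(cib)
        env.push_cib(cib)

    # usage mirrors the new acl commands, e.g.:
    # with cib_section(lib_env, get_acls) as acl_section:
    #     acl.remove_permission(acl_section, permission_id)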
diff --git a/pcs/lib/commands/alert.py b/pcs/lib/commands/alert.py
index 1ae5405..a11d0a7 100644
--- a/pcs/lib/commands/alert.py
+++ b/pcs/lib/commands/alert.py
@@ -33,7 +33,7 @@ def create_alert(
description -- alert description description
"""
if not path:
- raise LibraryError(reports.required_option_is_missing("path"))
+ raise LibraryError(reports.required_option_is_missing(["path"]))
cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
@@ -86,6 +86,7 @@ def remove_alert(lib_env, alert_id_list):
alert.remove_alert(cib, alert_id)
except LibraryError as e:
report_list += e.args
+
lib_env.report_processor.process_list(report_list)
lib_env.push_cib(cib)
@@ -114,7 +115,7 @@ def add_recipient(
"""
if not recipient_value:
raise LibraryError(
- reports.required_option_is_missing("value")
+ reports.required_option_is_missing(["value"])
)
cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
diff --git a/pcs/lib/commands/booth.py b/pcs/lib/commands/booth.py
index 705900a..73a3853 100644
--- a/pcs/lib/commands/booth.py
+++ b/pcs/lib/commands/booth.py
@@ -11,7 +11,8 @@ from functools import partial
from pcs import settings
from pcs.common.tools import join_multilines
-from pcs.lib import external, reports
+from pcs.lib import external, reports, tools
+from pcs.lib.cib.resource import primitive, group
from pcs.lib.booth import (
config_exchange,
config_files,
@@ -26,6 +27,7 @@ from pcs.lib.booth.env import get_config_file_name
from pcs.lib.cib.tools import get_resources
from pcs.lib.errors import LibraryError, ReportItemSeverity
from pcs.lib.node import NodeAddresses
+from pcs.lib.resource_agent import find_valid_resource_agent_by_name
def config_setup(env, booth_configuration, overwrite_existing=False):
@@ -40,7 +42,7 @@ def config_setup(env, booth_configuration, overwrite_existing=False):
*config_structure.take_peers(config_content)
)
- env.booth.create_key(config_files.generate_key(), overwrite_existing)
+ env.booth.create_key(tools.generate_key(), overwrite_existing)
config_content = config_structure.set_authfile(
config_content,
env.booth.key_path
@@ -145,24 +147,56 @@ def config_ticket_remove(env, ticket_name):
)
env.booth.push_config(build(booth_configuration))
-def create_in_cluster(env, name, ip, resource_create, resource_remove):
- #TODO resource_create is provisional hack until resources are not moved to
- #lib
- resources_section = get_resources(env.get_cib())
+def create_in_cluster(env, name, ip, allow_absent_resource_agent=False):
+ """
+ Create a group with an ip resource and a booth resource
+
+ LibraryEnvironment env provides everything for communication with externals
+ string name identifies the booth instance
+ string ip determines the floating ip for the operation of the booth
+ bool allow_absent_resource_agent is a flag allowing creation of the booth
+ resource even if its agent is not installed
+ """
+ cib = env.get_cib()
+ resources_section = get_resources(cib)
booth_config_file_path = get_config_file_name(name)
if resource.find_for_config(resources_section, booth_config_file_path):
raise LibraryError(booth_reports.booth_already_in_cib(name))
- resource.get_creator(resource_create, resource_remove)(
- ip,
- booth_config_file_path,
- create_id = partial(
- resource.create_resource_id,
- resources_section,
- name
- )
+ create_id = partial(
+ resource.create_resource_id,
+ resources_section,
+ name
+ )
+ get_agent = partial(
+ find_valid_resource_agent_by_name,
+ env.report_processor,
+ env.cmd_runner(),
+ allowed_absent=allow_absent_resource_agent
)
+ create_primitive = partial(
+ primitive.create,
+ env.report_processor,
+ resources_section,
+ )
+ into_booth_group = partial(
+ group.place_resource,
+ group.provide_group(resources_section, create_id("group")),
+ )
+
+ into_booth_group(create_primitive(
+ create_id("ip"),
+ get_agent("ocf:heartbeat:IPaddr2"),
+ instance_attributes={"ip": ip},
+ ))
+ into_booth_group(create_primitive(
+ create_id("service"),
+ get_agent("ocf:pacemaker:booth-site"),
+ instance_attributes={"config": booth_config_file_path},
+ ))
+
+ env.push_cib(cib)
def remove_from_cluster(env, name, resource_remove, allow_remove_multiple):
#TODO resource_remove is provisional hack until resources are not moved to
diff --git a/pcs/lib/commands/cluster.py b/pcs/lib/commands/cluster.py
new file mode 100644
index 0000000..7386e3c
--- /dev/null
+++ b/pcs/lib/commands/cluster.py
@@ -0,0 +1,495 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.common import report_codes
+from pcs.lib import reports, nodes_task, node_communication_format
+from pcs.lib.node import (
+ NodeAddresses,
+ NodeAddressesList,
+ node_addresses_contain_name,
+ node_addresses_contain_host,
+)
+from pcs.lib.tools import generate_key
+from pcs.lib.cib.resource import guest_node, primitive, remote_node
+from pcs.lib.cib.tools import get_resources, find_element_by_tag_and_id
+from pcs.lib.env_tools import get_nodes, get_nodes_remote, get_nodes_guest
+from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker import state
+from pcs.lib.pacemaker.live import remove_node
+
+def _ensure_can_add_node_to_remote_cluster(env, node_addresses):
+ report_items = []
+ nodes_task.check_can_add_node_to_cluster(
+ env.node_communicator(),
+ node_addresses,
+ report_items,
+ check_response=nodes_task.availability_checker_remote_node
+ )
+ env.report_processor.process_list(report_items)
+
+def _share_authkey(
+ env, current_nodes, candidate_node_addresses,
+ allow_incomplete_distribution=False
+):
+ if env.pacemaker.has_authkey:
+ authkey_content = env.pacemaker.get_authkey_content()
+ node_addresses_list = NodeAddressesList([candidate_node_addresses])
+ else:
+ authkey_content = generate_key()
+ node_addresses_list = current_nodes + [candidate_node_addresses]
+
+ nodes_task.distribute_files(
+ env.node_communicator(),
+ env.report_processor,
+ node_communication_format.pcmk_authkey_file(authkey_content),
+ node_addresses_list,
+ allow_incomplete_distribution,
+ description="remote node configuration files"
+ )
+
+def _start_and_enable_pacemaker_remote(env, node_list, allow_fails=False):
+ nodes_task.run_actions_on_multiple_nodes(
+ env.node_communicator(),
+ env.report_processor,
+ node_communication_format.create_pcmk_remote_actions([
+ "start",
+ "enable",
+ ]),
+ lambda key, response: response.code == "success",
+ node_list,
+ allow_fails,
+ description="start of service pacemaker_remote"
+ )
+
+def _prepare_pacemaker_remote_environment(
+ env, current_nodes, node_host, allow_incomplete_distribution, allow_fails
+):
+ if not env.is_corosync_conf_live:
+ env.report_processor.process_list([
+ reports.nolive_skip_files_distribution(
+ ["pacemaker authkey"],
+ [node_host]
+ ),
+ reports.nolive_skip_service_command_on_nodes(
+ "pacemaker_remote",
+ "start",
+ [node_host]
+ ),
+ reports.nolive_skip_service_command_on_nodes(
+ "pacemaker_remote",
+ "enable",
+ [node_host]
+ ),
+ ])
+ return
+
+ candidate_node = NodeAddresses(node_host)
+ _ensure_can_add_node_to_remote_cluster(env, candidate_node)
+ _share_authkey(
+ env,
+ current_nodes,
+ candidate_node,
+ allow_incomplete_distribution
+ )
+ _start_and_enable_pacemaker_remote(env, [candidate_node], allow_fails)
+
+def _ensure_resource_running(env, resource_id):
+ env.report_processor.process(
+ state.ensure_resource_running(env.get_cluster_state(), resource_id)
+ )
+
+def _ensure_consistently_live_env(env):
+ if env.is_cib_live and env.is_corosync_conf_live:
+ return
+
+ #we accept it as well, we need it for tests
+ if not env.is_cib_live and not env.is_corosync_conf_live:
+ return
+
+ raise LibraryError(reports.live_environment_required([
+ "CIB" if not env.is_cib_live else "COROSYNC_CONF"
+ ]))
+
+
+def node_add_remote(
+ env, host, node_name, operations, meta_attributes, instance_attributes,
+ allow_incomplete_distribution=False,
+ allow_pacemaker_remote_service_fail=False,
+ allow_invalid_operation=False,
+ allow_invalid_instance_attributes=False,
+ use_default_operations=True,
+ wait=False,
+):
+ """
+ create the resource ocf:pacemaker:remote and use it as a remote node
+
+ LibraryEnvironment env provides everything for communication with externals
+ list of dict operations contains attributes for each entered operation
+ dict meta_attributes contains attributes for primitive/meta_attributes
+ dict instance_attributes contains attributes for
+ primitive/instance_attributes
+ bool allow_incomplete_distribution -- is a flag allowing this command to
+ finish successfully even if file distribution did not succeed
+ bool allow_pacemaker_remote_service_fail -- is a flag allowing this command
+ to finish successfully even if starting/enabling pacemaker_remote did
+ not succeed
+ bool allow_invalid_operation is a flag allowing the use of operations that
+ are not listed in the resource agent metadata
+ bool allow_invalid_instance_attributes is a flag allowing the use of
+ instance attributes that are not listed in the resource agent metadata
+ or allowing not to use instance_attributes that are required in the
+ resource agent metadata
+ bool use_default_operations is a flag controlling the addition of default
+ cib operations (specified in a resource agent)
+ mixed wait is a flag controlling waiting for the pacemaker idle mechanism
+ """
+ _ensure_consistently_live_env(env)
+ env.ensure_wait_satisfiable(wait)
+
+ cib = env.get_cib()
+ current_nodes = get_nodes(env.get_corosync_conf(), cib)
+
+ resource_agent = remote_node.get_agent(
+ env.report_processor,
+ env.cmd_runner()
+ )
+
+ report_list = remote_node.validate_create(
+ current_nodes,
+ resource_agent,
+ host,
+ node_name,
+ instance_attributes
+ )
+
+ try:
+ remote_resource_element = remote_node.create(
+ env.report_processor,
+ resource_agent,
+ get_resources(cib),
+ host,
+ node_name,
+ operations,
+ meta_attributes,
+ instance_attributes,
+ allow_invalid_operation,
+ allow_invalid_instance_attributes,
+ use_default_operations,
+ )
+ except LibraryError as e:
+ #The unique id conflict is reported both by the check against nodes and by
+ #resource create. Until the resource create validation is separated out,
+ #we need to deduplicate these reports after the fact.
+ already_exists = []
+ unified_report_list = []
+ for report in report_list + list(e.args):
+ if report.code != report_codes.ID_ALREADY_EXISTS:
+ unified_report_list.append(report)
+ elif report.info["id"] not in already_exists:
+ unified_report_list.append(report)
+ already_exists.append(report.info["id"])
+ report_list = unified_report_list
+
+ env.report_processor.process_list(report_list)
+
+ _prepare_pacemaker_remote_environment(
+ env,
+ current_nodes,
+ host,
+ allow_incomplete_distribution,
+ allow_pacemaker_remote_service_fail,
+ )
+ env.push_cib(cib, wait)
+ if wait:
+ _ensure_resource_running(env, remote_resource_element.attrib["id"])
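+# Illustrative call of node_add_remote (a sketch, not part of this change; the
+# host, node name and the env object are hypothetical):
+#
+#   node_add_remote(
+#       env, "192.168.122.10", "remote-1",
+#       operations=[], meta_attributes={}, instance_attributes={},
+#   )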
+
+def node_add_guest(
+ env, node_name, resource_id, options,
+ allow_incomplete_distribution=False,
+ allow_pacemaker_remote_service_fail=False, wait=False,
+):
+
+ """
+ setup resource (resource_id) as guest node and setup node as guest
+
+ LibraryEnvironment env provides all for communication with externals
+ string resource_id -- specifies resource that should be guest node
+ dict options could contain keys remote-node, remote-port, remote-addr,
+ remote-connect-timeout
+ bool allow_incomplete_distribution -- is a flag for allowing this command to
+ finish successfully even if file distribution does not succeed
+ bool allow_pacemaker_remote_service_fail -- is a flag for allowing this
+ command to finish successfully even if starting/enabling
+ pacemaker_remote does not succeed
+ mixed wait is a flag for controlling waiting for the pacemaker idle mechanism
+ """
+ _ensure_consistently_live_env(env)
+ env.ensure_wait_satisfiable(wait)
+
+ cib = env.get_cib()
+ current_nodes = get_nodes(env.get_corosync_conf(), cib)
+
+ report_list = guest_node.validate_set_as_guest(
+ cib,
+ current_nodes,
+ node_name,
+ options
+ )
+ try:
+ resource_element = find_element_by_tag_and_id(
+ primitive.TAG,
+ get_resources(cib),
+ resource_id
+ )
+ report_list.extend(guest_node.validate_is_not_guest(resource_element))
+ except LibraryError as e:
+ report_list.extend(e.args)
+
+ env.report_processor.process_list(report_list)
+
+ guest_node.set_as_guest(
+ resource_element,
+ node_name,
+ options.get("remote-addr", None),
+ options.get("remote-port", None),
+ options.get("remote-connect-timeout", None),
+ )
+
+ _prepare_pacemaker_remote_environment(
+ env,
+ current_nodes,
+ guest_node.get_host_from_options(node_name, options),
+ allow_incomplete_distribution,
+ allow_pacemaker_remote_service_fail,
+ )
+
+ env.push_cib(cib, wait)
+ if wait:
+ _ensure_resource_running(env, resource_id)
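+# Illustrative call of node_add_guest (a sketch, not part of this change; the
+# arguments are env, node_name, resource_id and options, all values below are
+# hypothetical):
+#
+#   node_add_guest(
+#       env, "guest-1", "vm-fedora", {"remote-addr": "192.168.122.11"},
+#   )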
+
+def _find_resources_to_remove(
+ cib, report_processor,
+ node_type, node_identifier, allow_remove_multiple_nodes,
+ find_resources
+):
+ resource_element_list = find_resources(get_resources(cib), node_identifier)
+
+ if not resource_element_list:
+ raise LibraryError(reports.node_not_found(node_identifier, node_type))
+
+ if len(resource_element_list) > 1:
+ report_processor.process(
+ reports.get_problem_creator(
+ report_codes.FORCE_REMOVE_MULTIPLE_NODES,
+ allow_remove_multiple_nodes
+ )(
+ reports.multiple_result_found,
+ "resource",
+ [resource.attrib["id"] for resource in resource_element_list],
+ node_identifier
+ )
+ )
+
+ return resource_element_list
+
+def _remove_pcmk_remote_from_cib(
+ nodes, resource_element_list, get_host, remove_resource
+):
+ node_addresses_set = set()
+ for resource_element in resource_element_list:
+ for node in nodes:
+ #remote nodes use ring0 only
+ if get_host(resource_element) == node.ring0:
+ node_addresses_set.add(node)
+ remove_resource(resource_element)
+
+ return sorted(node_addresses_set, key=lambda node: node.ring0)
+
+def _destroy_pcmk_remote_env(env, node_addresses_list, allow_fails):
+ actions = node_communication_format.create_pcmk_remote_actions([
+ "stop",
+ "disable",
+ ])
+ files = {
+ "pacemaker_remote authkey": {"type": "pcmk_remote_authkey"},
+ }
+
+ nodes_task.run_actions_on_multiple_nodes(
+ env.node_communicator(),
+ env.report_processor,
+ actions,
+ lambda key, response: response.code == "success",
+ node_addresses_list,
+ allow_fails,
+ description="stop of service pacemaker_remote"
+ )
+
+ nodes_task.remove_files(
+ env.node_communicator(),
+ env.report_processor,
+ files,
+ node_addresses_list,
+ allow_fails,
+ description="remote node files"
+ )
+
+def _report_skip_live_parts_in_remove(node_addresses_list):
+ #remote nodes use ring0 only
+ node_host_list = [addresses.ring0 for addresses in node_addresses_list]
+ return [
+ reports.nolive_skip_service_command_on_nodes(
+ "pacemaker_remote",
+ "stop",
+ node_host_list
+ ),
+ reports.nolive_skip_service_command_on_nodes(
+ "pacemaker_remote",
+ "disable",
+ node_host_list
+ ),
+ reports.nolive_skip_files_remove(["pacemaker authkey"], node_host_list)
+ ]
+
+def node_remove_remote(
+ env, node_identifier, remove_resource,
+ allow_remove_multiple_nodes=False,
+ allow_pacemaker_remote_service_fail=False
+):
+ """
+ remove a resource representing remote node and destroy remote node
+
+ LibraryEnvironment env provides all for communication with externals
+ string node_identifier -- node name or hostname
+ callable remove_resource -- function for removing a resource
+ bool allow_remove_multiple_nodes -- is a flag for allowing removal of
+ unexpected multiple occurrences of a remote node matching node_identifier
+ bool allow_pacemaker_remote_service_fail -- is a flag for allowing this
+ command to finish successfully even if stopping/disabling
+ pacemaker_remote does not succeed
+ """
+
+ _ensure_consistently_live_env(env)
+ cib = env.get_cib()
+ resource_element_list = _find_resources_to_remove(
+ cib,
+ env.report_processor,
+ "remote",
+ node_identifier,
+ allow_remove_multiple_nodes,
+ remote_node.find_node_resources,
+ )
+ node_addresses_list = _remove_pcmk_remote_from_cib(
+ get_nodes_remote(cib),
+ resource_element_list,
+ remote_node.get_host,
+ lambda resource_element: remove_resource(
+ resource_element.attrib["id"],
+ is_remove_remote_context=True,
+ )
+ )
+ if not env.is_corosync_conf_live:
+ env.report_processor.process_list(
+ _report_skip_live_parts_in_remove(node_addresses_list)
+ )
+ return
+
+ #removing the node from pcmk caches is currently integrated in the
+ #remove_resource function
+ _destroy_pcmk_remote_env(
+ env,
+ node_addresses_list,
+ allow_pacemaker_remote_service_fail
+ )
+
+def node_remove_guest(
+ env, node_identifier,
+ allow_remove_multiple_nodes=False,
+ allow_pacemaker_remote_service_fail=False,
+ wait=False,
+):
+ """
+ remove the guest node setting from its resource and destroy the guest node
+
+ LibraryEnvironment env provides all for communication with externals
+ string node_identifier -- node name, hostname or resource id
+ bool allow_remove_multiple_nodes -- is a flag for allowing removal of
+ unexpected multiple occurrences of a guest node matching node_identifier
+ bool allow_pacemaker_remote_service_fail -- is a flag for allowing this
+ command to finish successfully even if stopping/disabling
+ pacemaker_remote does not succeed
+ """
+ _ensure_consistently_live_env(env)
+ env.ensure_wait_satisfiable(wait)
+ cib = env.get_cib()
+
+ resource_element_list = _find_resources_to_remove(
+ cib,
+ env.report_processor,
+ "guest",
+ node_identifier,
+ allow_remove_multiple_nodes,
+ guest_node.find_node_resources,
+ )
+
+ node_addresses_list = _remove_pcmk_remote_from_cib(
+ get_nodes_guest(cib),
+ resource_element_list,
+ guest_node.get_host,
+ guest_node.unset_guest,
+ )
+ env.push_cib(cib, wait)
+
+ if not env.is_corosync_conf_live:
+ env.report_processor.process_list(
+ _report_skip_live_parts_in_remove(node_addresses_list)
+ )
+ return
+
+ #remove node from pcmk caches
+ for node_addresses in node_addresses_list:
+ remove_node(env.cmd_runner(), node_addresses.name)
+
+ _destroy_pcmk_remote_env(
+ env,
+ node_addresses_list,
+ allow_pacemaker_remote_service_fail
+ )
+
+def node_clear(env, node_name, allow_clear_cluster_node=False):
+ """
+ Remove specified node from various cluster caches.
+
+ LibraryEnvironment env provides all for communication with externals
+ string node_name
+ bool allow_clear_cluster_node -- flag allowing the node to be cleared even
+ if it's still in a cluster
+ """
+ mocked_envs = []
+ if not env.is_cib_live:
+ mocked_envs.append("CIB")
+ if not env.is_corosync_conf_live:
+ mocked_envs.append("COROSYNC_CONF")
+ if mocked_envs:
+ raise LibraryError(reports.live_environment_required(mocked_envs))
+
+ current_nodes = get_nodes(env.get_corosync_conf(), env.get_cib())
+ if(
+ node_addresses_contain_name(current_nodes, node_name)
+ or
+ node_addresses_contain_host(current_nodes, node_name)
+ ):
+ env.report_processor.process(
+ reports.get_problem_creator(
+ report_codes.FORCE_CLEAR_CLUSTER_NODE,
+ allow_clear_cluster_node
+ )(
+ reports.node_to_clear_is_still_in_cluster,
+ node_name
+ )
+ )
+
+ remove_node(env.cmd_runner(), node_name)
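+# Illustrative call (a sketch): node_clear(env, "old-node") refuses to clear a
+# node which is still listed in the corosync configuration or in the CIB;
+# passing allow_clear_cluster_node=True turns that error into a warning.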
diff --git a/pcs/lib/commands/fencing_topology.py b/pcs/lib/commands/fencing_topology.py
new file mode 100644
index 0000000..e7d9003
--- /dev/null
+++ b/pcs/lib/commands/fencing_topology.py
@@ -0,0 +1,122 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.common.fencing_topology import (
+ TARGET_TYPE_REGEXP,
+ TARGET_TYPE_ATTRIBUTE,
+)
+from pcs.lib.cib import fencing_topology as cib_fencing_topology
+from pcs.lib.cib.tools import (
+ get_fencing_topology,
+ get_resources,
+)
+from pcs.lib.pacemaker.live import get_cluster_status_xml
+from pcs.lib.pacemaker.state import ClusterState
+
+def add_level(
+ lib_env, level, target_type, target_value, devices,
+ force_device=False, force_node=False
+):
+ """
+ Validate and add a new fencing level
+
+ LibraryEnvironment lib_env -- environment
+ int|string level -- level (index) of the new fencing level
+ constant target_type -- the new fencing level target value type
+ mixed target_value -- the new fencing level target value
+ Iterable devices -- list of stonith devices for the new fencing level
+ bool force_device -- continue even if a stonith device does not exist
+ bool force_node -- continue even if a node (target) does not exist
+ """
+ version_check = None
+ if target_type == TARGET_TYPE_REGEXP:
+ version_check = (2, 3, 0)
+ elif target_type == TARGET_TYPE_ATTRIBUTE:
+ version_check = (2, 4, 0)
+
+ cib = lib_env.get_cib(version_check)
+ cib_fencing_topology.add_level(
+ lib_env.report_processor,
+ get_fencing_topology(cib),
+ get_resources(cib),
+ level,
+ target_type,
+ target_value,
+ devices,
+ ClusterState(
+ get_cluster_status_xml(lib_env.cmd_runner())
+ ).node_section.nodes,
+ force_device,
+ force_node
+ )
+ lib_env.report_processor.send()
+ lib_env.push_cib(cib)
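+# Illustrative call (a sketch; TARGET_TYPE_NODE is assumed to be the plain
+# node-name target type from pcs.common.fencing_topology, it is not imported
+# in this module):
+#
+#   add_level(lib_env, 1, TARGET_TYPE_NODE, "node1", ["fence-ipmi-node1"])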
+
+def get_config(lib_env):
+ """
+ Get fencing levels configuration.
+
+ Return a list of levels where each level is a dict with keys: target_type,
+ target_value, level and devices. Devices is a list of stonith device ids.
+
+ LibraryEnvironment lib_env -- environment
+ """
+ cib = lib_env.get_cib()
+ return cib_fencing_topology.export(get_fencing_topology(cib))
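+# The returned structure looks roughly like this (illustrative values):
+#
+#   [
+#       {
+#           "target_type": "node", "target_value": "node1",
+#           "level": "1", "devices": ["fence-ipmi-node1"],
+#       },
+#   ]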
+
+def remove_all_levels(lib_env):
+ """
+ Remove all fencing levels
+ LibraryEnvironment lib_env -- environment
+ """
+ cib = lib_env.get_cib()
+ cib_fencing_topology.remove_all_levels(get_fencing_topology(cib))
+ lib_env.push_cib(cib)
+
+def remove_levels_by_params(
+ lib_env, level=None, target_type=None, target_value=None, devices=None,
+ ignore_if_missing=False
+):
+ """
+ Remove specified fencing level(s)
+
+ LibraryEnvironment lib_env -- environment
+ int|string level -- level (index) of the fencing level to be removed
+ constant target_type -- target value type of the fencing level to be removed
+ mixed target_value -- target value of the fencing level to be removed
+ Iterable devices -- list of stonith devices of the fencing level to be removed
+ bool ignore_if_missing -- when True, do not report if level not found
+ """
+ cib = lib_env.get_cib()
+ cib_fencing_topology.remove_levels_by_params(
+ lib_env.report_processor,
+ get_fencing_topology(cib),
+ level,
+ target_type,
+ target_value,
+ devices,
+ ignore_if_missing
+ )
+ lib_env.report_processor.send()
+ lib_env.push_cib(cib)
+
+def verify(lib_env):
+ """
+ Check if all cluster nodes and stonith devices used in fencing levels exist
+
+ LibraryEnvironment lib_env -- environment
+ """
+ cib = lib_env.get_cib()
+ cib_fencing_topology.verify(
+ lib_env.report_processor,
+ get_fencing_topology(cib),
+ get_resources(cib),
+ ClusterState(
+ get_cluster_status_xml(lib_env.cmd_runner())
+ ).node_section.nodes
+ )
+ lib_env.report_processor.send()
diff --git a/pcs/lib/commands/node.py b/pcs/lib/commands/node.py
new file mode 100644
index 0000000..cc9424f
--- /dev/null
+++ b/pcs/lib/commands/node.py
@@ -0,0 +1,166 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from contextlib import contextmanager
+
+from pcs.lib import reports
+from pcs.lib.cib.node import update_node_instance_attrs
+from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker.live import (
+ get_cluster_status_xml,
+ get_local_node_name,
+)
+from pcs.lib.pacemaker.state import ClusterState
+
+
+ at contextmanager
+def cib_runner_nodes(lib_env, wait):
+ lib_env.ensure_wait_satisfiable(wait)
+ runner = lib_env.cmd_runner()
+ cib = lib_env.get_cib()
+
+ state_nodes = ClusterState(
+ get_cluster_status_xml(runner)
+ ).node_section.nodes
+
+ yield (cib, runner, state_nodes)
+ lib_env.push_cib(cib, wait)
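+# Usage sketch (mirrors the helpers at the end of this module): the context
+# manager yields the parsed CIB, a command runner and the node list taken from
+# the cluster status, and pushes the CIB back once the with-block finishes:
+#
+#   with cib_runner_nodes(lib_env, wait) as (cib, runner, state_nodes):
+#       ...  # modify the cib here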
+
+
+def standby_unstandby_local(lib_env, standby, wait=False):
+ """
+ Change local node standby mode
+
+ LibraryEnvironment lib_env
+ bool standby -- True: enable standby, False: disable standby
+ mixed wait -- False: no wait, None: wait with default timeout, str or int:
+ wait with specified timeout
+ """
+ return _set_instance_attrs_local_node(
+ lib_env,
+ _create_standby_unstandby_dict(standby),
+ wait
+ )
+
+def standby_unstandby_list(lib_env, standby, node_names, wait=False):
+ """
+ Change specified nodes standby mode
+
+ LibraryEnvironment lib_env
+ bool standby -- True: enable standby, False: disable standby
+ iterable node_names -- nodes to apply the change to
+ mixed wait -- False: no wait, None: wait with default timeout, str or int:
+ wait with specified timeout
+ """
+ return _set_instance_attrs_node_list(
+ lib_env,
+ _create_standby_unstandby_dict(standby),
+ node_names,
+ wait
+ )
+
+def standby_unstandby_all(lib_env, standby, wait=False):
+ """
+ Change all nodes standby mode
+
+ LibraryEnvironment lib_env
+ bool standby -- True: enable standby, False: disable standby
+ mixed wait -- False: no wait, None: wait with default timeout, str or int:
+ wait with specified timeout
+ """
+ return _set_instance_attrs_all_nodes(
+ lib_env,
+ _create_standby_unstandby_dict(standby),
+ wait
+ )
+
+def maintenance_unmaintenance_local(lib_env, maintenance, wait=False):
+ """
+ Change local node maintenance mode
+
+ LibraryEnvironment lib_env
+ bool maintenance -- True: enable maintenance, False: disable maintenance
+ mixed wait -- False: no wait, None: wait with default timeout, str or int:
+ wait with specified timeout
+ """
+ return _set_instance_attrs_local_node(
+ lib_env,
+ _create_maintenance_unmaintenance_dict(maintenance),
+ wait
+ )
+
+def maintenance_unmaintenance_list(
+ lib_env, maintenance, node_names, wait=False
+):
+ """
+ Change specified nodes maintenance mode
+
+ LibraryEnvironment lib_env
+ bool maintenance -- True: enable maintenance, False: disable maintenance
+ iterable node_names -- nodes to apply the change to
+ mixed wait -- False: no wait, None: wait with default timeout, str or int:
+ wait with specified timeout
+ """
+ return _set_instance_attrs_node_list(
+ lib_env,
+ _create_maintenance_unmaintenance_dict(maintenance),
+ node_names,
+ wait
+ )
+
+def maintenance_unmaintenance_all(lib_env, maintenance, wait=False):
+ """
+ Change all nodes maintenance mode
+
+ LibraryEnvironment lib_env
+ bool maintenance -- True: enable maintenance, False: disable maintenance
+ mixed wait -- False: no wait, None: wait with default timeout, str or int:
+ wait with specified timeout
+ """
+ return _set_instance_attrs_all_nodes(
+ lib_env,
+ _create_maintenance_unmaintenance_dict(maintenance),
+ wait
+ )
+
+def _create_standby_unstandby_dict(standby):
+ return {"standby": "on" if standby else ""}
+
+def _create_maintenance_unmaintenance_dict(maintenance):
+ return {"maintenance": "on" if maintenance else ""}
+
+def _set_instance_attrs_local_node(lib_env, attrs, wait):
+ if not lib_env.is_cib_live:
+ # If we are not working with a live cluster we cannot get the local node
+ # name.
+ raise LibraryError(reports.live_environment_required_for_local_node())
+
+ with cib_runner_nodes(lib_env, wait) as (cib, runner, state_nodes):
+ update_node_instance_attrs(
+ cib,
+ get_local_node_name(runner),
+ attrs,
+ state_nodes
+ )
+
+def _set_instance_attrs_node_list(lib_env, attrs, node_names, wait):
+ with cib_runner_nodes(lib_env, wait) as (cib, dummy_runner, state_nodes):
+ known_nodes = [node.attrs.name for node in state_nodes]
+ report = []
+ for node in node_names:
+ if node not in known_nodes:
+ report.append(reports.node_not_found(node))
+ if report:
+ raise LibraryError(*report)
+
+ for node in node_names:
+ update_node_instance_attrs(cib, node, attrs, state_nodes)
+
+def _set_instance_attrs_all_nodes(lib_env, attrs, wait):
+ with cib_runner_nodes(lib_env, wait) as (cib, dummy_runner, state_nodes):
+ for node in [node.attrs.name for node in state_nodes]:
+ update_node_instance_attrs(cib, node, attrs, state_nodes)
diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
new file mode 100644
index 0000000..a9f8271
--- /dev/null
+++ b/pcs/lib/commands/resource.py
@@ -0,0 +1,726 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from contextlib import contextmanager
+from functools import partial
+
+from pcs.common import report_codes
+from pcs.lib import reports
+from pcs.lib.cib import resource
+from pcs.lib.cib.resource import operations, remote_node, guest_node
+from pcs.lib.cib.tools import (
+ find_element_by_tag_and_id,
+ get_resources,
+ IdProvider,
+)
+from pcs.lib.env_tools import get_nodes
+from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker.values import validate_id
+from pcs.lib.pacemaker.state import (
+ ensure_resource_state,
+ is_resource_managed,
+ ResourceNotFound,
+)
+from pcs.lib.resource_agent import(
+ find_valid_resource_agent_by_name as get_agent
+)
+
+ at contextmanager
+def resource_environment(
+ env, wait=False, wait_for_resource_ids=None, disabled_after_wait=False,
+ required_cib_version=None
+):
+ env.ensure_wait_satisfiable(wait)
+ cib = env.get_cib(required_cib_version)
+ yield get_resources(cib)
+ env.push_cib(cib, wait)
+ if wait is not False and wait_for_resource_ids:
+ state = env.get_cluster_state()
+ env.report_processor.process_list([
+ ensure_resource_state(not disabled_after_wait, state, res_id)
+ for res_id in wait_for_resource_ids
+ ])
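+# Usage sketch (mirrors the create/enable/disable commands below): the context
+# manager yields the resources section of the CIB, pushes the CIB back when the
+# with-block finishes and, if wait was requested, checks that the listed
+# resources reached the expected started/stopped state:
+#
+#   with resource_environment(env, wait, [resource_id]) as resources_section:
+#       ...  # create or modify resources here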
+
+def _validate_remote_connection(
+ nodes, resource_id, instance_attributes, allow_not_suitable_command
+):
+ report_list = []
+ report_list.append(
+ reports.get_problem_creator(
+ report_codes.FORCE_NOT_SUITABLE_COMMAND,
+ allow_not_suitable_command
+ )(reports.use_command_node_add_remote)
+ )
+
+ report_list.extend(
+ remote_node.validate_host_not_conflicts(
+ nodes,
+ resource_id,
+ instance_attributes
+ )
+ )
+ return report_list
+
+def _validate_guest_change(
+ tree, nodes, meta_attributes, allow_not_suitable_command,
+ detect_remove=False
+):
+ if not guest_node.is_node_name_in_options(meta_attributes):
+ return []
+
+ node_name = guest_node.get_node_name_from_options(meta_attributes)
+
+ report_list = []
+ create_report = reports.use_command_node_add_guest
+ if detect_remove and not guest_node.get_guest_option_value(meta_attributes):
+ create_report = reports.use_command_node_remove_guest
+
+ report_list.append(
+ reports.get_problem_creator(
+ report_codes.FORCE_NOT_SUITABLE_COMMAND,
+ allow_not_suitable_command
+ )(create_report)
+ )
+
+ report_list.extend(
+ guest_node.validate_conflicts(
+ tree,
+ nodes,
+ node_name,
+ meta_attributes
+ )
+ )
+
+ return report_list
+
+def _validate_special_cases(
+ nodes, resource_agent, resources_section, resource_id, meta_attributes,
+ instance_attributes, allow_not_suitable_command
+):
+ report_list = []
+
+ if resource_agent.get_name() == remote_node.AGENT_NAME.full_name:
+ report_list.extend(_validate_remote_connection(
+ nodes,
+ resource_id,
+ instance_attributes,
+ allow_not_suitable_command,
+ ))
+
+ report_list.extend(_validate_guest_change(
+ resources_section,
+ nodes,
+ meta_attributes,
+ allow_not_suitable_command,
+ ))
+
+ return report_list
+
+def create(
+ env, resource_id, resource_agent_name,
+ operations, meta_attributes, instance_attributes,
+ allow_absent_agent=False,
+ allow_invalid_operation=False,
+ allow_invalid_instance_attributes=False,
+ use_default_operations=True,
+ ensure_disabled=False,
+ wait=False,
+ allow_not_suitable_command=False,
+):
+ """
+ Create resource in a cib.
+
+ LibraryEnvironment env provides all for communication with externals
+ string resource_id is identifier of resource
+ string resource_agent_name contains name for the identification of agent
+ list of dict operations contains attributes for each entered operation
+ dict meta_attributes contains attributes for primitive/meta_attributes
+ dict instance_attributes contains attributes for
+ primitive/instance_attributes
+ bool allow_absent_agent is a flag for allowing agent that is not installed
+ in a system
+ bool allow_invalid_operation is a flag for allowing to use operations that
+ are not listed in a resource agent metadata
+ bool allow_invalid_instance_attributes is a flag for allowing to use
+ instance attributes that are not listed in a resource agent metadata
+ or for allowing to not use the instance_attributes that are required in
+ resource agent metadata
+ bool use_default_operations is a flag controlling whether default cib
+ operations (specified in a resource agent) are added
+ bool ensure_disabled is a flag that keeps the resource in target-role "Stopped"
+ mixed wait is a flag for controlling waiting for the pacemaker idle mechanism
+ bool allow_not_suitable_command -- flag for FORCE_NOT_SUITABLE_COMMAND
+ """
+ resource_agent = get_agent(
+ env.report_processor,
+ env.cmd_runner(),
+ resource_agent_name,
+ allow_absent_agent,
+ )
+ with resource_environment(
+ env,
+ wait,
+ [resource_id],
+ ensure_disabled or resource.common.are_meta_disabled(meta_attributes),
+ ) as resources_section:
+ env.report_processor.process_list(_validate_special_cases(
+ get_nodes(env.get_corosync_conf(), resources_section),
+ resource_agent,
+ resources_section,
+ resource_id,
+ meta_attributes,
+ instance_attributes,
+ allow_not_suitable_command
+ ))
+
+ primitive_element = resource.primitive.create(
+ env.report_processor, resources_section,
+ resource_id, resource_agent,
+ operations, meta_attributes, instance_attributes,
+ allow_invalid_operation,
+ allow_invalid_instance_attributes,
+ use_default_operations,
+ )
+ if ensure_disabled:
+ resource.common.disable(primitive_element)
+
+def _create_as_clone_common(
+ tag, env, resource_id, resource_agent_name,
+ operations, meta_attributes, instance_attributes, clone_meta_options,
+ allow_absent_agent=False,
+ allow_invalid_operation=False,
+ allow_invalid_instance_attributes=False,
+ use_default_operations=True,
+ ensure_disabled=False,
+ wait=False,
+ allow_not_suitable_command=False,
+):
+ """
+ Create resource in some kind of clone (clone or master).
+
+ Currently the only difference between the commands "create_as_clone" and
+ "create_as_master" is the tag, so both commands are created from this
+ function by passing the tag via functools.partial.
+
+ string tag is any clone tag. Currently it can be "clone" or "master".
+ LibraryEnvironment env provides all for communication with externals
+ string resource_id is identifier of resource
+ string resource_agent_name contains name for the identification of agent
+ list of dict operations contains attributes for each entered operation
+ dict meta_attributes contains attributes for primitive/meta_attributes
+ dict instance_attributes contains attributes for
+ primitive/instance_attributes
+ dict clone_meta_options contains attributes for clone/meta_attributes
+ bool allow_absent_agent is a flag for allowing agent that is not installed
+ in a system
+ bool allow_invalid_operation is a flag for allowing to use operations that
+ are not listed in a resource agent metadata
+ bool allow_invalid_instance_attributes is a flag for allowing to use
+ instance attributes that are not listed in a resource agent metadata
+ or for allowing to not use the instance_attributes that are required in
+ resource agent metadata
+ bool use_default_operations is a flag controlling whether default cib
+ operations (specified in a resource agent) are added
+ bool ensure_disabled is a flag that keeps the resource in target-role "Stopped"
+ mixed wait is a flag for controlling waiting for the pacemaker idle mechanism
+ bool allow_not_suitable_command -- flag for FORCE_NOT_SUITABLE_COMMAND
+ """
+ resource_agent = get_agent(
+ env.report_processor,
+ env.cmd_runner(),
+ resource_agent_name,
+ allow_absent_agent,
+ )
+ with resource_environment(
+ env,
+ wait,
+ [resource_id],
+ (
+ ensure_disabled
+ or
+ resource.common.are_meta_disabled(meta_attributes)
+ or
+ resource.common.is_clone_deactivated_by_meta(clone_meta_options)
+ )
+ ) as resources_section:
+ env.report_processor.process_list(_validate_special_cases(
+ get_nodes(env.get_corosync_conf(), resources_section),
+ resource_agent,
+ resources_section,
+ resource_id,
+ meta_attributes,
+ instance_attributes,
+ allow_not_suitable_command
+ ))
+
+ primitive_element = resource.primitive.create(
+ env.report_processor, resources_section,
+ resource_id, resource_agent,
+ operations, meta_attributes, instance_attributes,
+ allow_invalid_operation,
+ allow_invalid_instance_attributes,
+ use_default_operations,
+ )
+ clone_element = resource.clone.append_new(
+ tag,
+ resources_section,
+ primitive_element,
+ clone_meta_options,
+ )
+ if ensure_disabled:
+ resource.common.disable(clone_element)
+
+def create_in_group(
+ env, resource_id, resource_agent_name, group_id,
+ operations, meta_attributes, instance_attributes,
+ allow_absent_agent=False,
+ allow_invalid_operation=False,
+ allow_invalid_instance_attributes=False,
+ use_default_operations=True,
+ ensure_disabled=False,
+ adjacent_resource_id=None,
+ put_after_adjacent=False,
+ wait=False,
+ allow_not_suitable_command=False,
+):
+ """
+ Create resource in a cib and put it into defined group
+
+ LibraryEnvironment env provides all for communication with externals
+ string resource_id is identifier of resource
+ string resource_agent_name contains name for the identification of agent
+ string group_id is an identifier of the group to put the primitive resource into
+ list of dict operations contains attributes for each entered operation
+ dict meta_attributes contains attributes for primitive/meta_attributes
+ bool allow_absent_agent is a flag for allowing agent that is not installed
+ in a system
+ bool allow_invalid_operation is a flag for allowing to use operations that
+ are not listed in a resource agent metadata
+ bool allow_invalid_instance_attributes is a flag for allowing to use
+ instance attributes that are not listed in a resource agent metadata
+ or for allowing to not use the instance_attributes that are required in
+ resource agent metadata
+ bool use_default_operations is a flag controlling whether default cib
+ operations (specified in a resource agent) are added
+ bool ensure_disabled is a flag that keeps the resource in target-role "Stopped"
+ string adjacent_resource_id identifies the neighbor of a newly created resource
+ bool put_after_adjacent is a flag to put the newly created resource
+ before/after the adjacent resource
+ mixed wait is a flag for controlling waiting for the pacemaker idle mechanism
+ bool allow_not_suitable_command -- flag for FORCE_NOT_SUITABLE_COMMAND
+ """
+ resource_agent = get_agent(
+ env.report_processor,
+ env.cmd_runner(),
+ resource_agent_name,
+ allow_absent_agent,
+ )
+ with resource_environment(
+ env,
+ wait,
+ [resource_id],
+ ensure_disabled or resource.common.are_meta_disabled(meta_attributes),
+ ) as resources_section:
+ env.report_processor.process_list(_validate_special_cases(
+ get_nodes(env.get_corosync_conf(), resources_section),
+ resource_agent,
+ resources_section,
+ resource_id,
+ meta_attributes,
+ instance_attributes,
+ allow_not_suitable_command
+ ))
+
+ primitive_element = resource.primitive.create(
+ env.report_processor, resources_section,
+ resource_id, resource_agent,
+ operations, meta_attributes, instance_attributes,
+ allow_invalid_operation,
+ allow_invalid_instance_attributes,
+ use_default_operations,
+ )
+ if ensure_disabled:
+ resource.common.disable(primitive_element)
+ validate_id(group_id, "group name")
+ resource.group.place_resource(
+ resource.group.provide_group(resources_section, group_id),
+ primitive_element,
+ adjacent_resource_id,
+ put_after_adjacent,
+ )
+
+create_as_clone = partial(_create_as_clone_common, resource.clone.TAG_CLONE)
+create_as_master = partial(_create_as_clone_common, resource.clone.TAG_MASTER)
+
+def create_into_bundle(
+ env, resource_id, resource_agent_name,
+ operations, meta_attributes, instance_attributes,
+ bundle_id,
+ allow_absent_agent=False,
+ allow_invalid_operation=False,
+ allow_invalid_instance_attributes=False,
+ use_default_operations=True,
+ ensure_disabled=False,
+ wait=False,
+ allow_not_suitable_command=False,
+):
+ """
+ Create a new resource in a cib and put it into an existing bundle
+
+ LibraryEnvironment env provides all for communication with externals
+ string resource_id is identifier of resource
+ string resource_agent_name contains name for the identification of agent
+ list of dict operations contains attributes for each entered operation
+ dict meta_attributes contains attributes for primitive/meta_attributes
+ dict instance_attributes contains attributes for
+ primitive/instance_attributes
+ string bundle_id is id of an existing bundle to put the created resource in
+ bool allow_absent_agent is a flag for allowing agent that is not installed
+ in a system
+ bool allow_invalid_operation is a flag for allowing to use operations that
+ are not listed in a resource agent metadata
+ bool allow_invalid_instance_attributes is a flag for allowing to use
+ instance attributes that are not listed in a resource agent metadata
+ or for allowing to not use the instance_attributes that are required in
+ resource agent metadata
+ bool use_default_operations is a flag controlling whether default cib
+ operations (specified in a resource agent) are added
+ bool ensure_disabled is a flag that keeps the resource in target-role "Stopped"
+ mixed wait is a flag for controlling waiting for the pacemaker idle mechanism
+ bool allow_not_suitable_command -- flag for FORCE_NOT_SUITABLE_COMMAND
+ """
+ resource_agent = get_agent(
+ env.report_processor,
+ env.cmd_runner(),
+ resource_agent_name,
+ allow_absent_agent,
+ )
+ with resource_environment(
+ env,
+ wait,
+ [resource_id],
+ disabled_after_wait=ensure_disabled,
+ required_cib_version=(2, 8, 0)
+ ) as resources_section:
+ env.report_processor.process_list(_validate_special_cases(
+ get_nodes(env.get_corosync_conf(), resources_section),
+ resource_agent,
+ resources_section,
+ resource_id,
+ meta_attributes,
+ instance_attributes,
+ allow_not_suitable_command
+ ))
+
+ primitive_element = resource.primitive.create(
+ env.report_processor, resources_section,
+ resource_id, resource_agent,
+ operations, meta_attributes, instance_attributes,
+ allow_invalid_operation,
+ allow_invalid_instance_attributes,
+ use_default_operations,
+ )
+ if ensure_disabled:
+ resource.common.disable(primitive_element)
+ resource.bundle.add_resource(
+ find_element_by_tag_and_id(
+ "bundle", resources_section, bundle_id
+ ),
+ primitive_element
+ )
+
+def bundle_create(
+ env, bundle_id, container_type, container_options=None,
+ network_options=None, port_map=None, storage_map=None,
+ force_options=False,
+ wait=False,
+):
+ """
+ Create a new bundle containing no resources
+
+ LibraryEnvironment env -- provides communication with externals
+ string bundle_id -- id of the new bundle
+ string container_type -- container engine name (docker, lxc...)
+ dict container_options -- container options
+ dict network_options -- network options
+ list of dict port_map -- list of port mapping options
+ list of dict storage_map -- list of storage mapping options
+ bool force_options -- return warnings instead of forceable errors
+ mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
+ """
+ container_options = container_options or {}
+ network_options = network_options or {}
+ port_map = port_map or []
+ storage_map = storage_map or []
+
+ with resource_environment(
+ env,
+ wait,
+ [bundle_id],
+ # bundles are always enabled, currently there is no way to disable them
+ disabled_after_wait=False,
+ required_cib_version=(2, 8, 0)
+ ) as resources_section:
+ id_provider = IdProvider(resources_section)
+ env.report_processor.process_list(
+ resource.bundle.validate_new(
+ id_provider,
+ bundle_id,
+ container_type,
+ container_options,
+ network_options,
+ port_map,
+ storage_map,
+ force_options
+ )
+ )
+ resource.bundle.append_new(
+ resources_section,
+ id_provider,
+ bundle_id,
+ container_type,
+ container_options,
+ network_options,
+ port_map,
+ storage_map
+ )
+
+def bundle_update(
+ env, bundle_id, container_options=None, network_options=None,
+ port_map_add=None, port_map_remove=None, storage_map_add=None,
+ storage_map_remove=None,
+ force_options=False,
+ wait=False,
+):
+ """
+ Modify an existing bundle (does not touch encapsulated resources)
+
+ LibraryEnvironment env -- provides communication with externals
+ string bundle_id -- id of the bundle to modify
+ dict container_options -- container options to modify
+ dict network_options -- network options to modify
+ list of dict port_map_add -- list of port mapping options to add
+ list of string port_map_remove -- list of port mapping ids to remove
+ list of dict storage_map_add -- list of storage mapping options to add
+ list of string storage_map_remove -- list of storage mapping ids to remove
+ bool force_options -- return warnings instead of forceable errors
+ mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
+ """
+ container_options = container_options or {}
+ network_options = network_options or {}
+ port_map_add = port_map_add or []
+ port_map_remove = port_map_remove or []
+ storage_map_add = storage_map_add or []
+ storage_map_remove = storage_map_remove or []
+
+ with resource_environment(
+ env,
+ wait,
+ [bundle_id],
+ # bundles are always enabled, currently there is no way to disable them
+ disabled_after_wait=False,
+ required_cib_version=(2, 8, 0)
+ ) as resources_section:
+ id_provider = IdProvider(resources_section)
+ bundle_element = find_element_by_tag_and_id(
+ resource.bundle.TAG,
+ resources_section,
+ bundle_id
+ )
+ env.report_processor.process_list(
+ resource.bundle.validate_update(
+ id_provider,
+ bundle_element,
+ container_options,
+ network_options,
+ port_map_add,
+ port_map_remove,
+ storage_map_add,
+ storage_map_remove,
+ force_options
+ )
+ )
+ resource.bundle.update(
+ id_provider,
+ bundle_element,
+ container_options,
+ network_options,
+ port_map_add,
+ port_map_remove,
+ storage_map_add,
+ storage_map_remove
+ )
+
+def disable(env, resource_ids, wait):
+ """
+ Disallow specified resource to be started by the cluster
+ LibraryEnvironment env --
+ strings resource_ids -- ids of the resources to be disabled
+ mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
+ """
+ with resource_environment(
+ env, wait, resource_ids, True
+ ) as resources_section:
+ resource_el_list = _find_resources_or_raise(
+ resources_section,
+ resource_ids
+ )
+ env.report_processor.process_list(
+ _resource_list_enable_disable(
+ resource_el_list,
+ resource.common.disable,
+ env.get_cluster_state()
+ )
+ )
+
+def enable(env, resource_ids, wait):
+ """
+ Allow specified resource to be started by the cluster
+ LibraryEnvironment env --
+ strings resource_ids -- ids of the resources to be enabled
+ mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
+ """
+ with resource_environment(
+ env, wait, resource_ids, False
+ ) as resources_section:
+ resource_el_list = _find_resources_or_raise(
+ resources_section,
+ resource_ids,
+ resource.common.find_resources_to_enable
+ )
+ env.report_processor.process_list(
+ _resource_list_enable_disable(
+ resource_el_list,
+ resource.common.enable,
+ env.get_cluster_state()
+ )
+ )
+
+def _resource_list_enable_disable(resource_el_list, func, cluster_state):
+ report_list = []
+ for resource_el in resource_el_list:
+ res_id = resource_el.attrib["id"]
+ try:
+ if not is_resource_managed(cluster_state, res_id):
+ report_list.append(reports.resource_is_unmanaged(res_id))
+ func(resource_el)
+ except ResourceNotFound:
+ report_list.append(
+ reports.id_not_found(
+ res_id,
+ id_description="resource/clone/master/group"
+ )
+ )
+ return report_list
+
+def unmanage(env, resource_ids, with_monitor=False):
+ """
+ Set specified resources not to be managed by the cluster
+ LibraryEnvironment env --
+ strings resource_ids -- ids of the resources to become unmanaged
+ bool with_monitor -- disable resources' monitor operations
+ """
+ with resource_environment(env) as resources_section:
+ resource_el_list = _find_resources_or_raise(
+ resources_section,
+ resource_ids,
+ resource.common.find_resources_to_unmanage
+ )
+ primitives = []
+
+ for resource_el in resource_el_list:
+ resource.common.unmanage(resource_el)
+ if with_monitor:
+ primitives.extend(
+ resource.common.find_primitives(resource_el)
+ )
+
+ for resource_el in set(primitives):
+ for op in operations.get_resource_operations(
+ resource_el,
+ ["monitor"]
+ ):
+ operations.disable(op)
+
+def manage(env, resource_ids, with_monitor=False):
+ """
+ Set specified resource to be managed by the cluster
+ LibraryEnvironment env --
+ strings resource_ids -- ids of the resources to become managed
+ bool with_monitor -- enable resources' monitor operations
+ """
+ with resource_environment(env) as resources_section:
+ report_list = []
+ resource_el_list = _find_resources_or_raise(
+ resources_section,
+ resource_ids,
+ resource.common.find_resources_to_manage
+ )
+ primitives = []
+
+ for resource_el in resource_el_list:
+ resource.common.manage(resource_el)
+ primitives.extend(
+ resource.common.find_primitives(resource_el)
+ )
+
+ for resource_el in sorted(
+ set(primitives),
+ key=lambda element: element.get("id", "")
+ ):
+ op_list = operations.get_resource_operations(
+ resource_el,
+ ["monitor"]
+ )
+ if with_monitor:
+ for op in op_list:
+ operations.enable(op)
+ else:
+ monitor_enabled = False
+ for op in op_list:
+ if operations.is_enabled(op):
+ monitor_enabled = True
+ break
+ if op_list and not monitor_enabled:
+ # do not advise enabling monitors if there are none defined
+ report_list.append(
+ reports.resource_managed_no_monitor_enabled(
+ resource_el.get("id", "")
+ )
+ )
+
+ env.report_processor.process_list(report_list)
+
+def _find_resources_or_raise(
+ resources_section, resource_ids, additional_search=None
+):
+ if not additional_search:
+ additional_search = lambda x: [x]
+ report_list = []
+ resource_el_list = []
+ resource_tags = (
+ resource.clone.ALL_TAGS
+ +
+ [resource.group.TAG, resource.primitive.TAG]
+ )
+ for res_id in resource_ids:
+ try:
+ resource_el_list.extend(
+ additional_search(
+ find_element_by_tag_and_id(
+ resource_tags,
+ resources_section,
+ res_id,
+ id_description="resource/clone/master/group"
+ )
+ )
+ )
+ except LibraryError as e:
+ report_list.extend(e.args)
+ if report_list:
+ raise LibraryError(*report_list)
+ return resource_el_list
diff --git a/pcs/lib/commands/resource_agent.py b/pcs/lib/commands/resource_agent.py
index 933da49..e3c6acf 100644
--- a/pcs/lib/commands/resource_agent.py
+++ b/pcs/lib/commands/resource_agent.py
@@ -95,9 +95,20 @@ def _complete_agent_list(
agent_list.append(agent_metadata.get_description_info())
else:
agent_list.append(agent_metadata.get_name_info())
- except resource_agent.UnableToGetAgentMetadata:
- # if we cannot get valid metadata, it's not a resource agent and
- # we don't return it in the list
+ except resource_agent.ResourceAgentError:
+ #we don't return it in the list:
+ #
+ #UnableToGetAgentMetadata - if we cannot get valid metadata, it's
+ #not a resource agent
+ #
+ #InvalidResourceAgentName - an invalid name cannot be used with a new
+ #resource. The list of names is obtained from "crm_resource" while
+ #pcs does its own validation, so the list may contain a name that pcs
+ #does not recognize as valid.
+ #
+ #Emitting a warning is not an option (currently). Other components
+ #read this list and do not expect warnings in it. Using stderr
+ #(to separate warnings) is currently difficult.
pass
return agent_list
@@ -111,5 +122,6 @@ def describe_agent(lib_env, agent_name):
lib_env.report_processor,
lib_env.cmd_runner(),
agent_name,
+ absent_agent_supported=False
)
return agent.get_full_info()
diff --git a/pcs/lib/commands/sbd.py b/pcs/lib/commands/sbd.py
index 8cc9eda..6d0a4f3 100644
--- a/pcs/lib/commands/sbd.py
+++ b/pcs/lib/commands/sbd.py
@@ -33,6 +33,11 @@ from pcs.lib.node import (
NodeAddressesList,
NodeNotFound
)
+from pcs.lib.validate import (
+ names_in,
+ run_collection_of_option_validators,
+ value_nonnegative_integer,
+)
def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
@@ -46,7 +51,7 @@ def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
report_item_list = []
unsupported_sbd_option_list = [
- "SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER"
+ "SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER", "SBD_DEVICE"
]
allowed_sbd_options = [
"SBD_DELAY_START", "SBD_STARTMODE", "SBD_WATCHDOG_TIMEOUT"
@@ -54,12 +59,12 @@ def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
for sbd_opt in sbd_config:
if sbd_opt in unsupported_sbd_option_list:
report_item_list.append(reports.invalid_option(
- sbd_opt, allowed_sbd_options, None
+ [sbd_opt], allowed_sbd_options, None
))
elif sbd_opt not in allowed_sbd_options:
report_item_list.append(reports.invalid_option(
- sbd_opt,
+ [sbd_opt],
allowed_sbd_options,
None,
Severities.WARNING if allow_unknown_opts else Severities.ERROR,
@@ -69,7 +74,7 @@ def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
report_item = reports.invalid_option_value(
"SBD_WATCHDOG_TIMEOUT",
sbd_config["SBD_WATCHDOG_TIMEOUT"],
- "nonnegative integer"
+ "a non-negative integer"
)
try:
if int(sbd_config["SBD_WATCHDOG_TIMEOUT"]) < 0:
@@ -80,38 +85,89 @@ def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
return report_item_list
-def _get_full_watchdog_list(node_list, default_watchdog, watchdog_dict):
+def _validate_watchdog_dict(watchdog_dict):
"""
- Validate if all nodes in watchdog_dict does exist and returns dictionary
- where keys are nodes and value is corresponding watchdog.
- Raises LibraryError if any of nodes doesn't belong to cluster.
+ Validates if all watchdogs are specified by absolute path.
+ Returns list of ReportItem.
- node_list -- NodeAddressesList
- default_watchdog -- watchdog for nodes which are not specified
- in watchdog_dict
- watchdog_dict -- dictionary with node names as keys and value as watchdog
+ watchdog_dict -- dictionary with NodeAddresses as keys and watchdog paths as values
"""
- full_dict = dict([(node, default_watchdog) for node in node_list])
- report_item_list = []
+ return [
+ reports.invalid_watchdog_path(watchdog)
+ for watchdog in watchdog_dict.values()
+ if not watchdog or not os.path.isabs(watchdog)
+ ]
+
- for node_name, watchdog in watchdog_dict.items():
- if not watchdog or not os.path.isabs(watchdog):
- report_item_list.append(reports.invalid_watchdog_path(watchdog))
+def _validate_device_dict(node_device_dict):
+ """
+ Validates device lists for all nodes. For every node it checks that there
+ is at least one device and at most settings.sbd_max_device_num devices, and
+ that every device is specified by an absolute path.
+ Returns list of ReportItem
+
+ node_device_dict -- dictionary with NodeAddresses as keys and list of
+ devices as values
+ """
+ report_item_list = []
+ for node, device_list in node_device_dict.items():
+ if not device_list:
+ report_item_list.append(
+ reports.sbd_no_device_for_node(node.label)
+ )
continue
+ elif len(device_list) > settings.sbd_max_device_num:
+ report_item_list.append(reports.sbd_too_many_devices_for_node(
+ node.label, device_list, settings.sbd_max_device_num
+ ))
+ continue
+ for device in device_list:
+ if not device or not os.path.isabs(device):
+ report_item_list.append(
+ reports.sbd_device_path_not_absolute(device, node.label)
+ )
+
+ return report_item_list
+
+
+def _check_node_names_in_cluster(node_list, node_name_list):
+ """
+ Check whether all node names from node_name_list exist in node_list.
+ Returns list of ReportItem
+
+ node_list -- NodeAddressesList
+ node_name_list -- list of strings
+ """
+ not_existing_node_set = set()
+ for node_name in node_name_list:
try:
- full_dict[node_list.find_by_label(node_name)] = watchdog
+ node_list.find_by_label(node_name)
except NodeNotFound:
- report_item_list.append(reports.node_not_found(node_name))
+ not_existing_node_set.add(node_name)
+
+ return [reports.node_not_found(node) for node in not_existing_node_set]
- if report_item_list:
- raise LibraryError(*report_item_list)
- return full_dict
+def _get_full_node_dict(node_list, node_value_dict, default_value):
+ """
+ Returns a dictionary where the keys are NodeAddresses of all nodes in the
+ cluster and the value is taken from node_value_dict for the node name, or
+ default_value if the node name is not present in node_value_dict.
+
+ node_list -- NodeAddressesList
+ node_value_dict -- dictionary, keys: node names, values: some value
+ default_value -- some default value
+ """
+ return dict([
+ (node, node_value_dict.get(node.label, default_value))
+ for node in node_list
+ ])
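+# For example (a sketch with hypothetical node labels "n1" and "n2"):
+# _get_full_node_dict(nodes, {"n1": "/dev/watchdog1"}, "/dev/watchdog") maps
+# the NodeAddresses of n1 to "/dev/watchdog1" and the NodeAddresses of every
+# other node, including n2, to the default "/dev/watchdog".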
def enable_sbd(
- lib_env, default_watchdog, watchdog_dict, sbd_options,
- allow_unknown_opts=False, ignore_offline_nodes=False
+ lib_env, default_watchdog, watchdog_dict, sbd_options,
+ default_device_list=None, node_device_dict=None, allow_unknown_opts=False,
+ ignore_offline_nodes=False,
):
"""
Enable SBD on all nodes in cluster.
@@ -119,44 +175,64 @@ def enable_sbd(
lib_env -- LibraryEnvironment
default_watchdog -- watchdog for nodes which are not specified in
watchdog_dict. Uses default value from settings if None.
- watchdog_dict -- dictionary with NodeAddresses as keys and watchdog path
+ watchdog_dict -- dictionary with node names as keys and watchdog path
as value
sbd_options -- dictionary in format: <SBD config option>: <value>
+ default_device_list -- list of devices for all nodes
+ node_device_dict -- dictionary with node names as keys and list of devices
+ as value
allow_unknown_opts -- if True, accept also unknown options.
ignore_offline_nodes -- if True, omit offline nodes
"""
node_list = _get_cluster_nodes(lib_env)
-
+ using_devices = not (
+ default_device_list is None and node_device_dict is None
+ )
+ if default_device_list is None:
+ default_device_list = []
+ if node_device_dict is None:
+ node_device_dict = {}
if not default_watchdog:
default_watchdog = settings.sbd_watchdog_default
+ sbd_options = dict([(opt.upper(), val) for opt, val in sbd_options.items()])
- # input validation begin
- full_watchdog_dict = _get_full_watchdog_list(
- node_list, default_watchdog, watchdog_dict
+ full_watchdog_dict = _get_full_node_dict(
+ node_list, watchdog_dict, default_watchdog
+ )
+ full_device_dict = _get_full_node_dict(
+ node_list, node_device_dict, default_device_list
)
- # config validation
- sbd_options = dict([(opt.upper(), val) for opt, val in sbd_options.items()])
lib_env.report_processor.process_list(
+ _check_node_names_in_cluster(
+ node_list, watchdog_dict.keys() + node_device_dict.keys()
+ )
+ +
+ _validate_watchdog_dict(full_watchdog_dict)
+ +
+ _validate_device_dict(full_device_dict) if using_devices else []
+ +
_validate_sbd_options(sbd_options, allow_unknown_opts)
)
- # check nodes status
online_nodes = _get_online_nodes(lib_env, node_list, ignore_offline_nodes)
- for node in list(full_watchdog_dict):
- if node not in online_nodes:
- full_watchdog_dict.pop(node, None)
- # input validation end
+
+ node_data_dict = {}
+ for node in online_nodes:
+ node_data_dict[node] = {
+ "watchdog": full_watchdog_dict[node],
+ "device_list": full_device_dict[node] if using_devices else [],
+ }
# check if SBD can be enabled
sbd.check_sbd_on_all_nodes(
lib_env.report_processor,
lib_env.node_communicator(),
- full_watchdog_dict
+ node_data_dict,
)
# enable ATB if needed
- if not lib_env.is_cman_cluster:
+ if not lib_env.is_cman_cluster and not using_devices:
corosync_conf = lib_env.get_corosync_conf()
if sbd.atb_has_to_be_enabled_pre_enable_check(corosync_conf):
lib_env.report_processor.process(reports.sbd_requires_atb())
@@ -173,7 +249,8 @@ def enable_sbd(
lib_env.node_communicator(),
online_nodes,
config,
- full_watchdog_dict
+ full_watchdog_dict,
+ full_device_dict,
)
# remove cluster prop 'stonith_watchdog_timeout'
@@ -280,9 +357,11 @@ def get_cluster_sbd_status(lib_env):
def get_sbd_status(node):
try:
status_list.append({
- "node": node,
+ "node": node.label,
"status": json.loads(
- sbd.check_sbd(lib_env.node_communicator(), node, "")
+ # here we just need info about sbd service,
+ # therefore watchdog and device list is empty
+ sbd.check_sbd(lib_env.node_communicator(), node, "", [])
)["sbd"]
})
successful_node_list.append(node)
@@ -307,7 +386,7 @@ def get_cluster_sbd_status(lib_env):
for node in node_list:
if node not in successful_node_list:
status_list.append({
- "node": node,
+ "node": node.label,
"status": {
"installed": None,
"enabled": None,
@@ -341,7 +420,7 @@ def get_cluster_sbd_config(lib_env):
def get_sbd_config(node):
try:
config_list.append({
- "node": node,
+ "node": node.label,
"config": environment_file_to_dict(
sbd.get_sbd_config(lib_env.node_communicator(), node)
)
@@ -373,13 +452,18 @@ def get_cluster_sbd_config(lib_env):
for node in node_list:
if node not in successful_node_list:
config_list.append({
- "node": node,
+ "node": node.label,
"config": None
})
return config_list
def get_local_sbd_config(lib_env):
+ """
+ Returns local SBD config as dictionary.
+
+ lib_env -- LibraryEnvironment
+ """
return environment_file_to_dict(sbd.get_local_sbd_config())
@@ -389,3 +473,104 @@ def _get_cluster_nodes(lib_env):
else:
return lib_env.get_corosync_conf().get_nodes()
+
+def initialize_block_devices(lib_env, device_list, option_dict):
+ """
+ Initialize SBD devices in device_list with options_dict.
+
+ lib_env -- LibraryEnvironment
+ device_list -- list of strings
+ option_dict -- dictionary
+ """
+ report_item_list = []
+ if not device_list:
+ report_item_list.append(reports.required_option_is_missing(["device"]))
+
+ supported_options = sbd.DEVICE_INITIALIZATION_OPTIONS_MAPPING.keys()
+
+ report_item_list += names_in(supported_options, option_dict.keys())
+ validator_list = [
+ value_nonnegative_integer(key)
+ for key in supported_options
+ ]
+
+ report_item_list += run_collection_of_option_validators(
+ option_dict, validator_list
+ )
+
+ lib_env.report_processor.process_list(report_item_list)
+ sbd.initialize_block_devices(
+ lib_env.report_processor, lib_env.cmd_runner(), device_list, option_dict
+ )
+
+
+def get_local_devices_info(lib_env, dump=False):
+ """
+ Returns list of local devices info in format:
+ {
+ "device": <device_path>,
+ "list": <output of 'sbd list' command>,
+ "dump": <output of 'sbd dump' command> if dump is True, None otherwise
+ }
+ If sbd is not enabled, an empty list will be returned.
+
+ lib_env -- LibraryEnvironment
+ dump -- if True returns also output of command 'sbd dump'
+ """
+ if not sbd.is_sbd_enabled(lib_env.cmd_runner()):
+ return []
+ device_list = sbd.get_local_sbd_device_list()
+ report_item_list = []
+ output = []
+ for device in device_list:
+ obj = {
+ "device": device,
+ "list": None,
+ "dump": None,
+ }
+ try:
+ obj["list"] = sbd.get_device_messages_info(
+ lib_env.cmd_runner(), device
+ )
+ if dump:
+ obj["dump"] = sbd.get_device_sbd_header_dump(
+ lib_env.cmd_runner(), device
+ )
+ except LibraryError as e:
+ report_item_list += e.args
+
+ output.append(obj)
+
+ for report_item in report_item_list:
+ report_item.severity = Severities.WARNING
+ lib_env.report_processor.process_list(report_item_list)
+ return output
+
+
+def set_message(lib_env, device, node_name, message):
+ """
+ Set message on device for node_name.
+
+ lib_env -- LibraryEnvironment
+ device -- string, absolute path to device
+ node_name -- string
+ message -- string, message type, should be one of settings.sbd_message_types
+ """
+ report_item_list = []
+ missing_options = []
+ if not device:
+ missing_options.append("device")
+ if not node_name:
+ missing_options.append("node")
+ if missing_options:
+ report_item_list.append(
+ reports.required_option_is_missing(missing_options)
+ )
+ supported_messages = settings.sbd_message_types
+ if message not in supported_messages:
+ report_item_list.append(
+ reports.invalid_option_value("message", message, supported_messages)
+ )
+ lib_env.report_processor.process_list(report_item_list)
+ sbd.set_message(lib_env.cmd_runner(), device, node_name, message)
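+# Illustrative call (a sketch; the device path, node name and message type are
+# hypothetical and the message must be one of settings.sbd_message_types):
+#
+#   set_message(lib_env, "/dev/disk/by-id/shared-lun", "node1", "clear")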
+
diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py
new file mode 100644
index 0000000..22bb798
--- /dev/null
+++ b/pcs/lib/commands/stonith.py
@@ -0,0 +1,143 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.lib.resource_agent import find_valid_stonith_agent_by_name as get_agent
+from pcs.lib.cib import resource
+from pcs.lib.cib.resource.common import are_meta_disabled
+from pcs.lib.pacemaker.values import validate_id
+from pcs.lib.commands.resource import resource_environment
+
+def create(
+ env, stonith_id, stonith_agent_name,
+ operations, meta_attributes, instance_attributes,
+ allow_absent_agent=False,
+ allow_invalid_operation=False,
+ allow_invalid_instance_attributes=False,
+ use_default_operations=True,
+ ensure_disabled=False,
+ wait=False,
+):
+ """
+ Create stonith as resource in a cib.
+
+ LibraryEnvironment env provides all for communication with externals
+ string stonith_id is an identifier of stonith resource
+ string stonith_agent_name contains name for the identification of agent
+ list of dict operations contains attributes for each entered operation
+ dict meta_attributes contains attributes for primitive/meta_attributes
+ dict instance_attributes contains attributes for
+ primitive/instance_attributes
+ bool allow_absent_agent is a flag for allowing agent that is not installed
+ in a system
+ bool allow_invalid_operation is a flag for allowing to use operations that
+ are not listed in a stonith agent metadata
+ bool allow_invalid_instance_attributes is a flag for allowing to use
+ instance attributes that are not listed in a stonith agent metadata
+ or for allowing to not use the instance_attributes that are required in
+ stonith agent metadata
+ bool use_default_operations is a flag controlling whether default cib
+ operations (specified in a stonith agent) are added
+ bool ensure_disabled is a flag that keeps the resource in target-role "Stopped"
+ mixed wait is a flag for controlling waiting for the pacemaker idle mechanism
+ """
+ stonith_agent = get_agent(
+ env.report_processor,
+ env.cmd_runner(),
+ stonith_agent_name,
+ allow_absent_agent,
+ )
+ if stonith_agent.get_provides_unfencing():
+ meta_attributes["provides"] = "unfencing"
+
+ with resource_environment(
+ env,
+ wait,
+ [stonith_id],
+ ensure_disabled or are_meta_disabled(meta_attributes),
+ ) as resources_section:
+ stonith_element = resource.primitive.create(
+ env.report_processor, resources_section,
+ stonith_id, stonith_agent,
+ operations, meta_attributes, instance_attributes,
+ allow_invalid_operation,
+ allow_invalid_instance_attributes,
+ use_default_operations,
+ )
+ if ensure_disabled:
+ resource.common.disable(stonith_element)
+
+def create_in_group(
+ env, stonith_id, stonith_agent_name, group_id,
+ operations, meta_attributes, instance_attributes,
+ allow_absent_agent=False,
+ allow_invalid_operation=False,
+ allow_invalid_instance_attributes=False,
+ use_default_operations=True,
+ ensure_disabled=False,
+ adjacent_resource_id=None,
+ put_after_adjacent=False,
+ wait=False,
+):
+ """
+ Create stonith as resource in a cib and put it into defined group.
+
+ LibraryEnvironment env provides all for communication with externals
+ string stonith_id is an identifier of stonith resource
+ string stonith_agent_name contains name for the identification of agent
+ string group_id is identificator for group to put stonith inside
+ list of dict operations contains attributes for each entered operation
+ dict meta_attributes contains attributes for primitive/meta_attributes
+ dict instance_attributes contains attributes for
+ primitive/instance_attributes
+ bool allow_absent_agent is a flag for allowing agent that is not installed
+ in a system
+ bool allow_invalid_operation is a flag for allowing to use operations that
+ are not listed in a stonith agent metadata
+ bool allow_invalid_instance_attributes is a flag for allowing to use
+ instance attributes that are not listed in a stonith agent metadata
+ or for allowing to not use the instance_attributes that are required in
+ stonith agent metadata
+ bool use_default_operations is a flag for stopping stopping of adding
+ default cib operations (specified in a stonith agent)
+ bool ensure_disabled is flag that keeps resource in target-role "Stopped"
+ string adjacent_resource_id identify neighbor of a newly created stonith
+ bool put_after_adjacent is flag to put a newly create resource befor/after
+ adjacent stonith
+ mixed wait is flag for controlling waiting for pacemaker iddle mechanism
+ """
+ stonith_agent = get_agent(
+ env.report_processor,
+ env.cmd_runner(),
+ stonith_agent_name,
+ allow_absent_agent,
+ )
+ if stonith_agent.get_provides_unfencing():
+ meta_attributes["provides"] = "unfencing"
+
+ with resource_environment(
+ env,
+ wait,
+ stonith_id,
+ ensure_disabled or are_meta_disabled(meta_attributes),
+ ) as resources_section:
+ stonith_element = resource.primitive.create(
+ env.report_processor, resources_section,
+ stonith_id, stonith_agent,
+ operations, meta_attributes, instance_attributes,
+ allow_invalid_operation,
+ allow_invalid_instance_attributes,
+ use_default_operations,
+ )
+ if ensure_disabled:
+ resource.common.disable(stonith_element)
+ validate_id(group_id, "group name")
+ resource.group.place_resource(
+ resource.group.provide_group(resources_section, group_id),
+ stonith_element,
+ adjacent_resource_id,
+ put_after_adjacent,
+ )
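Editor's note: for orientation, a minimal sketch of how a caller might invoke the new stonith library command follows. This is not part of the patch; the LibraryEnvironment setup is abbreviated and the agent name and instance attribute are illustrative assumptions only.

    from pcs.lib.commands import stonith

    # hedged sketch: env is a prepared pcs.lib.env.LibraryEnvironment,
    # fence_xvm and pcmk_host_list are only example values
    stonith.create(
        env,
        "fence-node1",                # stonith_id
        "fence_xvm",                  # stonith_agent_name (assumed agent)
        operations=[],                # keep the agent's default operations
        meta_attributes={},
        instance_attributes={"pcmk_host_list": "node1"},
        use_default_operations=True,
    )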
diff --git a/pcs/lib/commands/stonith_agent.py b/pcs/lib/commands/stonith_agent.py
index 6257f18..ee4bc26 100644
--- a/pcs/lib/commands/stonith_agent.py
+++ b/pcs/lib/commands/stonith_agent.py
@@ -7,7 +7,6 @@ from __future__ import (
from pcs.lib import resource_agent
from pcs.lib.commands.resource_agent import _complete_agent_list
-from pcs.lib.errors import LibraryError
def list_agents(lib_env, describe=True, search=None):
@@ -32,14 +31,10 @@ def describe_agent(lib_env, agent_name):
Get agent's description (metadata) in a structure
string agent_name name of the agent (not containing "stonith:" prefix)
"""
- try:
- metadata = resource_agent.StonithAgent(
- lib_env.cmd_runner(),
- agent_name
- )
- return metadata.get_full_info()
- except resource_agent.ResourceAgentError as e:
- raise LibraryError(
- resource_agent.resource_agent_error_to_report_item(e)
- )
-
+ agent = resource_agent.find_valid_stonith_agent_by_name(
+ lib_env.report_processor,
+ lib_env.cmd_runner(),
+ agent_name,
+ absent_agent_supported=False
+ )
+ return agent.get_full_info()
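Editor's note: with the simplified flow above, errors are reported through the report processor inside find_valid_stonith_agent_by_name and the caller receives the metadata structure directly. A hedged sketch, assuming lib_env is already constructed and that the returned structure carries keys such as "name" and "shortdesc":

    from pcs.lib.commands import stonith_agent

    # raises LibraryError via the report processor if the agent is invalid
    info = stonith_agent.describe_agent(lib_env, "fence_xvm")
    print(info["name"], info["shortdesc"])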
diff --git a/pcs/test/tools/test/__init__.py b/pcs/lib/commands/test/resource/__init__.py
similarity index 100%
copy from pcs/test/tools/test/__init__.py
copy to pcs/lib/commands/test/resource/__init__.py
diff --git a/pcs/lib/commands/test/resource/common.py b/pcs/lib/commands/test/resource/common.py
new file mode 100644
index 0000000..ac8cb24
--- /dev/null
+++ b/pcs/lib/commands/test/resource/common.py
@@ -0,0 +1,76 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+import logging
+
+import pcs.lib.commands.test.resource.fixture as fixture
+from pcs.lib.env import LibraryEnvironment
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.integration_lib import Runner
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_unittest import TestCase, mock
+
+class CommonResourceTest(TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.runner = Runner()
+ cls.patcher = mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: cls.runner
+ )
+ cls.patcher.start()
+
+ cls.patcher_corosync = mock.patch.object(
+ LibraryEnvironment,
+ "get_corosync_conf_data",
+ lambda self: open(rc("corosync.conf")).read()
+ )
+ cls.patcher_corosync.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.patcher.stop()
+ cls.patcher_corosync.stop()
+
+ def setUp(self):
+ self.env = LibraryEnvironment(
+ mock.MagicMock(logging.Logger),
+ MockLibraryReportProcessor()
+ )
+ self.cib_base_file = "cib-empty.xml"
+
+
+class ResourceWithoutStateTest(CommonResourceTest):
+ def assert_command_effect(self, cib_pre, cmd, cib_post, reports=None):
+ self.runner.set_runs(
+ fixture.calls_cib(
+ cib_pre,
+ cib_post,
+ cib_base_file=self.cib_base_file
+ )
+ )
+ cmd()
+ self.env.report_processor.assert_reports(reports if reports else [])
+ self.runner.assert_everything_launched()
+
+
+class ResourceWithStateTest(CommonResourceTest):
+ def assert_command_effect(
+ self, cib_pre, status, cmd, cib_post, reports=None
+ ):
+ self.runner.set_runs(
+ fixture.calls_cib_and_status(
+ cib_pre,
+ status,
+ cib_post,
+ cib_base_file=self.cib_base_file
+ )
+ )
+ cmd()
+ self.env.report_processor.assert_reports(reports if reports else [])
+ self.runner.assert_everything_launched()
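Editor's note: the helper classes above fix the test protocol: queue the expected runner calls (CIB load, optional status query, CIB push), run the command, then assert the produced reports and that every queued call was consumed. A hedged sketch of a test built on ResourceWithoutStateTest; some_command is a placeholder, not a real library command:

    class ExampleTest(ResourceWithoutStateTest):
        def test_noop_command(self):
            cib = "<resources />"
            self.assert_command_effect(
                cib,
                lambda: some_command(self.env),  # assumed library command
                cib,                             # expect the CIB to stay unchanged
            )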
diff --git a/pcs/lib/commands/test/resource/fixture.py b/pcs/lib/commands/test/resource/fixture.py
new file mode 100644
index 0000000..f1fe09b
--- /dev/null
+++ b/pcs/lib/commands/test/resource/fixture.py
@@ -0,0 +1,201 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.integration_lib import Call
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.xml import etree_to_str
+
+
+def call_cib_load(cib):
+ return [
+ Call("cibadmin --local --query", cib),
+ ]
+
+def call_cib_push(cib):
+ return [
+ Call(
+ "cibadmin --replace --verbose --xml-pipe --scope configuration",
+ check_stdin=Call.create_check_stdin_xml(cib)
+ ),
+ ]
+
+def call_cib_upgrade():
+ return [
+ Call("cibadmin --upgrade --force"),
+ ]
+
+def call_status(status):
+ return [
+ Call("/usr/sbin/crm_mon --one-shot --as-xml --inactive", status),
+ ]
+
+def call_wait_supported():
+ return [
+ Call("crm_resource -?", "--wait"),
+ ]
+
+def call_wait(timeout, retval=0, stderr=""):
+ return [
+ Call(
+ "crm_resource --wait --timeout={0}".format(timeout),
+ stderr=stderr,
+ returncode=retval
+ ),
+ ]
+
+def call_dummy_metadata():
+ return [
+ Call(
+ "crm_resource --show-metadata ocf:heartbeat:Dummy",
+ open(rc("resource_agent_ocf_heartbeat_dummy.xml")).read()
+ ),
+ ]
+
+def calls_cib(cib_pre, cib_post, cib_base_file=None):
+ return (
+ call_cib_load(cib_resources(cib_pre, cib_base_file=cib_base_file))
+ +
+ call_cib_push(cib_resources(cib_post, cib_base_file=cib_base_file))
+ )
+
+def calls_cib_and_status(cib_pre, status, cib_post, cib_base_file=None):
+ return (
+ call_cib_load(cib_resources(cib_pre, cib_base_file=cib_base_file))
+ +
+ call_status(state_complete(status))
+ +
+ call_cib_push(cib_resources(cib_post, cib_base_file=cib_base_file))
+ )
+
+def calls_cib_load_and_upgrade(cib_old_version):
+ return (
+ call_cib_load(cib_resources(cib_old_version))
+ +
+ call_cib_upgrade()
+ )
+
+
+
+def cib_resources(cib_resources_xml, cib_base_file=None):
+ cib_xml = open(rc(cib_base_file or "cib-empty.xml")).read()
+ cib = etree.fromstring(cib_xml)
+ resources_section = cib.find(".//resources")
+ for child in etree.fromstring(cib_resources_xml):
+ resources_section.append(child)
+ return etree_to_str(cib)
+
+
+def state_complete(resource_status_xml):
+ status = etree.parse(rc("crm_mon.minimal.xml")).getroot()
+ resource_status = etree.fromstring(resource_status_xml)
+ for resource in resource_status.xpath(".//resource"):
+ _default_element_attributes(
+ resource,
+ {
+ "active": "true",
+ "managed": "true",
+ "failed": "false",
+ "failure_ignored": "false",
+ "nodes_running_on": "1",
+ "orphaned": "false",
+ "resource_agent": "ocf::heartbeat:Dummy",
+ "role": "Started",
+ }
+ )
+ for clone in resource_status.xpath(".//clone"):
+ _default_element_attributes(
+ clone,
+ {
+ "failed": "false",
+ "failure_ignored": "false",
+ }
+ )
+ for bundle in resource_status.xpath(".//bundle"):
+ _default_element_attributes(
+ bundle,
+ {
+ "type": "docker",
+ "image": "image:name",
+ "unique": "false",
+ "failed": "false",
+ }
+ )
+ status.append(resource_status)
+ return etree_to_str(status)
+
+def _default_element_attributes(element, default_attributes):
+ for name, value in default_attributes.items():
+ if name not in element.attrib:
+ element.attrib[name] = value
+
+
+def report_not_found(res_id, context_type=""):
+ return (
+ severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "context_type": context_type,
+ "context_id": "",
+ "id": res_id,
+ "id_description": "resource/clone/master/group",
+ },
+ None
+ )
+
+def report_resource_not_running(resource, severity=severities.INFO):
+ return (
+ severity,
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ {
+ "resource_id": resource,
+ },
+ None
+ )
+
+def report_resource_running(resource, roles, severity=severities.INFO):
+ return (
+ severity,
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ {
+ "resource_id": resource,
+ "roles_with_nodes": roles,
+ },
+ None
+ )
+
+def report_unexpected_element(element_id, element_type, expected_types):
+ return (
+ severities.ERROR,
+ report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
+ {
+ "id": element_id,
+ "expected_types": expected_types,
+ "current_type": elemet_type,
+ },
+ None
+ )
+
+def report_not_for_bundles(element_id):
+ return report_unexpected_element(
+ element_id,
+ "bundle",
+ ["clone", "master", "group", "primitive"]
+ )
+
+def report_wait_for_idle_timed_out(reason):
+ return (
+ severities.ERROR,
+ report_codes.WAIT_FOR_IDLE_TIMED_OUT,
+ {
+ "reason": reason.strip(),
+ },
+ None
+ )
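Editor's note: these helpers compose lists of expected Runner calls, so a test can declare an entire command run up front. A hedged sketch, assuming a Runner instance named runner, resource XML strings cib_pre/cib_post, and the module imported as fixture (as the tests below do):

    runner.set_runs(
        fixture.calls_cib(cib_pre, cib_post)  # expect cibadmin query, then push
        + fixture.call_wait_supported()       # expect the crm_resource -? probe
        + fixture.call_wait(10)               # expect crm_resource --wait --timeout=10
    )
    # ... invoke the library command under test here ...
    runner.assert_everything_launched()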
diff --git a/pcs/lib/commands/test/resource/test_bundle_create.py b/pcs/lib/commands/test/resource/test_bundle_create.py
new file mode 100644
index 0000000..b9922d8
--- /dev/null
+++ b/pcs/lib/commands/test/resource/test_bundle_create.py
@@ -0,0 +1,1152 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from textwrap import dedent
+
+from pcs.common import report_codes
+from pcs.lib.commands import resource
+from pcs.lib.commands.test.resource.common import ResourceWithoutStateTest
+import pcs.lib.commands.test.resource.fixture as fixture
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.misc import skip_unless_pacemaker_supports_bundle
+
+
+class CommonTest(ResourceWithoutStateTest):
+ fixture_cib_pre = "<resources />"
+ fixture_resources_bundle_simple = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+ """
+
+ def setUp(self):
+ super(CommonTest, self).setUp()
+ self.cib_base_file = "cib-empty-2.8.xml"
+
+ def fixture_cib_resources(self, cib):
+ return fixture.cib_resources(cib, cib_base_file=self.cib_base_file)
+
+
+class MinimalCreate(CommonTest):
+ def test_success(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", }
+ ),
+ self.fixture_resources_bundle_simple
+ )
+
+ def test_errors(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_pre)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_create(self.env, "B#1", "nonsense"),
+ (
+ severities.ERROR,
+ report_codes.INVALID_ID,
+ {
+ "invalid_character": "#",
+ "id": "B#1",
+ "id_description": "bundle name",
+ "is_first_char": False,
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "container type",
+ "option_value": "nonsense",
+ "allowed_values": ("docker", ),
+ },
+ None
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_cib_upgrade(self):
+ self.runner.set_runs(
+ fixture.calls_cib_load_and_upgrade(self.fixture_cib_pre)
+ +
+ fixture.calls_cib(
+ self.fixture_cib_pre,
+ self.fixture_resources_bundle_simple,
+ cib_base_file=self.cib_base_file
+ )
+ )
+
+ resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", }
+ )
+
+ self.env.report_processor.assert_reports([
+ (
+ severities.INFO,
+ report_codes.CIB_UPGRADE_SUCCESSFUL,
+ {
+ },
+ None
+ ),
+ ])
+ self.runner.assert_everything_launched()
+
+
+
+class CreateDocker(CommonTest):
+ allowed_options = [
+ "image",
+ "masters",
+ "network",
+ "options",
+ "replicas",
+ "replicas-per-host",
+ "run-command",
+ ]
+
+ def test_minimal(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", }
+ ),
+ self.fixture_resources_bundle_simple
+ )
+
+ def test_all_options(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {
+ "image": "pcs:test",
+ "masters": "0",
+ "network": "extra network settings",
+ "options": "extra options",
+ "run-command": "/bin/true",
+ "replicas": "4",
+ "replicas-per-host": "2",
+ }
+ ),
+ """
+ <resources>
+ <bundle id="B1">
+ <docker
+ image="pcs:test"
+ masters="0"
+ network="extra network settings"
+ options="extra options"
+ replicas="4"
+ replicas-per-host="2"
+ run-command="/bin/true"
+ />
+ </bundle>
+ </resources>
+ """
+ )
+
+ def test_options_errors(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_pre)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {
+ "replicas-per-host": "0",
+ "replicas": "0",
+ "masters": "-1",
+ },
+ force_options=True
+ ),
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_type": "container",
+ "option_names": ["image", ],
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "masters",
+ "option_value": "-1",
+ "allowed_values": "a non-negative integer",
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "replicas",
+ "option_value": "0",
+ "allowed_values": "a positive integer",
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "replicas-per-host",
+ "option_value": "0",
+ "allowed_values": "a positive integer",
+ },
+ None
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_empty_image(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_pre)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {
+ "image": "",
+ },
+ force_options=True
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "image",
+ "option_value": "",
+ "allowed_values": "image name",
+ },
+ None
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_unknow_option(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_pre)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {
+ "image": "pcs:test",
+ "extra": "option",
+ }
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "container",
+ "allowed": self.allowed_options,
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_unknow_option_forced(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {
+ "image": "pcs:test",
+ "extra": "option",
+ },
+ force_options=True
+ ),
+ """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" extra="option" />
+ </bundle>
+ </resources>
+ """,
+ [
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "container",
+ "allowed": self.allowed_options,
+ },
+ None
+ ),
+ ]
+ )
+
+
+class CreateWithNetwork(CommonTest):
+ allowed_options = [
+ "control-port",
+ "host-interface",
+ "host-netmask",
+ "ip-range-start",
+ ]
+
+ def test_no_options(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", },
+ network_options={}
+ ),
+ self.fixture_resources_bundle_simple
+ )
+
+ def test_all_options(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", },
+ network_options={
+ "control-port": "12345",
+ "host-interface": "eth0",
+ "host-netmask": "24",
+ "ip-range-start": "192.168.100.200",
+ }
+ ),
+ """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network
+ control-port="12345"
+ host-interface="eth0"
+ host-netmask="24"
+ ip-range-start="192.168.100.200"
+ />
+ </bundle>
+ </resources>
+ """
+ )
+
+ def test_options_errors(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_pre)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", },
+ network_options={
+ "control-port": "0",
+ "host-netmask": "abc",
+ "extra": "option",
+ }
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "control-port",
+ "option_value": "0",
+ "allowed_values": "a port number (1-65535)",
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "host-netmask",
+ "option_value": "abc",
+ "allowed_values": "a number of bits of the mask (1-32)",
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "network",
+ "allowed": self.allowed_options,
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_options_forced(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {
+ "image": "pcs:test",
+ },
+ network_options={
+ "host-netmask": "abc",
+ "extra": "option",
+ },
+ force_options=True
+ ),
+ """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-netmask="abc" extra="option" />
+ </bundle>
+ </resources>
+ """,
+ [
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "host-netmask",
+ "option_value": "abc",
+ "allowed_values": "a number of bits of the mask (1-32)",
+ },
+ None
+ ),
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "network",
+ "allowed": self.allowed_options,
+ },
+ None
+ ),
+ ]
+ )
+
+
+class CreateWithPortMap(CommonTest):
+ allowed_options = [
+ "id",
+ "internal-port",
+ "port",
+ "range",
+ ]
+
+ def test_no_options(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", },
+ port_map=[]
+ ),
+ self.fixture_resources_bundle_simple
+ )
+
+ def test_several_mappings_and_handle_their_ids(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", },
+ port_map=[
+ {
+ "port": "1001",
+ },
+ {
+ # use an autogenerated id of the previous item
+ "id": "B1-port-map-1001",
+ "port": "2000",
+ "internal-port": "2002",
+ },
+ {
+ "range": "3000-3300",
+ },
+ ]
+ ),
+ """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network>
+ <port-mapping id="B1-port-map-1001-1" port="1001" />
+ <port-mapping
+ id="B1-port-map-1001"
+ internal-port="2002"
+ port="2000"
+ />
+ <port-mapping
+ id="B1-port-map-3000-3300"
+ range="3000-3300"
+ />
+ </network>
+ </bundle>
+ </resources>
+ """
+ )
+
+ def test_options_errors(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_pre)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", },
+ port_map=[
+ {
+ },
+ {
+ "id": "not#valid",
+ },
+ {
+ "internal-port": "1000",
+ },
+ {
+ "port": "abc",
+ },
+ {
+ "port": "2000",
+ "range": "3000-4000",
+ "internal-port": "def",
+ },
+ ],
+ force_options=True
+ ),
+ # first
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
+ {
+ "option_type": "port-map",
+ "option_names": ["port", "range"],
+ },
+ None
+ ),
+ # second
+ (
+ severities.ERROR,
+ report_codes.INVALID_ID,
+ {
+ "invalid_character": "#",
+ "id": "not#valid",
+ "id_description": "port-map id",
+ "is_first_char": False,
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
+ {
+ "option_type": "port-map",
+ "option_names": ["port", "range"],
+ },
+ None
+ ),
+ # third
+ (
+ severities.ERROR,
+ report_codes.PREREQUISITE_OPTION_IS_MISSING,
+ {
+ "option_type": "port-map",
+ "option_name": "internal-port",
+ "prerequisite_type": "port-map",
+ "prerequisite_name": "port",
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
+ {
+ "option_type": "port-map",
+ "option_names": ["port", "range"],
+ },
+ None
+ ),
+ # fourth
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "port",
+ "option_value": "abc",
+ "allowed_values": "a port number (1-65535)",
+ },
+ None
+ ),
+ # fifth
+ (
+ severities.ERROR,
+ report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
+ {
+ "option_names": ["port", "range", ],
+ "option_type": "port-map",
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "internal-port",
+ "option_value": "def",
+ "allowed_values": "a port number (1-65535)",
+ },
+ None
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_forceable_options_errors(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_pre)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", },
+ port_map=[
+ {
+ "range": "3000",
+ "extra": "option",
+ },
+ ]
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "port-map",
+ "allowed": self.allowed_options,
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "range",
+ "option_value": "3000",
+ "allowed_values": "port-port",
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ )
+
+ def test_forceable_options_errors_forced(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {
+ "image": "pcs:test",
+ },
+ port_map=[
+ {
+ "range": "3000",
+ "extra": "option",
+ },
+ ],
+ force_options=True
+ ),
+ """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network>
+ <port-mapping
+ id="B1-port-map-3000"
+ extra="option"
+ range="3000"
+ />
+ </network>
+ </bundle>
+ </resources>
+ """,
+ [
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "port-map",
+ "allowed": self.allowed_options,
+ },
+ None
+ ),
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "range",
+ "option_value": "3000",
+ "allowed_values": "port-port",
+ },
+ None
+ ),
+ ]
+ )
+
+
+class CreateWithStorageMap(CommonTest):
+ allowed_options = [
+ "id",
+ "options",
+ "source-dir",
+ "source-dir-root",
+ "target-dir",
+ ]
+
+ def test_several_mappings_and_handle_their_ids(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", },
+ storage_map=[
+ {
+ "source-dir": "/tmp/docker1a",
+ "target-dir": "/tmp/docker1b",
+ },
+ {
+ # use an autogenerated id of the previous item
+ "id": "B1-storage-map",
+ "source-dir": "/tmp/docker2a",
+ "target-dir": "/tmp/docker2b",
+ "options": "extra options 1"
+ },
+ {
+ "source-dir-root": "/tmp/docker3a",
+ "target-dir": "/tmp/docker3b",
+ },
+ {
+ # use an autogenerated id of the previous item
+ "id": "B1-storage-map-2",
+ "source-dir-root": "/tmp/docker4a",
+ "target-dir": "/tmp/docker4b",
+ "options": "extra options 2"
+ },
+ ]
+ ),
+ """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <storage>
+ <storage-mapping
+ id="B1-storage-map-1"
+ source-dir="/tmp/docker1a"
+ target-dir="/tmp/docker1b"
+ />
+ <storage-mapping
+ id="B1-storage-map"
+ options="extra options 1"
+ source-dir="/tmp/docker2a"
+ target-dir="/tmp/docker2b"
+ />
+ <storage-mapping
+ id="B1-storage-map-3"
+ source-dir-root="/tmp/docker3a"
+ target-dir="/tmp/docker3b"
+ />
+ <storage-mapping
+ id="B1-storage-map-2"
+ options="extra options 2"
+ source-dir-root="/tmp/docker4a"
+ target-dir="/tmp/docker4b"
+ />
+ </storage>
+ </bundle>
+ </resources>
+ """
+ )
+
+ def test_options_errors(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_pre)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", },
+ storage_map=[
+ {
+ },
+ {
+ "id": "not#valid",
+ "source-dir": "/tmp/docker1a",
+ "source-dir-root": "/tmp/docker1b",
+ "target-dir": "/tmp/docker1c",
+ },
+ ],
+ force_options=True
+ ),
+ # first
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
+ {
+ "option_type": "storage-map",
+ "option_names": ["source-dir", "source-dir-root"],
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_type": "storage-map",
+ "option_names": ["target-dir", ],
+ },
+ None
+ ),
+ # second
+ (
+ severities.ERROR,
+ report_codes.INVALID_ID,
+ {
+ "invalid_character": "#",
+ "id": "not#valid",
+ "id_description": "storage-map id",
+ "is_first_char": False,
+ },
+ None
+ ),
+ (
+ severities.ERROR,
+ report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
+ {
+ "option_type": "storage-map",
+ "option_names": ["source-dir", "source-dir-root"],
+ },
+ None
+ ),
+ )
+
+ def test_forceable_options_errors(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_pre)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {"image": "pcs:test", },
+ storage_map=[
+ {
+ "source-dir": "/tmp/docker1a",
+ "target-dir": "/tmp/docker1b",
+ "extra": "option",
+ },
+ ]
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "storage-map",
+ "allowed": self.allowed_options,
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ )
+
+ def test_forceable_options_errors_forced(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {
+ "image": "pcs:test",
+ },
+ storage_map=[
+ {
+ "source-dir": "/tmp/docker1a",
+ "target-dir": "/tmp/docker1b",
+ "extra": "option",
+ },
+ ],
+ force_options=True
+ ),
+ """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <storage>
+ <storage-mapping
+ id="B1-storage-map"
+ source-dir="/tmp/docker1a"
+ target-dir="/tmp/docker1b"
+ extra="option"
+ />
+ </storage>
+ </bundle>
+ </resources>
+ """,
+ [
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "storage-map",
+ "allowed": self.allowed_options,
+ },
+ None
+ ),
+ ]
+ )
+
+
+class CreateWithAllOptions(CommonTest):
+ def test_success(self):
+ self.assert_command_effect(
+ self.fixture_cib_pre,
+ lambda: resource.bundle_create(
+ self.env, "B1", "docker",
+ {
+ "image": "pcs:test",
+ "masters": "0",
+ "network": "extra network settings",
+ "options": "extra options",
+ "run-command": "/bin/true",
+ "replicas": "4",
+ "replicas-per-host": "2",
+ },
+ {
+ "control-port": "12345",
+ "host-interface": "eth0",
+ "host-netmask": "24",
+ "ip-range-start": "192.168.100.200",
+ },
+ [
+ {
+ "port": "1001",
+ },
+ {
+ # use an autogenerated id of the previous item
+ "id": "B1-port-map-1001",
+ "port": "2000",
+ "internal-port": "2002",
+ },
+ {
+ "range": "3000-3300",
+ },
+ ],
+ [
+ {
+ "source-dir": "/tmp/docker1a",
+ "target-dir": "/tmp/docker1b",
+ },
+ {
+ # use an autogenerated id of the previous item
+ "id": "B1-storage-map",
+ "source-dir": "/tmp/docker2a",
+ "target-dir": "/tmp/docker2b",
+ "options": "extra options 1"
+ },
+ {
+ "source-dir-root": "/tmp/docker3a",
+ "target-dir": "/tmp/docker3b",
+ },
+ {
+ # occupy an id that would otherwise be autogenerated for a port mapping
+ "id": "B1-port-map-1001-1",
+ "source-dir-root": "/tmp/docker4a",
+ "target-dir": "/tmp/docker4b",
+ "options": "extra options 2"
+ },
+ ]
+ ),
+ """
+ <resources>
+ <bundle id="B1">
+ <docker
+ image="pcs:test"
+ masters="0"
+ network="extra network settings"
+ options="extra options"
+ replicas="4"
+ replicas-per-host="2"
+ run-command="/bin/true"
+ />
+ <network
+ control-port="12345"
+ host-interface="eth0"
+ host-netmask="24"
+ ip-range-start="192.168.100.200"
+ >
+ <port-mapping id="B1-port-map-1001-2" port="1001" />
+ <port-mapping
+ id="B1-port-map-1001"
+ internal-port="2002"
+ port="2000"
+ />
+ <port-mapping
+ id="B1-port-map-3000-3300"
+ range="3000-3300"
+ />
+ </network>
+ <storage>
+ <storage-mapping
+ id="B1-storage-map-1"
+ source-dir="/tmp/docker1a"
+ target-dir="/tmp/docker1b"
+ />
+ <storage-mapping
+ id="B1-storage-map"
+ options="extra options 1"
+ source-dir="/tmp/docker2a"
+ target-dir="/tmp/docker2b"
+ />
+ <storage-mapping
+ id="B1-storage-map-2"
+ source-dir-root="/tmp/docker3a"
+ target-dir="/tmp/docker3b"
+ />
+ <storage-mapping
+ id="B1-port-map-1001-1"
+ options="extra options 2"
+ source-dir-root="/tmp/docker4a"
+ target-dir="/tmp/docker4b"
+ />
+ </storage>
+ </bundle>
+ </resources>
+ """
+ )
+
+
+class Wait(CommonTest):
+ fixture_status_running = """
+ <resources>
+ <bundle id="B1" managed="true">
+ <replica id="0">
+ <resource id="B1-docker-0" managed="true" role="Started">
+ <node name="node1" id="1" cached="false"/>
+ </resource>
+ </replica>
+ <replica id="1">
+ <resource id="B1-docker-1" managed="true" role="Started">
+ <node name="node2" id="2" cached="false"/>
+ </resource>
+ </replica>
+ </bundle>
+ </resources>
+ """
+
+ fixture_status_not_running = """
+ <resources>
+ <bundle id="B1" managed="true">
+ <replica id="0">
+ <resource id="B1-docker-0" managed="true" role="Stopped" />
+ </replica>
+ <replica id="1">
+ <resource id="B1-docker-1" managed="true" role="Stopped" />
+ </replica>
+ </bundle>
+ </resources>
+ """
+
+ timeout = 10
+
+ def fixture_calls_initial(self):
+ return (
+ fixture.call_wait_supported() +
+ fixture.calls_cib(
+ self.fixture_cib_pre,
+ self.fixture_resources_bundle_simple,
+ cib_base_file=self.cib_base_file,
+ )
+ )
+
+ def simple_bundle_create(self, wait=False):
+ return resource.bundle_create(
+ self.env, "B1", "docker", {"image": "pcs:test"}, wait=wait,
+ )
+
+ def test_wait_fail(self):
+ fixture_wait_timeout_error = dedent(
+ """\
+ Pending actions:
+ Action 12: B1-node2-stop on node2
+ Error performing operation: Timer expired
+ """
+ )
+ self.runner.set_runs(
+ self.fixture_calls_initial() +
+ fixture.call_wait(self.timeout, 62, fixture_wait_timeout_error)
+ )
+ assert_raise_library_error(
+ lambda: self.simple_bundle_create(self.timeout),
+ fixture.report_wait_for_idle_timed_out(
+ fixture_wait_timeout_error
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ @skip_unless_pacemaker_supports_bundle
+ def test_wait_ok_run_ok(self):
+ self.runner.set_runs(
+ self.fixture_calls_initial() +
+ fixture.call_wait(self.timeout) +
+ fixture.call_status(fixture.state_complete(
+ self.fixture_status_running
+ ))
+ )
+ self.simple_bundle_create(self.timeout)
+ self.env.report_processor.assert_reports([
+ fixture.report_resource_running(
+ "B1", {"Started": ["node1", "node2"]}
+ ),
+ ])
+ self.runner.assert_everything_launched()
+
+ @skip_unless_pacemaker_supports_bundle
+ def test_wait_ok_run_fail(self):
+ self.runner.set_runs(
+ self.fixture_calls_initial() +
+ fixture.call_wait(self.timeout) +
+ fixture.call_status(fixture.state_complete(
+ self.fixture_status_not_running
+ ))
+ )
+ assert_raise_library_error(
+ lambda: self.simple_bundle_create(self.timeout),
+ fixture.report_resource_not_running("B1", severities.ERROR),
+ )
+ self.runner.assert_everything_launched()
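Editor's note: taken together, these tests exercise the full bundle_create call shape. For reference, a hedged sketch of the richest invocation the tests assume (argument names follow the tests; the values are illustrative):

    resource.bundle_create(
        env, "B1", "docker",
        {"image": "pcs:test", "replicas": "4"},   # container options
        network_options={"control-port": "12345"},
        port_map=[{"port": "80"}],
        storage_map=[{"source-dir": "/tmp/a", "target-dir": "/tmp/b"}],
        force_options=False,
        wait=False,
    )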
diff --git a/pcs/lib/commands/test/resource/test_bundle_update.py b/pcs/lib/commands/test/resource/test_bundle_update.py
new file mode 100644
index 0000000..55cfa7b
--- /dev/null
+++ b/pcs/lib/commands/test/resource/test_bundle_update.py
@@ -0,0 +1,826 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from textwrap import dedent
+
+from pcs.common import report_codes
+from pcs.lib.commands import resource
+from pcs.lib.commands.test.resource.common import ResourceWithoutStateTest
+import pcs.lib.commands.test.resource.fixture as fixture
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.misc import skip_unless_pacemaker_supports_bundle
+
+class CommonTest(ResourceWithoutStateTest):
+ fixture_cib_minimal = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+ """
+
+ def setUp(self):
+ super(CommonTest, self).setUp()
+ self.cib_base_file = "cib-empty-2.8.xml"
+
+ def fixture_cib_resources(self, cib):
+ return fixture.cib_resources(cib, cib_base_file=self.cib_base_file)
+
+
+class Basics(CommonTest):
+ def test_nonexisting_id(self):
+ fixture_cib_pre = "<resources />"
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(fixture_cib_pre)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_update(self.env, "B1"),
+ (
+ severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "id": "B1",
+ "id_description": "bundle",
+ "context_type": "resources",
+ "context_id": "",
+ },
+ None
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_not_bundle_id(self):
+ fixture_cib_pre = """
+ <resources>
+ <primitive id="B1" />
+ </resources>
+ """
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(fixture_cib_pre)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_update(self.env, "B1"),
+ (
+ severities.ERROR,
+ report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
+ {
+ "id": "B1",
+ "expected_types": ["bundle"],
+ "current_type": "primitive",
+ },
+ None
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_no_updates(self):
+ fixture_cib_pre = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+ """
+ self.assert_command_effect(
+ fixture_cib_pre,
+ lambda: resource.bundle_update(self.env, "B1"),
+ fixture_cib_pre
+ )
+
+ def test_cib_upgrade(self):
+ fixture_cib_pre = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+ """
+ self.runner.set_runs(
+ fixture.calls_cib_load_and_upgrade(fixture_cib_pre)
+ +
+ fixture.calls_cib(
+ fixture_cib_pre,
+ fixture_cib_pre,
+ cib_base_file=self.cib_base_file
+ )
+ )
+
+ resource.bundle_update(self.env, "B1")
+
+ self.env.report_processor.assert_reports([
+ (
+ severities.INFO,
+ report_codes.CIB_UPGRADE_SUCCESSFUL,
+ {
+ },
+ None
+ ),
+ ])
+ self.runner.assert_everything_launched()
+
+
+class ContainerDocker(CommonTest):
+ allowed_options = [
+ "image",
+ "masters",
+ "network",
+ "options",
+ "replicas",
+ "replicas-per-host",
+ "run-command",
+ ]
+
+ fixture_cib_extra_option = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" extra="option" />
+ </bundle>
+ </resources>
+ """
+
+ def test_success(self):
+ fixture_cib_pre = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" masters="3" replicas="6"/>
+ </bundle>
+ </resources>
+ """
+ fixture_cib_post = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" options="test" replicas="3" />
+ </bundle>
+ </resources>
+ """
+ self.assert_command_effect(
+ fixture_cib_pre,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ container_options={
+ "options": "test",
+ "replicas": "3",
+ "masters": "",
+ }
+ ),
+ fixture_cib_post
+ )
+
+ def test_cannot_remove_required_options(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_minimal)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ container_options={
+ "image": "",
+ "options": "test",
+ },
+ force_options=True
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "image",
+ "option_value": "",
+ "allowed_values": "image name",
+ },
+ None
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_unknow_option(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_minimal)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ container_options={
+ "extra": "option",
+ }
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "container",
+ "allowed": self.allowed_options,
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_unknow_option_forced(self):
+ self.assert_command_effect(
+ self.fixture_cib_minimal,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ container_options={
+ "extra": "option",
+ },
+ force_options=True
+ ),
+ self.fixture_cib_extra_option,
+ [
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "container",
+ "allowed": self.allowed_options,
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_unknown_option_remove(self):
+ self.assert_command_effect(
+ self.fixture_cib_extra_option,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ container_options={
+ "extra": "",
+ }
+ ),
+ self.fixture_cib_minimal,
+ )
+
+
+class Network(CommonTest):
+ allowed_options = [
+ "control-port",
+ "host-interface",
+ "host-netmask",
+ "ip-range-start",
+ ]
+
+ fixture_cib_interface = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-interface="eth0" />
+ </bundle>
+ </resources>
+ """
+
+ fixture_cib_extra_option = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-interface="eth0" extra="option" />
+ </bundle>
+ </resources>
+ """
+
+ def test_add_network(self):
+ self.assert_command_effect(
+ self.fixture_cib_minimal,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ network_options={
+ "host-interface": "eth0",
+ }
+ ),
+ self.fixture_cib_interface
+ )
+
+ def test_remove_network(self):
+ self.assert_command_effect(
+ self.fixture_cib_interface,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ network_options={
+ "host-interface": "",
+ }
+ ),
+ self.fixture_cib_minimal
+ )
+
+ def test_keep_network_when_port_map_set(self):
+ fixture_cib_pre = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-interface="eth0">
+ <something />
+ </network>
+ </bundle>
+ </resources>
+ """
+ fixture_cib_post = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network>
+ <something />
+ </network>
+ </bundle>
+ </resources>
+ """
+ self.assert_command_effect(
+ fixture_cib_pre,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ network_options={
+ "host-interface": "",
+ }
+ ),
+ fixture_cib_post
+ )
+
+ def test_success(self):
+ fixture_cib_pre = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-interface="eth0" control-port="12345" />
+ </bundle>
+ </resources>
+ """
+ fixture_cib_post = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-interface="eth0" host-netmask="24" />
+ </bundle>
+ </resources>
+ """
+ self.assert_command_effect(
+ fixture_cib_pre,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ network_options={
+ "control-port": "",
+ "host-netmask": "24",
+ }
+ ),
+ fixture_cib_post
+ )
+
+ def test_unknow_option(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_interface)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ network_options={
+ "extra": "option",
+ }
+ ),
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "network",
+ "allowed": self.allowed_options,
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_unknow_option_forced(self):
+ self.assert_command_effect(
+ self.fixture_cib_interface,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ network_options={
+ "extra": "option",
+ },
+ force_options=True
+ ),
+ self.fixture_cib_extra_option,
+ [
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["extra", ],
+ "option_type": "network",
+ "allowed": self.allowed_options,
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_unknown_option_remove(self):
+ self.assert_command_effect(
+ self.fixture_cib_extra_option,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ network_options={
+ "extra": "",
+ }
+ ),
+ self.fixture_cib_interface,
+ )
+
+
+class PortMap(CommonTest):
+ allowed_options = [
+ "id",
+ "port",
+ "internal-port",
+ "range",
+ ]
+
+ fixture_cib_port_80 = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network>
+ <port-mapping id="B1-port-map-80" port="80" />
+ </network>
+ </bundle>
+ </resources>
+ """
+
+ fixture_cib_port_80_8080 = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network>
+ <port-mapping id="B1-port-map-80" port="80" />
+ <port-mapping id="B1-port-map-8080" port="8080" />
+ </network>
+ </bundle>
+ </resources>
+ """
+
+ def test_add_network(self):
+ self.assert_command_effect(
+ self.fixture_cib_minimal,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ port_map_add=[
+ {
+ "port": "80",
+ }
+ ]
+ ),
+ self.fixture_cib_port_80
+ )
+
+ def test_remove_network(self):
+ self.assert_command_effect(
+ self.fixture_cib_port_80,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ port_map_remove=[
+ "B1-port-map-80",
+ ]
+ ),
+ self.fixture_cib_minimal
+ )
+
+ def test_keep_network_when_options_set(self):
+ fixture_cib_pre = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-interface="eth0">
+ <port-mapping id="B1-port-map-80" port="80" />
+ </network>
+ </bundle>
+ </resources>
+ """
+ fixture_cib_post = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <network host-interface="eth0" />
+ </bundle>
+ </resources>
+ """
+ self.assert_command_effect(
+ fixture_cib_pre,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ port_map_remove=[
+ "B1-port-map-80",
+ ]
+ ),
+ fixture_cib_post
+ )
+
+ def test_add(self):
+ self.assert_command_effect(
+ self.fixture_cib_port_80,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ port_map_add=[
+ {
+ "port": "8080",
+ }
+ ]
+ ),
+ self.fixture_cib_port_80_8080
+ )
+
+ def test_remove(self):
+ self.assert_command_effect(
+ self.fixture_cib_port_80_8080,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ port_map_remove=[
+ "B1-port-map-8080",
+ ]
+ ),
+ self.fixture_cib_port_80
+ )
+
+ def test_remove_missing(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_port_80)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ port_map_remove=[
+ "B1-port-map-8080",
+ ]
+ ),
+ (
+ severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "id": "B1-port-map-8080",
+ "id_description": "port-map",
+ "context_type": "bundle",
+ "context_id": "B1",
+ },
+ None
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+
+class StorageMap(CommonTest):
+ allowed_options = [
+ "id",
+ "options",
+ "source-dir",
+ "source-dir-root",
+ "target-dir",
+ ]
+
+ fixture_cib_storage_1 = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <storage>
+ <storage-mapping
+ id="B1-storage-map"
+ source-dir="/tmp/docker1a"
+ target-dir="/tmp/docker1b"
+ />
+ </storage>
+ </bundle>
+ </resources>
+ """
+
+ fixture_cib_storage_1_2 = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ <storage>
+ <storage-mapping
+ id="B1-storage-map"
+ source-dir="/tmp/docker1a"
+ target-dir="/tmp/docker1b"
+ />
+ <storage-mapping
+ id="B1-storage-map-1"
+ source-dir="/tmp/docker2a"
+ target-dir="/tmp/docker2b"
+ />
+ </storage>
+ </bundle>
+ </resources>
+ """
+
+ def test_add_storage(self):
+ self.assert_command_effect(
+ self.fixture_cib_minimal,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ storage_map_add=[
+ {
+ "source-dir": "/tmp/docker1a",
+ "target-dir": "/tmp/docker1b",
+ }
+ ]
+ ),
+ self.fixture_cib_storage_1
+ )
+
+ def test_remove_storage(self):
+ self.assert_command_effect(
+ self.fixture_cib_storage_1,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ storage_map_remove=[
+ "B1-storage-map",
+ ]
+ ),
+ self.fixture_cib_minimal
+ )
+
+ def test_add(self):
+ self.assert_command_effect(
+ self.fixture_cib_storage_1,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ storage_map_add=[
+ {
+ "source-dir": "/tmp/docker2a",
+ "target-dir": "/tmp/docker2b",
+ }
+ ]
+ ),
+ self.fixture_cib_storage_1_2
+ )
+
+ def test_remove(self):
+ self.assert_command_effect(
+ self.fixture_cib_storage_1_2,
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ storage_map_remove=[
+ "B1-storage-map-1",
+ ]
+ ),
+ self.fixture_cib_storage_1
+ )
+
+ def test_remove_missing(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ self.fixture_cib_resources(self.fixture_cib_storage_1)
+ )
+ )
+ assert_raise_library_error(
+ lambda: resource.bundle_update(
+ self.env, "B1",
+ storage_map_remove=[
+ "B1-storage-map-1",
+ ]
+ ),
+ (
+ severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "id": "B1-storage-map-1",
+ "id_description": "storage-map",
+ "context_type": "bundle",
+ "context_id": "B1",
+ },
+ None
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+
+class Wait(CommonTest):
+ fixture_status_running = """
+ <resources>
+ <bundle id="B1" managed="true" image="new:image">
+ <replica id="0">
+ <resource id="B1-docker-0" managed="true" role="Started">
+ <node name="node1" id="1" cached="false"/>
+ </resource>
+ </replica>
+ <replica id="1">
+ <resource id="B1-docker-1" managed="true" role="Started">
+ <node name="node2" id="2" cached="false"/>
+ </resource>
+ </replica>
+ </bundle>
+ </resources>
+ """
+
+ fixture_status_not_running = """
+ <resources>
+ <bundle id="B1" managed="true" image="new:image">
+ <replica id="0">
+ <resource id="B1-docker-0" managed="true" role="Stopped" />
+ </replica>
+ <replica id="1">
+ <resource id="B1-docker-1" managed="true" role="Stopped" />
+ </replica>
+ </bundle>
+ </resources>
+ """
+
+ fixture_cib_pre = """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+ """
+
+ fixture_resources_bundle_simple = """
+ <resources>
+ <bundle id="B1">
+ <docker image="new:image" />
+ </bundle>
+ </resources>
+ """
+
+ timeout = 10
+
+ def fixture_calls_initial(self):
+ return (
+ fixture.call_wait_supported() +
+ fixture.calls_cib(
+ self.fixture_cib_pre,
+ self.fixture_resources_bundle_simple,
+ cib_base_file=self.cib_base_file,
+ )
+ )
+
+ def simple_bundle_update(self, wait=False):
+ return resource.bundle_update(
+ self.env, "B1", {"image": "new:image"}, wait=wait,
+ )
+
+ def test_wait_fail(self):
+ fixture_wait_timeout_error = dedent(
+ """\
+ Pending actions:
+ Action 12: B1-node2-stop on node2
+ Error performing operation: Timer expired
+ """
+ )
+ self.runner.set_runs(
+ self.fixture_calls_initial() +
+ fixture.call_wait(self.timeout, 62, fixture_wait_timeout_error)
+ )
+ assert_raise_library_error(
+ lambda: self.simple_bundle_update(self.timeout),
+ fixture.report_wait_for_idle_timed_out(
+ fixture_wait_timeout_error
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ @skip_unless_pacemaker_supports_bundle
+ def test_wait_ok_run_ok(self):
+ self.runner.set_runs(
+ self.fixture_calls_initial() +
+ fixture.call_wait(self.timeout) +
+ fixture.call_status(fixture.state_complete(
+ self.fixture_status_running
+ ))
+ )
+ self.simple_bundle_update(self.timeout)
+ self.env.report_processor.assert_reports([
+ fixture.report_resource_running(
+ "B1", {"Started": ["node1", "node2"]}
+ ),
+ ])
+ self.runner.assert_everything_launched()
+
+ @skip_unless_pacemaker_supports_bundle
+ def test_wait_ok_run_fail(self):
+ self.runner.set_runs(
+ self.fixture_calls_initial() +
+ fixture.call_wait(self.timeout) +
+ fixture.call_status(fixture.state_complete(
+ self.fixture_status_not_running
+ ))
+ )
+ assert_raise_library_error(
+ lambda: self.simple_bundle_update(self.timeout),
+ fixture.report_resource_not_running("B1", severities.ERROR),
+ )
+ self.runner.assert_everything_launched()
diff --git a/pcs/lib/commands/test/resource/test_resource_create.py b/pcs/lib/commands/test/resource/test_resource_create.py
new file mode 100644
index 0000000..6438a70
--- /dev/null
+++ b/pcs/lib/commands/test/resource/test_resource_create.py
@@ -0,0 +1,1295 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from functools import partial
+import logging
+
+from lxml import etree
+
+from pcs.test.tools.pcs_unittest import TestCase, mock
+from pcs.common import report_codes
+from pcs.lib.env import LibraryEnvironment
+from pcs.lib.commands import resource
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.lib.commands.test.resource.common import ResourceWithoutStateTest
+import pcs.lib.commands.test.resource.fixture as fixture
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.integration_lib import (
+ Call,
+ Runner,
+)
+from pcs.test.tools.misc import (
+ get_test_resource as rc,
+ outdent,
+ skip_unless_pacemaker_supports_bundle,
+)
+from pcs.test.tools.xml import etree_to_str
+
+
+runner = Runner()
+
+fixture_cib_resources_xml_simplest = """<resources>
+ <primitive class="ocf" id="A" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A-monitor-interval-10" interval="10" name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop" timeout="20"/>
+ </operations>
+ </primitive>
+</resources>"""
+
+fixture_cib_resources_xml_simplest_disabled = """<resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role" name="target-role"
+ value="Stopped"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="A-monitor-interval-10" interval="10" name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop" timeout="20"/>
+ </operations>
+ </primitive>
+</resources>"""
+
+fixture_cib_resources_xml_master_simplest = """<resources>
+ <master id="A-master">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10" name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </master>
+</resources>"""
+
+
+fixture_cib_resources_xml_master_simplest_disabled = """<resources>
+ <master id="A-master">
+ <meta_attributes id="A-master-meta_attributes">
+ <nvpair id="A-master-meta_attributes-target-role" name="target-role"
+ value="Stopped"
+ />
+ </meta_attributes>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10" name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </master>
+</resources>"""
+
+fixture_cib_resources_xml_master_simplest_disabled_meta_after = """<resources>
+ <master id="A-master">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10" name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="A-master-meta_attributes">
+ <nvpair id="A-master-meta_attributes-target-role" name="target-role"
+ value="Stopped"
+ />
+ </meta_attributes>
+ </master>
+</resources>"""
+
+fixture_cib_resources_xml_group_simplest = """<resources>
+ <group id="G">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10" name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </group>
+</resources>"""
+
+
+fixture_cib_resources_xml_group_simplest_disabled = """<resources>
+ <group id="G">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role" name="target-role"
+ value="Stopped"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="A-monitor-interval-10" interval="10" name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </group>
+</resources>"""
+
+
+fixture_cib_resources_xml_clone_simplest = """<resources>
+ <clone id="A-clone">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10" name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </clone>
+</resources>"""
+
+fixture_cib_resources_xml_clone_simplest_disabled = """<resources>
+ <clone id="A-clone">
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-target-role"
+ name="target-role"
+ value="Stopped"
+ />
+ </meta_attributes>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10" name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </clone>
+</resources>"""
+
+def fixture_state_resources_xml(role="Started", failed="false"):
+ return(
+ """
+ <resources>
+ <resource
+ id="A"
+ resource_agent="ocf::heartbeat:Dummy"
+ role="{role}"
+ active="true"
+ orphaned="false"
+ managed="true"
+ failed="{failed}"
+ failure_ignored="false"
+ nodes_running_on="1"
+ >
+ <node name="node1" id="1" cached="false"/>
+ </resource>
+ </resources>
+ """.format(
+ role=role,
+ failed=failed,
+ )
+ )
+
+def fixture_cib_calls(cib_resources_xml):
+ cib_xml = open(rc("cib-empty.xml")).read()
+
+ cib = etree.fromstring(cib_xml)
+ resources_section = cib.find(".//resources")
+ for child in etree.fromstring(cib_resources_xml):
+ resources_section.append(child)
+
+ return [
+ Call("cibadmin --local --query", cib_xml),
+ Call(
+ "cibadmin --replace --verbose --xml-pipe --scope configuration",
+ check_stdin=Call.create_check_stdin_xml(etree_to_str(cib))
+ ),
+ ]
+
+def fixture_agent_load_calls():
+ return [
+ Call(
+ "crm_resource --show-metadata ocf:heartbeat:Dummy",
+ open(rc("resource_agent_ocf_heartbeat_dummy.xml")).read()
+ ),
+ ]
+
+
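+# Everything a create command runs before the actual wait: the agent metadata
+# load, a "crm_resource -?" probe whose output advertises --wait support, and
+# the CIB query/replace pair.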
+def fixture_pre_timeout_calls(cib_resources_xml):
+ return (
+ fixture_agent_load_calls()
+ +
+ [
+ Call("crm_resource -?", "--wait"),
+ ]
+ +
+ fixture_cib_calls(cib_resources_xml)
+ )
+
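+# Simulates a successful "crm_resource --wait" followed by a crm_mon query;
+# the given resource state snippet is injected into crm_mon.minimal.xml so
+# each test controls the reported role and failure state.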
+def fixture_wait_and_get_state_calls(state_resource_xml):
+ crm_mon = etree.fromstring(open(rc("crm_mon.minimal.xml")).read())
+ crm_mon.append(etree.fromstring(state_resource_xml))
+
+ return [
+ Call("crm_resource --wait --timeout=10"),
+ Call(
+ "crm_mon --one-shot --as-xml --inactive",
+ etree_to_str(crm_mon),
+ ),
+ ]
+
+def fixture_calls_including_waiting(cib_resources_xml, state_resources_xml):
+ return (
+ fixture_pre_timeout_calls(cib_resources_xml)
+ +
+ fixture_wait_and_get_state_calls(state_resources_xml)
+ )
+
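+# A rough sketch of the external command sequence the fixtures above assemble
+# for a "create and wait" scenario, as replayed through the mocked runner
+# (assumed to be defined earlier in this module):
+#
+#   crm_resource --show-metadata ocf:heartbeat:Dummy   # agent metadata
+#   crm_resource -?                                     # detect --wait support
+#   cibadmin --local --query                            # load the CIB
+#   cibadmin --replace ... --scope configuration        # push the new CIB
+#   crm_resource --wait --timeout=10                    # wait for the cluster
+#   crm_mon --one-shot --as-xml --inactive              # read the final state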
+class CommonResourceTest(TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.patcher = mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: runner
+ )
+ cls.patcher.start()
+ cls.patcher_corosync = mock.patch.object(
+ LibraryEnvironment,
+ "get_corosync_conf_data",
+ lambda self: open(rc("corosync.conf")).read()
+ )
+ cls.patcher_corosync.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.patcher.stop()
+ cls.patcher_corosync.stop()
+
+ def setUp(self):
+ self.env = LibraryEnvironment(
+ mock.MagicMock(logging.Logger),
+ MockLibraryReportProcessor()
+ )
+ self.create = partial(self.get_create(), self.env)
+
+ def assert_command_effect(self, cmd, cib_resources_xml, reports=None):
+ runner.set_runs(
+ fixture_agent_load_calls()
+ +
+ fixture_cib_calls(cib_resources_xml)
+ )
+ cmd()
+ self.env.report_processor.assert_reports(reports if reports else [])
+ runner.assert_everything_launched()
+
+ def assert_wait_fail(self, command, cib_resources_xml):
+ wait_error_message = outdent(
+ """\
+ Pending actions:
+ Action 39: stonith-vm-rhel72-1-reboot on vm-rhel72-1
+ Error performing operation: Timer expired
+ """
+ )
+
+ runner.set_runs(fixture_pre_timeout_calls(cib_resources_xml) + [
+ Call(
+ "crm_resource --wait --timeout=10",
+ stderr=wait_error_message,
+ returncode=62,
+ ),
+ ])
+
+ assert_raise_library_error(
+ command,
+ (
+ severities.ERROR,
+ report_codes.WAIT_FOR_IDLE_TIMED_OUT,
+ {
+ "reason": wait_error_message.strip(),
+ },
+ None
+ )
+ )
+ runner.assert_everything_launched()
+
+ def assert_wait_ok_run_fail(
+ self, command, cib_resources_xml, state_resources_xml
+ ):
+ runner.set_runs(fixture_calls_including_waiting(
+ cib_resources_xml,
+ state_resources_xml
+ ))
+
+ assert_raise_library_error(
+ command,
+ (
+ severities.ERROR,
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ {
+ "resource_id": "A",
+ },
+ None
+ )
+ )
+ runner.assert_everything_launched()
+
+ def assert_wait_ok_run_ok(
+ self, command, cib_resources_xml, state_resources_xml
+ ):
+ runner.set_runs(fixture_calls_including_waiting(
+ cib_resources_xml,
+ state_resources_xml
+ ))
+ command()
+ self.env.report_processor.assert_reports([
+ (
+ severities.INFO,
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ {
+ "roles_with_nodes": {"Started": ["node1"]},
+ "resource_id": "A",
+ },
+ None
+ ),
+ ])
+ runner.assert_everything_launched()
+
+ def assert_wait_ok_disable_fail(
+ self, command, cib_resources_xml, state_resources_xml
+ ):
+ runner.set_runs(fixture_calls_including_waiting(
+ cib_resources_xml,
+ state_resources_xml
+ ))
+
+ assert_raise_library_error(
+ command,
+ (
+ severities.ERROR,
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ {
+ 'roles_with_nodes': {'Started': ['node1']},
+ 'resource_id': 'A'
+ },
+ None
+ )
+ )
+ runner.assert_everything_launched()
+
+ def assert_wait_ok_disable_ok(
+ self, command, cib_resources_xml, state_resources_xml
+ ):
+ runner.set_runs(fixture_calls_including_waiting(
+ cib_resources_xml,
+ state_resources_xml
+ ))
+ command()
+ self.env.report_processor.assert_reports([
+ (
+ severities.INFO,
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ {
+ "resource_id": "A",
+ },
+ None
+ ),
+ ])
+ runner.assert_everything_launched()
+
+class Create(CommonResourceTest):
+ def get_create(self):
+ return resource.create
+
+ def simplest_create(self, wait=False, disabled=False, meta_attributes=None):
+ return self.create(
+ "A", "ocf:heartbeat:Dummy",
+ operations=[],
+ meta_attributes=meta_attributes if meta_attributes else {},
+ instance_attributes={},
+ wait=wait,
+ ensure_disabled=disabled
+ )
+
+ def test_simplest_resource(self):
+ self.assert_command_effect(
+ self.simplest_create,
+ fixture_cib_resources_xml_simplest
+ )
+
+ def test_resource_with_operation(self):
+ self.assert_command_effect(
+ lambda: self.create(
+ "A", "ocf:heartbeat:Dummy",
+ operations=[
+ {"name": "monitor", "timeout": "10s", "interval": "10"}
+ ],
+ meta_attributes={},
+ instance_attributes={},
+ ),
+ """<resources>
+ <primitive class="ocf" id="A" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="10s"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_fail_wait(self):
+ self.assert_wait_fail(
+ lambda: self.simplest_create(wait="10"),
+ fixture_cib_resources_xml_simplest,
+ )
+
+ def test_wait_ok_run_fail(self):
+ self.assert_wait_ok_run_fail(
+ lambda: self.simplest_create(wait="10"),
+ fixture_cib_resources_xml_simplest,
+ fixture_state_resources_xml(failed="true"),
+ )
+
+ def test_wait_ok_run_ok(self):
+ self.assert_wait_ok_run_ok(
+ lambda: self.simplest_create(wait="10"),
+ fixture_cib_resources_xml_simplest,
+ fixture_state_resources_xml(),
+ )
+
+ def test_wait_ok_disable_fail(self):
+ self.assert_wait_ok_disable_fail(
+ lambda: self.simplest_create(wait="10", disabled=True),
+ fixture_cib_resources_xml_simplest_disabled,
+ fixture_state_resources_xml(),
+ )
+
+ def test_wait_ok_disable_ok(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(wait="10", disabled=True),
+ fixture_cib_resources_xml_simplest_disabled,
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+ def test_wait_ok_disable_ok_by_target_role(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(
+ wait="10",
+ meta_attributes={"target-role": "Stopped"}
+ ),
+ fixture_cib_resources_xml_simplest_disabled,
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+class CreateAsMaster(CommonResourceTest):
+ def get_create(self):
+ return resource.create_as_master
+
+ def simplest_create(
+ self, wait=False, disabled=False, meta_attributes=None,
+ master_meta_options=None
+ ):
+ return self.create(
+ "A", "ocf:heartbeat:Dummy",
+ operations=[],
+ meta_attributes=meta_attributes if meta_attributes else {},
+ instance_attributes={},
+            clone_meta_options=(
+                master_meta_options if master_meta_options else {}
+            ),
+ wait=wait,
+ ensure_disabled=disabled
+ )
+
+ def test_simplest_resource(self):
+ self.assert_command_effect(
+ self.simplest_create,
+ fixture_cib_resources_xml_master_simplest
+ )
+
+ def test_fail_wait(self):
+ self.assert_wait_fail(
+ lambda: self.simplest_create(wait="10"),
+ fixture_cib_resources_xml_master_simplest,
+ )
+
+ def test_wait_ok_run_fail(self):
+ self.assert_wait_ok_run_fail(
+ lambda: self.simplest_create(wait="10"),
+ fixture_cib_resources_xml_master_simplest,
+ fixture_state_resources_xml(failed="true"),
+ )
+
+ def test_wait_ok_run_ok(self):
+ self.assert_wait_ok_run_ok(
+ lambda: self.simplest_create(wait="10"),
+ fixture_cib_resources_xml_master_simplest,
+ fixture_state_resources_xml(),
+ )
+
+ def test_wait_ok_disable_fail(self):
+ self.assert_wait_ok_disable_fail(
+ lambda: self.simplest_create(wait="10", disabled=True),
+ fixture_cib_resources_xml_master_simplest_disabled,
+ fixture_state_resources_xml(),
+ )
+
+ def test_wait_ok_disable_ok(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(wait="10", disabled=True),
+ fixture_cib_resources_xml_master_simplest_disabled,
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+ def test_wait_ok_disable_ok_by_target_role(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(
+ wait="10",
+ meta_attributes={"target-role": "Stopped"}
+ ),
+ """<resources>
+ <master id="A-master">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </master>
+ </resources>""",
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+ def test_wait_ok_disable_ok_by_target_role_in_master(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(
+ wait="10",
+ master_meta_options={"target-role": "Stopped"}
+ ),
+ fixture_cib_resources_xml_master_simplest_disabled_meta_after,
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+ def test_wait_ok_disable_ok_by_clone_max(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(
+ wait="10",
+ master_meta_options={"clone-max": "0"}
+ ),
+ """<resources>
+ <master id="A-master">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="A-master-meta_attributes">
+ <nvpair id="A-master-meta_attributes-clone-max"
+ name="clone-max" value="0"
+ />
+ </meta_attributes>
+ </master>
+ </resources>""",
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+ def test_wait_ok_disable_ok_by_clone_node_max(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(
+ wait="10",
+ master_meta_options={"clone-node-max": "0"}
+ ),
+ """<resources>
+ <master id="A-master">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="A-master-meta_attributes">
+ <nvpair id="A-master-meta_attributes-clone-node-max"
+ name="clone-node-max" value="0"
+ />
+ </meta_attributes>
+ </master>
+ </resources>""",
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+class CreateInGroup(CommonResourceTest):
+ def get_create(self):
+ return resource.create_in_group
+
+ def simplest_create(self, wait=False, disabled=False, meta_attributes=None):
+ return self.create(
+ "A", "ocf:heartbeat:Dummy", "G",
+ operations=[],
+ meta_attributes=meta_attributes if meta_attributes else {},
+ instance_attributes={},
+ wait=wait,
+ ensure_disabled=disabled
+ )
+
+ def test_simplest_resource(self):
+ self.assert_command_effect(self.simplest_create, """<resources>
+ <group id="G">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </group>
+ </resources>""")
+
+ def test_fail_wait(self):
+ self.assert_wait_fail(
+ lambda: self.simplest_create(wait="10"),
+ fixture_cib_resources_xml_group_simplest,
+ )
+
+ def test_wait_ok_run_fail(self):
+ self.assert_wait_ok_run_fail(
+ lambda: self.simplest_create(wait="10"),
+ fixture_cib_resources_xml_group_simplest,
+ fixture_state_resources_xml(failed="true"),
+ )
+
+ def test_wait_ok_run_ok(self):
+ self.assert_wait_ok_run_ok(
+ lambda: self.simplest_create(wait="10"),
+ fixture_cib_resources_xml_group_simplest,
+ fixture_state_resources_xml(),
+ )
+
+ def test_wait_ok_disable_fail(self):
+ self.assert_wait_ok_disable_fail(
+ lambda: self.simplest_create(wait="10", disabled=True),
+ fixture_cib_resources_xml_group_simplest_disabled,
+ fixture_state_resources_xml(),
+ )
+
+ def test_wait_ok_disable_ok(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(wait="10", disabled=True),
+ fixture_cib_resources_xml_group_simplest_disabled,
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+ def test_wait_ok_disable_ok_by_target_role(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(
+ wait="10",
+ meta_attributes={"target-role": "Stopped"}
+ ),
+ fixture_cib_resources_xml_group_simplest_disabled,
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+class CreateAsClone(CommonResourceTest):
+ def get_create(self):
+ return resource.create_as_clone
+
+ def simplest_create(
+ self, wait=False, disabled=False, meta_attributes=None,
+ clone_options=None
+ ):
+ return self.create(
+ "A", "ocf:heartbeat:Dummy",
+ operations=[],
+ meta_attributes=meta_attributes if meta_attributes else {},
+ instance_attributes={},
+ clone_meta_options=clone_options if clone_options else {},
+ wait=wait,
+ ensure_disabled=disabled
+ )
+
+ def test_simplest_resource(self):
+ self.assert_command_effect(
+ self.simplest_create,
+ fixture_cib_resources_xml_clone_simplest
+ )
+
+ def test_fail_wait(self):
+ self.assert_wait_fail(
+ lambda: self.simplest_create(wait="10"),
+ fixture_cib_resources_xml_clone_simplest,
+ )
+
+ def test_wait_ok_run_fail(self):
+ self.assert_wait_ok_run_fail(
+ lambda: self.simplest_create(wait="10"),
+ fixture_cib_resources_xml_clone_simplest,
+ fixture_state_resources_xml(failed="true"),
+ )
+
+ def test_wait_ok_run_ok(self):
+ self.assert_wait_ok_run_ok(
+ lambda: self.simplest_create(wait="10"),
+ fixture_cib_resources_xml_clone_simplest,
+ fixture_state_resources_xml(),
+ )
+
+ def test_wait_ok_disable_fail(self):
+ self.assert_wait_ok_disable_fail(
+ lambda: self.simplest_create(wait="10", disabled=True),
+ fixture_cib_resources_xml_clone_simplest_disabled,
+ fixture_state_resources_xml(),
+ )
+
+ def test_wait_ok_disable_ok(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(wait="10", disabled=True),
+ fixture_cib_resources_xml_clone_simplest_disabled,
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+ def test_wait_ok_disable_ok_by_target_role(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(
+ wait="10",
+ meta_attributes={"target-role": "Stopped"}
+ ),
+ """<resources>
+ <clone id="A-clone">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role"
+ value="Stopped"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </clone>
+ </resources>"""
+ ,
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+ def test_wait_ok_disable_ok_by_target_role_in_clone(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(
+ wait="10",
+ clone_options={"target-role": "Stopped"}
+ ),
+ """<resources>
+ <clone id="A-clone">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-target-role"
+ name="target-role" value="Stopped"
+ />
+ </meta_attributes>
+ </clone>
+ </resources>""",
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+ def test_wait_ok_disable_ok_by_clone_max(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(
+ wait="10",
+ clone_options={"clone-max": "0"}
+ ),
+ """<resources>
+ <clone id="A-clone">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-clone-max"
+ name="clone-max" value="0"
+ />
+ </meta_attributes>
+ </clone>
+ </resources>""",
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+ def test_wait_ok_disable_ok_by_clone_node_max(self):
+ self.assert_wait_ok_disable_ok(
+ lambda: self.simplest_create(
+ wait="10",
+ clone_options={"clone-node-max": "0"}
+ ),
+ """<resources>
+ <clone id="A-clone">
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor"
+ timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-clone-node-max"
+ name="clone-node-max" value="0"
+ />
+ </meta_attributes>
+ </clone>
+ </resources>""",
+ fixture_state_resources_xml(role="Stopped"),
+ )
+
+
+class CreateInToBundle(ResourceWithoutStateTest):
+ upgraded_cib = "cib-empty-2.8.xml"
+
+ fixture_empty_resources = "<resources />"
+
+ fixture_resources_pre = """
+ <resources>
+ <bundle id="B"/>
+ </resources>
+ """
+
+ fixture_resources_post_simple = """
+ <resources>
+ <bundle id="B">
+ <primitive
+ class="ocf" id="A" provider="heartbeat" type="Dummy"
+ >
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ """
+
+ fixture_resources_post_disabled = """
+ <resources>
+ <bundle id="B">
+ <primitive
+ class="ocf" id="A" provider="heartbeat" type="Dummy"
+ >
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="A-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="A-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ """
+
+ fixture_status_stopped = """
+ <resources>
+ <bundle id="B" managed="true">
+ <replica id="0">
+ <resource id="B-0" managed="true" role="Stopped" />
+ </replica>
+ </bundle>
+ </resources>
+ """
+
+ fixture_status_running_with_primitive = """
+ <resources>
+ <bundle id="B" managed="true">
+ <replica id="0">
+ <resource id="B-0" managed="true" role="Started">
+ <node name="node1" id="1" cached="false"/>
+ </resource>
+ <resource id="A" managed="true" role="Started">
+ <node name="node1" id="1" cached="false"/>
+ </resource>
+ </replica>
+ </bundle>
+ </resources>
+ """
+
+ fixture_status_primitive_not_running = """
+ <resources>
+ <bundle id="B" managed="true">
+ <replica id="0">
+ <resource id="B-0" managed="true" role="Started">
+ <node name="node1" id="1" cached="false"/>
+ </resource>
+ <resource id="A" managed="true" role="Stopped"/>
+ </replica>
+ </bundle>
+ </resources>
+ """
+
+ fixture_wait_timeout_error = outdent(
+ """\
+ Pending actions:
+ Action 12: B-node2-stop on node2
+ Error performing operation: Timer expired
+ """
+ )
+
+ def simplest_create(self, wait=False, disabled=False, meta_attributes=None):
+ return resource.create_into_bundle(
+ self.env,
+ "A", "ocf:heartbeat:Dummy",
+ operations=[],
+ meta_attributes=meta_attributes if meta_attributes else {},
+ instance_attributes={},
+ bundle_id="B",
+ wait=wait,
+ ensure_disabled=disabled
+ )
+
+ def test_upgrade_cib(self):
+ self.runner.set_runs(
+ fixture_agent_load_calls()
+ +
+ fixture.calls_cib_load_and_upgrade(self.fixture_empty_resources)
+ +
+ fixture.calls_cib(
+ self.fixture_resources_pre,
+ self.fixture_resources_post_simple,
+ self.upgraded_cib,
+ )
+ )
+ self.simplest_create()
+ self.runner.assert_everything_launched()
+
+ def test_simplest_resource(self):
+ self.runner.set_runs(
+ fixture_agent_load_calls()
+ +
+ fixture.calls_cib(
+ self.fixture_resources_pre,
+ self.fixture_resources_post_simple,
+ self.upgraded_cib,
+ )
+ )
+ self.simplest_create()
+ self.runner.assert_everything_launched()
+
+ def test_bundle_doesnt_exist(self):
+ self.runner.set_runs(
+ fixture_agent_load_calls()
+ +
+ fixture.call_cib_load(fixture.cib_resources(
+ self.fixture_empty_resources, self.upgraded_cib,
+ ))
+ )
+ assert_raise_library_error(
+ self.simplest_create,
+ (
+ severities.ERROR,
+ report_codes.ID_NOT_FOUND,
+ {
+ "id": "B",
+ "id_description": "bundle",
+ "context_type": "resources",
+ "context_id": "",
+ }
+ )
+ )
+
+ def test_id_not_bundle(self):
+ resources_pre_update = """<resources>
+ <primitive id="B"/>
+ </resources>"""
+ self.runner.set_runs(
+ fixture_agent_load_calls()
+ +
+ fixture.call_cib_load(fixture.cib_resources(
+ resources_pre_update, self.upgraded_cib,
+ ))
+ )
+ assert_raise_library_error(
+ self.simplest_create,
+ (
+ severities.ERROR,
+ report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
+ {
+ "id": "B",
+ "expected_types": ["bundle"],
+ "current_type": "primitive",
+ }
+ )
+ )
+
+ def test_bundle_not_empty(self):
+ resources_pre_update = """<resources>
+ <bundle id="B">
+ <primitive id="P"/>
+ </bundle>
+ </resources>"""
+ self.runner.set_runs(
+ fixture_agent_load_calls()
+ +
+ fixture.call_cib_load(fixture.cib_resources(
+ resources_pre_update, self.upgraded_cib,
+ ))
+ )
+ assert_raise_library_error(
+ self.simplest_create,
+ (
+ severities.ERROR,
+ report_codes.RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE,
+ {
+ "bundle_id": "B",
+ "resource_id": "P",
+ }
+ )
+ )
+
+ def test_wait_fail(self):
+ self.runner.set_runs(
+ fixture.call_dummy_metadata() +
+ fixture.call_wait_supported() +
+ fixture.calls_cib(
+ self.fixture_resources_pre,
+ self.fixture_resources_post_simple,
+ cib_base_file=self.upgraded_cib,
+ ) +
+ fixture.call_wait(10, 62, self.fixture_wait_timeout_error)
+ )
+ assert_raise_library_error(
+ lambda: self.simplest_create(10),
+ fixture.report_wait_for_idle_timed_out(
+ self.fixture_wait_timeout_error
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ @skip_unless_pacemaker_supports_bundle
+ def test_wait_ok_run_ok(self):
+ self.runner.set_runs(
+ fixture.call_dummy_metadata() +
+ fixture.call_wait_supported() +
+ fixture.calls_cib(
+ self.fixture_resources_pre,
+ self.fixture_resources_post_simple,
+ cib_base_file=self.upgraded_cib,
+ ) +
+ fixture.call_wait(10) +
+ fixture.call_status(fixture.state_complete(
+ self.fixture_status_running_with_primitive
+ ))
+ )
+ self.simplest_create(10)
+ self.env.report_processor.assert_reports([
+ fixture.report_resource_running("A", {"Started": ["node1"]}),
+ ])
+ self.runner.assert_everything_launched()
+
+ @skip_unless_pacemaker_supports_bundle
+ def test_wait_ok_run_fail(self):
+ self.runner.set_runs(
+ fixture.call_dummy_metadata() +
+ fixture.call_wait_supported() +
+ fixture.calls_cib(
+ self.fixture_resources_pre,
+ self.fixture_resources_post_simple,
+ cib_base_file=self.upgraded_cib,
+ ) +
+ fixture.call_wait(10) +
+ fixture.call_status(fixture.state_complete(
+ self.fixture_status_primitive_not_running
+ ))
+ )
+ assert_raise_library_error(
+ lambda: self.simplest_create(10),
+ fixture.report_resource_not_running("A", severities.ERROR),
+ )
+ self.runner.assert_everything_launched()
+
+ @skip_unless_pacemaker_supports_bundle
+ def test_disabled_wait_ok_not_running(self):
+ self.runner.set_runs(
+ fixture.call_dummy_metadata() +
+ fixture.call_wait_supported() +
+ fixture.calls_cib(
+ self.fixture_resources_pre,
+ self.fixture_resources_post_disabled,
+ cib_base_file=self.upgraded_cib,
+ ) +
+ fixture.call_wait(10) +
+ fixture.call_status(fixture.state_complete(
+ self.fixture_status_primitive_not_running
+ ))
+ )
+ self.simplest_create(10, disabled=True)
+ self.env.report_processor.assert_reports([
+ fixture.report_resource_not_running("A")
+ ])
+ self.runner.assert_everything_launched()
+
+ @skip_unless_pacemaker_supports_bundle
+ def test_disabled_wait_ok_running(self):
+ self.runner.set_runs(
+ fixture.call_dummy_metadata() +
+ fixture.call_wait_supported() +
+ fixture.calls_cib(
+ self.fixture_resources_pre,
+ self.fixture_resources_post_disabled,
+ cib_base_file=self.upgraded_cib,
+ ) +
+ fixture.call_wait(10) +
+ fixture.call_status(fixture.state_complete(
+ self.fixture_status_running_with_primitive
+ ))
+ )
+ assert_raise_library_error(
+ lambda: self.simplest_create(10, disabled=True),
+ fixture.report_resource_running(
+ "A", {"Started": ["node1"]}, severities.ERROR
+ ),
+ )
+ self.runner.assert_everything_launched()
diff --git a/pcs/lib/commands/test/resource/test_resource_enable_disable.py b/pcs/lib/commands/test/resource/test_resource_enable_disable.py
new file mode 100644
index 0000000..91ac068
--- /dev/null
+++ b/pcs/lib/commands/test/resource/test_resource_enable_disable.py
@@ -0,0 +1,1519 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.common import report_codes
+from pcs.lib.commands import resource
+from pcs.lib.commands.test.resource.common import ResourceWithStateTest
+import pcs.lib.commands.test.resource.fixture as fixture
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.misc import (
+ outdent,
+ skip_unless_pacemaker_supports_bundle,
+)
+
+
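+# Fixture naming convention used below: *_cib_* constants hold the
+# <resources> section of the CIB before/after a command, *_status_* constants
+# hold the matching crm_mon state, and the managed/unmanaged suffix mirrors
+# the "managed" attribute reported by crm_mon.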
+fixture_primitive_cib_enabled = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ </primitive>
+ </resources>
+"""
+fixture_primitive_cib_disabled = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ </resources>
+"""
+fixture_primitive_status_managed = """
+ <resources>
+ <resource id="A" managed="true" />
+ </resources>
+"""
+fixture_primitive_status_unmanaged = """
+ <resources>
+ <resource id="A" managed="false" />
+ </resources>
+"""
+
+fixture_two_primitives_cib_enabled = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ </primitive>
+ </resources>
+"""
+fixture_two_primitives_cib_disabled = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ </primitive>
+ </resources>
+"""
+fixture_two_primitives_cib_disabled_both = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ <meta_attributes id="B-meta_attributes">
+ <nvpair id="B-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ </resources>
+"""
+fixture_two_primitives_status_managed = """
+ <resources>
+ <resource id="A" managed="true" />
+ <resource id="B" managed="true" />
+ </resources>
+"""
+
+fixture_group_cib_enabled = """
+ <resources>
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </group>
+ </resources>
+"""
+fixture_group_cib_disabled_group = """
+ <resources>
+ <group id="A">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <primitive id="A1" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </group>
+ </resources>
+"""
+fixture_group_cib_disabled_primitive = """
+ <resources>
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A1-meta_attributes">
+ <nvpair id="A1-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </group>
+ </resources>
+"""
+fixture_group_cib_disabled_both = """
+ <resources>
+ <group id="A">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <primitive id="A1" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A1-meta_attributes">
+ <nvpair id="A1-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </group>
+ </resources>
+"""
+fixture_group_status_managed = """
+ <resources>
+ <group id="A" number_resources="2">
+ <resource id="A1" managed="true" />
+ <resource id="A2" managed="true" />
+ </group>
+ </resources>
+"""
+fixture_group_status_unmanaged = """
+ <resources>
+ <group id="A" number_resources="2">
+ <resource id="A1" managed="false" />
+ <resource id="A2" managed="false" />
+ </group>
+ </resources>
+"""
+
+fixture_clone_cib_enabled = """
+ <resources>
+ <clone id="A-clone">
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </clone>
+ </resources>
+"""
+fixture_clone_cib_disabled_clone = """
+ <resources>
+ <clone id="A-clone">
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </clone>
+ </resources>
+"""
+fixture_clone_cib_disabled_primitive = """
+ <resources>
+ <clone id="A-clone">
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ </clone>
+ </resources>
+"""
+fixture_clone_cib_disabled_both = """
+ <resources>
+ <clone id="A-clone">
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ </clone>
+ </resources>
+"""
+fixture_clone_status_managed = """
+ <resources>
+ <clone id="A-clone" managed="true" multi_state="false" unique="false">
+ <resource id="A" managed="true" />
+ <resource id="A" managed="true" />
+ </clone>
+ </resources>
+"""
+fixture_clone_status_unmanaged = """
+ <resources>
+ <clone id="A-clone" managed="false" multi_state="false" unique="false">
+ <resource id="A" managed="false" />
+ <resource id="A" managed="false" />
+ </clone>
+ </resources>
+"""
+
+fixture_master_cib_enabled = """
+ <resources>
+ <master id="A-master">
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </master>
+ </resources>
+"""
+fixture_master_cib_disabled_master = """
+ <resources>
+ <master id="A-master">
+ <meta_attributes id="A-master-meta_attributes">
+ <nvpair id="A-master-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </master>
+ </resources>
+"""
+fixture_master_cib_disabled_primitive = """
+ <resources>
+ <master id="A-master">
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ </master>
+ </resources>
+"""
+fixture_master_cib_disabled_both = """
+ <resources>
+ <master id="A-master">
+ <meta_attributes id="A-master-meta_attributes">
+ <nvpair id="A-master-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ </master>
+ </resources>
+"""
+fixture_master_status_managed = """
+ <resources>
+ <clone id="A-master" managed="true" multi_state="true" unique="false">
+ <resource id="A" managed="true" />
+ <resource id="A" managed="true" />
+ </clone>
+ </resources>
+"""
+fixture_master_status_unmanaged = """
+ <resources>
+ <clone id="A-master" managed="false" multi_state="true" unique="false">
+ <resource id="A" managed="false" />
+ <resource id="A" managed="false" />
+ </clone>
+ </resources>
+"""
+
+fixture_clone_group_cib_enabled = """
+ <resources>
+ <clone id="A-clone">
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_cib_disabled_clone = """
+ <resources>
+ <clone id="A-clone">
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_cib_disabled_group = """
+ <resources>
+ <clone id="A-clone">
+ <group id="A">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_cib_disabled_primitive = """
+ <resources>
+ <clone id="A-clone">
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="A1-meta_attributes">
+ <nvpair id="A1-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_cib_disabled_clone_group = """
+ <resources>
+ <clone id="A-clone">
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <group id="A">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_cib_disabled_all = """
+ <resources>
+ <clone id="A-clone">
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <group id="A">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="A1-meta_attributes">
+ <nvpair id="A1-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_status_managed = """
+ <resources>
+ <clone id="A-clone" managed="true" multi_state="false" unique="false">
+ <group id="A:0" number_resources="2">
+ <resource id="A1" managed="true" />
+ <resource id="A2" managed="true" />
+ </group>
+ <group id="A:1" number_resources="2">
+ <resource id="A1" managed="true" />
+ <resource id="A2" managed="true" />
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_status_unmanaged = """
+ <resources>
+ <clone id="A-clone" managed="false" multi_state="false" unique="false">
+ <group id="A:0" number_resources="2">
+ <resource id="A1" managed="false" />
+ <resource id="A2" managed="false" />
+ </group>
+ <group id="A:1" number_resources="2">
+ <resource id="A1" managed="false" />
+ <resource id="A2" managed="false" />
+ </group>
+ </clone>
+ </resources>
+"""
+
+fixture_bundle_cib_enabled = """
+ <resources>
+ <bundle id="A-bundle">
+ <docker image="pcs:test" />
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </bundle>
+ </resources>
+"""
+fixture_bundle_cib_disabled_primitive = """
+ <resources>
+ <bundle id="A-bundle">
+ <docker image="pcs:test" />
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ </bundle>
+ </resources>
+"""
+fixture_bundle_status_managed = """
+ <resources>
+ <bundle id="A-bundle" type="docker" image="pcmktest:http"
+ unique="false" managed="true" failed="false"
+ >
+ <replica id="0">
+ <resource id="A" />
+ </replica>
+ <replica id="1">
+ <resource id="A" />
+ </replica>
+ </bundle>
+ </resources>
+"""
+fixture_bundle_status_unmanaged = """
+ <resources>
+ <bundle id="A-bundle" type="docker" image="pcmktest:http"
+ unique="false" managed="true" failed="false"
+ >
+ <replica id="0">
+ <resource id="A" managed="false" />
+ </replica>
+ <replica id="1">
+ <resource id="A" managed="false" />
+ </replica>
+ </bundle>
+ </resources>
+"""
+
+def fixture_report_unmanaged(resource):
+ return (
+ severities.WARNING,
+ report_codes.RESOURCE_IS_UNMANAGED,
+ {
+ "resource_id": resource,
+ },
+ None
+ )
+
+
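+# Expected reports in these tests are plain tuples of
+# (severity, report_code, info_dict, forceable); the trailing None means,
+# presumably, that the report carries no force code. For example:
+#
+#   fixture_report_unmanaged("A") == (
+#       severities.WARNING,
+#       report_codes.RESOURCE_IS_UNMANAGED,
+#       {"resource_id": "A"},
+#       None,
+#   )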
+class DisablePrimitive(ResourceWithStateTest):
+ def test_nonexistent_resource(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(fixture_primitive_cib_enabled)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.disable(self.env, ["B"], False),
+ fixture.report_not_found("B", "resources")
+ )
+ self.runner.assert_everything_launched()
+
+ def test_nonexistent_resource_in_status(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(fixture_two_primitives_cib_enabled)
+ )
+ +
+ fixture.call_status(
+ fixture.state_complete(fixture_primitive_status_managed)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.disable(self.env, ["B"], False),
+ fixture.report_not_found("B")
+ )
+ self.runner.assert_everything_launched()
+
+ def test_correct_resource(self):
+ self.assert_command_effect(
+ fixture_two_primitives_cib_enabled,
+ fixture_two_primitives_status_managed,
+ lambda: resource.disable(self.env, ["A"], False),
+ fixture_two_primitives_cib_disabled
+ )
+
+ def test_unmanaged(self):
+ # The code doesn't care what causes the resource to be unmanaged
+        # (cluster property, resource meta-attribute, and so on). It only
+ # checks the cluster state (crm_mon).
+ self.assert_command_effect(
+ fixture_primitive_cib_enabled,
+ fixture_primitive_status_unmanaged,
+ lambda: resource.disable(self.env, ["A"], False),
+ fixture_primitive_cib_disabled,
+ reports=[
+ fixture_report_unmanaged("A"),
+ ]
+ )
+
+
+class EnablePrimitive(ResourceWithStateTest):
+ def test_nonexistent_resource(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(fixture_primitive_cib_disabled)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.enable(self.env, ["B"], False),
+ fixture.report_not_found("B", "resources")
+ )
+ self.runner.assert_everything_launched()
+
+ def test_nonexistent_resource_in_status(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(fixture_two_primitives_cib_disabled)
+ )
+ +
+ fixture.call_status(
+ fixture.state_complete(fixture_primitive_status_managed)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.enable(self.env, ["B"], False),
+ fixture.report_not_found("B")
+ )
+ self.runner.assert_everything_launched()
+
+ def test_correct_resource(self):
+ self.assert_command_effect(
+ fixture_two_primitives_cib_disabled_both,
+ fixture_two_primitives_status_managed,
+ lambda: resource.enable(self.env, ["B"], False),
+ fixture_two_primitives_cib_disabled
+ )
+
+ def test_unmanaged(self):
+ # The code doesn't care what causes the resource to be unmanaged
+        # (cluster property, resource meta-attribute, and so on). It only
+ # checks the cluster state (crm_mon).
+ self.assert_command_effect(
+ fixture_primitive_cib_disabled,
+ fixture_primitive_status_unmanaged,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_primitive_cib_enabled,
+ reports=[
+ fixture_report_unmanaged("A"),
+ ]
+ )
+
+
+class MoreResources(ResourceWithStateTest):
+ fixture_cib_enabled = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive class="ocf" id="C" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive class="ocf" id="D" provider="heartbeat" type="Dummy">
+ </primitive>
+ </resources>
+ """
+ fixture_cib_disabled = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ <meta_attributes id="B-meta_attributes">
+ <nvpair id="B-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ <primitive class="ocf" id="C" provider="heartbeat" type="Dummy">
+ <meta_attributes id="C-meta_attributes">
+ <nvpair id="C-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ <primitive class="ocf" id="D" provider="heartbeat" type="Dummy">
+ <meta_attributes id="D-meta_attributes">
+ <nvpair id="D-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ </resources>
+ """
+ fixture_status = """
+ <resources>
+ <resource id="A" managed="true" />
+ <resource id="B" managed="false" />
+ <resource id="C" managed="true" />
+ <resource id="D" managed="false" />
+ </resources>
+ """
+ def test_success_enable(self):
+ fixture_enabled = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive class="ocf" id="C" provider="heartbeat" type="Dummy">
+ <meta_attributes id="C-meta_attributes">
+ <nvpair id="C-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ <primitive class="ocf" id="D" provider="heartbeat" type="Dummy">
+ </primitive>
+ </resources>
+ """
+ self.assert_command_effect(
+ self.fixture_cib_disabled,
+ self.fixture_status,
+ lambda: resource.enable(self.env, ["A", "B", "D"], False),
+ fixture_enabled,
+ reports=[
+ fixture_report_unmanaged("B"),
+ fixture_report_unmanaged("D"),
+ ]
+ )
+
+ def test_success_disable(self):
+ fixture_disabled = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ <meta_attributes id="B-meta_attributes">
+ <nvpair id="B-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ <primitive class="ocf" id="C" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive class="ocf" id="D" provider="heartbeat" type="Dummy">
+ <meta_attributes id="D-meta_attributes">
+ <nvpair id="D-meta_attributes-target-role"
+ name="target-role" value="Stopped" />
+ </meta_attributes>
+ </primitive>
+ </resources>
+ """
+ self.assert_command_effect(
+ self.fixture_cib_enabled,
+ self.fixture_status,
+ lambda: resource.disable(self.env, ["A", "B", "D"], False),
+ fixture_disabled,
+ reports=[
+ fixture_report_unmanaged("B"),
+ fixture_report_unmanaged("D"),
+ ]
+ )
+
+ def test_bad_resource_enable(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(self.fixture_cib_disabled)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.enable(self.env, ["B", "X", "Y", "A"], False),
+ fixture.report_not_found("X", "resources"),
+ fixture.report_not_found("Y", "resources"),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_bad_resource_disable(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(self.fixture_cib_enabled)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.disable(self.env, ["B", "X", "Y", "A"], False),
+ fixture.report_not_found("X", "resources"),
+ fixture.report_not_found("Y", "resources"),
+ )
+ self.runner.assert_everything_launched()
+
+
+class Wait(ResourceWithStateTest):
+ fixture_status_running = """
+ <resources>
+ <resource id="A" managed="true" role="Started">
+ <node name="node1" id="1" cached="false"/>
+ </resource>
+ <resource id="B" managed="true" role="Started">
+ <node name="node2" id="1" cached="false"/>
+ </resource>
+ </resources>
+ """
+ fixture_status_stopped = """
+ <resources>
+ <resource id="A" managed="true" role="Stopped">
+ </resource>
+ <resource id="B" managed="true" role="Stopped">
+ </resource>
+ </resources>
+ """
+ fixture_status_mixed = """
+ <resources>
+ <resource id="A" managed="true" role="Stopped">
+ </resource>
+ <resource id="B" managed="true" role="Stopped">
+ </resource>
+ </resources>
+ """
+ fixture_wait_timeout_error = outdent(
+ """\
+ Pending actions:
+ Action 12: B-node2-stop on node2
+ Error performing operation: Timer expired
+ """
+ )
+
+ def test_enable_dont_wait_on_error(self):
+ self.runner.set_runs(
+ fixture.call_wait_supported()
+ +
+ fixture.call_cib_load(
+ fixture.cib_resources(fixture_primitive_cib_disabled)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.enable(self.env, ["B"], 10),
+ fixture.report_not_found("B", "resources"),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_disable_dont_wait_on_error(self):
+ self.runner.set_runs(
+ fixture.call_wait_supported()
+ +
+ fixture.call_cib_load(
+ fixture.cib_resources(fixture_primitive_cib_enabled)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.disable(self.env, ["B"], 10),
+ fixture.report_not_found("B", "resources"),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_enable_resource_stopped(self):
+ self.runner.set_runs(
+ fixture.call_wait_supported()
+ +
+ fixture.calls_cib_and_status(
+ fixture_two_primitives_cib_disabled_both,
+ self.fixture_status_stopped,
+ fixture_two_primitives_cib_enabled
+ )
+ +
+ fixture.call_wait(10)
+ +
+ fixture.call_status(
+ fixture.state_complete(self.fixture_status_stopped)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.enable(self.env, ["A", "B"], 10),
+ fixture.report_resource_not_running("A", severities.ERROR),
+ fixture.report_resource_not_running("B", severities.ERROR),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_disable_resource_stopped(self):
+ self.runner.set_runs(
+ fixture.call_wait_supported()
+ +
+ fixture.calls_cib_and_status(
+ fixture_two_primitives_cib_enabled,
+ self.fixture_status_running,
+ fixture_two_primitives_cib_disabled_both
+ )
+ +
+ fixture.call_wait(10)
+ +
+ fixture.call_status(
+ fixture.state_complete(self.fixture_status_stopped)
+ )
+ )
+
+ resource.disable(self.env, ["A", "B"], 10)
+ self.env.report_processor.assert_reports([
+ fixture.report_resource_not_running("A"),
+ fixture.report_resource_not_running("B"),
+ ])
+ self.runner.assert_everything_launched()
+
+ def test_enable_resource_running(self):
+ self.runner.set_runs(
+ fixture.call_wait_supported()
+ +
+ fixture.calls_cib_and_status(
+ fixture_two_primitives_cib_disabled_both,
+ self.fixture_status_stopped,
+ fixture_two_primitives_cib_enabled
+ )
+ +
+ fixture.call_wait(10)
+ +
+ fixture.call_status(
+ fixture.state_complete(self.fixture_status_running)
+ )
+ )
+
+ resource.enable(self.env, ["A", "B"], 10)
+
+ self.env.report_processor.assert_reports([
+ fixture.report_resource_running("A", {"Started": ["node1"]}),
+ fixture.report_resource_running("B", {"Started": ["node2"]}),
+ ])
+ self.runner.assert_everything_launched()
+
+ def test_disable_resource_running(self):
+ self.runner.set_runs(
+ fixture.call_wait_supported()
+ +
+ fixture.calls_cib_and_status(
+ fixture_two_primitives_cib_enabled,
+ self.fixture_status_running,
+ fixture_two_primitives_cib_disabled_both
+ )
+ +
+ fixture.call_wait(10)
+ +
+ fixture.call_status(
+ fixture.state_complete(self.fixture_status_running)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.disable(self.env, ["A", "B"], 10),
+ fixture.report_resource_running(
+ "A", {"Started": ["node1"]}, severities.ERROR
+ ),
+ fixture.report_resource_running(
+ "B", {"Started": ["node2"]}, severities.ERROR
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_enable_wait_timeout(self):
+ self.runner.set_runs(
+ fixture.call_wait_supported()
+ +
+ fixture.calls_cib_and_status(
+ fixture_primitive_cib_disabled,
+ self.fixture_status_stopped,
+ fixture_primitive_cib_enabled
+ )
+ +
+ fixture.call_wait(
+ 10, retval=62, stderr=self.fixture_wait_timeout_error
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.enable(self.env, ["A"], 10),
+ fixture.report_wait_for_idle_timed_out(
+ self.fixture_wait_timeout_error
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_disable_wait_timeout(self):
+ self.runner.set_runs(
+ fixture.call_wait_supported()
+ +
+ fixture.calls_cib_and_status(
+ fixture_primitive_cib_enabled,
+ self.fixture_status_running,
+ fixture_primitive_cib_disabled
+ )
+ +
+ fixture.call_wait(
+ 10, retval=62, stderr=self.fixture_wait_timeout_error
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.disable(self.env, ["A"], 10),
+ fixture.report_wait_for_idle_timed_out(
+ self.fixture_wait_timeout_error
+ ),
+ )
+ self.runner.assert_everything_launched()
+
+
+class WaitClone(ResourceWithStateTest):
+ fixture_status_running = """
+ <resources>
+ <clone id="A-clone" managed="true" multi_state="false" unique="false">
+ <resource id="A" managed="true" role="Started">
+ <node name="node1" id="1" cached="false"/>
+ </resource>
+ <resource id="A" managed="true" role="Started">
+ <node name="node2" id="2" cached="false"/>
+ </resource>
+ </clone>
+ </resources>
+ """
+ fixture_status_stopped = """
+ <resources>
+ <clone id="A-clone" managed="true" multi_state="false" unique="false">
+ <resource id="A" managed="true" role="Stopped">
+ </resource>
+ <resource id="A" managed="true" role="Stopped">
+ </resource>
+ </clone>
+ </resources>
+ """
+ def test_disable_clone(self):
+ self.runner.set_runs(
+ fixture.call_wait_supported()
+ +
+ fixture.calls_cib_and_status(
+ fixture_clone_cib_enabled,
+ self.fixture_status_running,
+ fixture_clone_cib_disabled_clone
+ )
+ +
+ fixture.call_wait(10)
+ +
+ fixture.call_status(
+ fixture.state_complete(self.fixture_status_stopped)
+ )
+ )
+
+ resource.disable(self.env, ["A-clone"], 10)
+ self.env.report_processor.assert_reports([
+ (
+ severities.INFO,
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ {
+ "resource_id": "A-clone",
+ },
+ None
+ )
+ ])
+ self.runner.assert_everything_launched()
+
+ def test_enable_clone(self):
+ self.runner.set_runs(
+ fixture.call_wait_supported()
+ +
+ fixture.calls_cib_and_status(
+ fixture_clone_cib_disabled_clone,
+ self.fixture_status_stopped,
+ fixture_clone_cib_enabled
+ )
+ +
+ fixture.call_wait(10)
+ +
+ fixture.call_status(
+ fixture.state_complete(self.fixture_status_running)
+ )
+ )
+
+ resource.enable(self.env, ["A-clone"], 10)
+
+ self.env.report_processor.assert_reports([
+ (
+ severities.INFO,
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ {
+ "resource_id": "A-clone",
+ "roles_with_nodes": {"Started": ["node1", "node2"]},
+ },
+ None
+ )
+ ])
+ self.runner.assert_everything_launched()
+
+
+class DisableGroup(ResourceWithStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_group_cib_enabled,
+ fixture_group_status_managed,
+ lambda: resource.disable(self.env, ["A1"], False),
+ fixture_group_cib_disabled_primitive
+ )
+
+ def test_group(self):
+ self.assert_command_effect(
+ fixture_group_cib_enabled,
+ fixture_group_status_managed,
+ lambda: resource.disable(self.env, ["A"], False),
+ fixture_group_cib_disabled_group
+ )
+
+ def test_primitive_unmanaged(self):
+ self.assert_command_effect(
+ fixture_group_cib_enabled,
+ fixture_group_status_unmanaged,
+ lambda: resource.disable(self.env, ["A1"], False),
+ fixture_group_cib_disabled_primitive,
+ reports=[
+ fixture_report_unmanaged("A1"),
+ ]
+ )
+
+ def test_group_unmanaged(self):
+ self.assert_command_effect(
+ fixture_group_cib_enabled,
+ fixture_group_status_unmanaged,
+ lambda: resource.disable(self.env, ["A"], False),
+ fixture_group_cib_disabled_group,
+ reports=[
+ fixture_report_unmanaged("A"),
+ ]
+ )
+
+
+class EnableGroup(ResourceWithStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_group_cib_disabled_primitive,
+ fixture_group_status_managed,
+ lambda: resource.enable(self.env, ["A1"], False),
+ fixture_group_cib_enabled
+ )
+
+ def test_primitive_disabled_both(self):
+ self.assert_command_effect(
+ fixture_group_cib_disabled_both,
+ fixture_group_status_managed,
+ lambda: resource.enable(self.env, ["A1"], False),
+ fixture_group_cib_disabled_group
+ )
+
+ def test_group(self):
+ self.assert_command_effect(
+ fixture_group_cib_disabled_group,
+ fixture_group_status_managed,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_group_cib_enabled
+ )
+
+ def test_group_both_disabled(self):
+ self.assert_command_effect(
+ fixture_group_cib_disabled_both,
+ fixture_group_status_managed,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_group_cib_disabled_primitive
+ )
+
+ def test_primitive_unmanaged(self):
+ self.assert_command_effect(
+ fixture_group_cib_disabled_primitive,
+ fixture_group_status_unmanaged,
+ lambda: resource.enable(self.env, ["A1"], False),
+ fixture_group_cib_enabled,
+ reports=[
+ fixture_report_unmanaged("A1"),
+ ]
+ )
+
+ def test_group_unmanaged(self):
+ self.assert_command_effect(
+ fixture_group_cib_disabled_group,
+ fixture_group_status_unmanaged,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_group_cib_enabled,
+ reports=[
+ fixture_report_unmanaged("A"),
+ ]
+ )
+
+
+class DisableClone(ResourceWithStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_clone_cib_enabled,
+ fixture_clone_status_managed,
+ lambda: resource.disable(self.env, ["A"], False),
+ fixture_clone_cib_disabled_primitive
+ )
+
+ def test_clone(self):
+ self.assert_command_effect(
+ fixture_clone_cib_enabled,
+ fixture_clone_status_managed,
+ lambda: resource.disable(self.env, ["A-clone"], False),
+ fixture_clone_cib_disabled_clone
+ )
+
+ def test_primitive_unmanaged(self):
+ self.assert_command_effect(
+ fixture_clone_cib_enabled,
+ fixture_clone_status_unmanaged,
+ lambda: resource.disable(self.env, ["A"], False),
+ fixture_clone_cib_disabled_primitive,
+ reports=[
+ fixture_report_unmanaged("A"),
+ ]
+ )
+
+ def test_clone_unmanaged(self):
+ self.assert_command_effect(
+ fixture_clone_cib_enabled,
+ fixture_clone_status_unmanaged,
+ lambda: resource.disable(self.env, ["A-clone"], False),
+ fixture_clone_cib_disabled_clone,
+ reports=[
+ fixture_report_unmanaged("A-clone"),
+ ]
+ )
+
+
+class EnableClone(ResourceWithStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_clone_cib_disabled_primitive,
+ fixture_clone_status_managed,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_clone_cib_enabled
+ )
+
+ def test_primitive_disabled_both(self):
+ self.assert_command_effect(
+ fixture_clone_cib_disabled_both,
+ fixture_clone_status_managed,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_clone_cib_enabled
+ )
+
+ def test_clone(self):
+ self.assert_command_effect(
+ fixture_clone_cib_disabled_clone,
+ fixture_clone_status_managed,
+ lambda: resource.enable(self.env, ["A-clone"], False),
+ fixture_clone_cib_enabled
+ )
+
+ def test_clone_disabled_both(self):
+ self.assert_command_effect(
+ fixture_clone_cib_disabled_both,
+ fixture_clone_status_managed,
+ lambda: resource.enable(self.env, ["A-clone"], False),
+ fixture_clone_cib_enabled
+ )
+
+ def test_primitive_unmanaged(self):
+ self.assert_command_effect(
+ fixture_clone_cib_disabled_primitive,
+ fixture_clone_status_unmanaged,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_clone_cib_enabled,
+ reports=[
+ fixture_report_unmanaged("A-clone"),
+ fixture_report_unmanaged("A"),
+ ]
+ )
+
+ def test_clone_unmanaged(self):
+ self.assert_command_effect(
+ fixture_clone_cib_disabled_clone,
+ fixture_clone_status_unmanaged,
+ lambda: resource.enable(self.env, ["A-clone"], False),
+ fixture_clone_cib_enabled,
+ reports=[
+ fixture_report_unmanaged("A-clone"),
+ fixture_report_unmanaged("A"),
+ ]
+ )
+
+
+class DisableMaster(ResourceWithStateTest):
+ # same as clone, minimum tests in here
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_master_cib_enabled,
+ fixture_master_status_managed,
+ lambda: resource.disable(self.env, ["A"], False),
+ fixture_master_cib_disabled_primitive
+ )
+
+ def test_master(self):
+ self.assert_command_effect(
+ fixture_master_cib_enabled,
+ fixture_master_status_managed,
+ lambda: resource.disable(self.env, ["A-master"], False),
+ fixture_master_cib_disabled_master
+ )
+
+
+class EnableMaster(ResourceWithStateTest):
+ # same as clone, minimum tests in here
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_master_cib_disabled_primitive,
+ fixture_master_status_managed,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_master_cib_enabled
+ )
+
+ def test_primitive_disabled_both(self):
+ self.assert_command_effect(
+ fixture_master_cib_disabled_both,
+ fixture_master_status_managed,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_master_cib_enabled
+ )
+
+ def test_master(self):
+ self.assert_command_effect(
+ fixture_master_cib_disabled_master,
+ fixture_master_status_managed,
+ lambda: resource.enable(self.env, ["A-master"], False),
+ fixture_master_cib_enabled
+ )
+
+ def test_master_disabled_both(self):
+ self.assert_command_effect(
+ fixture_master_cib_disabled_both,
+ fixture_master_status_managed,
+ lambda: resource.enable(self.env, ["A-master"], False),
+ fixture_master_cib_enabled
+ )
+
+
+class DisableClonedGroup(ResourceWithStateTest):
+ def test_clone(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_enabled,
+ fixture_clone_group_status_managed,
+ lambda: resource.disable(self.env, ["A-clone"], False),
+ fixture_clone_group_cib_disabled_clone
+ )
+
+ def test_group(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_enabled,
+ fixture_clone_group_status_managed,
+ lambda: resource.disable(self.env, ["A"], False),
+ fixture_clone_group_cib_disabled_group
+ )
+
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_enabled,
+ fixture_clone_group_status_managed,
+ lambda: resource.disable(self.env, ["A1"], False),
+ fixture_clone_group_cib_disabled_primitive
+ )
+
+ def test_clone_unmanaged(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_enabled,
+ fixture_clone_group_status_unmanaged,
+ lambda: resource.disable(self.env, ["A-clone"], False),
+ fixture_clone_group_cib_disabled_clone,
+ reports=[
+ fixture_report_unmanaged("A-clone"),
+ ]
+ )
+
+ def test_group_unmanaged(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_enabled,
+ fixture_clone_group_status_unmanaged,
+ lambda: resource.disable(self.env, ["A"], False),
+ fixture_clone_group_cib_disabled_group,
+ reports=[
+ fixture_report_unmanaged("A"),
+ ]
+ )
+
+ def test_primitive_unmanaged(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_enabled,
+ fixture_clone_group_status_unmanaged,
+ lambda: resource.disable(self.env, ["A1"], False),
+ fixture_clone_group_cib_disabled_primitive,
+ reports=[
+ fixture_report_unmanaged("A1"),
+ ]
+ )
+
+
+class EnableClonedGroup(ResourceWithStateTest):
+ def test_clone(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_disabled_clone,
+ fixture_clone_group_status_managed,
+ lambda: resource.enable(self.env, ["A-clone"], False),
+ fixture_clone_group_cib_enabled,
+ )
+
+ def test_clone_disabled_all(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_disabled_all,
+ fixture_clone_group_status_managed,
+ lambda: resource.enable(self.env, ["A-clone"], False),
+ fixture_clone_group_cib_disabled_primitive
+ )
+
+ def test_group(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_disabled_group,
+ fixture_clone_group_status_managed,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_clone_group_cib_enabled
+ )
+
+ def test_group_disabled_all(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_disabled_all,
+ fixture_clone_group_status_managed,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_clone_group_cib_disabled_primitive
+ )
+
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_disabled_primitive,
+ fixture_clone_group_status_managed,
+ lambda: resource.enable(self.env, ["A1"], False),
+ fixture_clone_group_cib_enabled
+ )
+
+ def test_primitive_disabled_all(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_disabled_all,
+ fixture_clone_group_status_managed,
+ lambda: resource.enable(self.env, ["A1"], False),
+ fixture_clone_group_cib_disabled_clone_group
+ )
+
+ def test_clone_unmanaged(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_disabled_clone,
+ fixture_clone_group_status_unmanaged,
+ lambda: resource.enable(self.env, ["A-clone"], False),
+ fixture_clone_group_cib_enabled,
+ reports=[
+ fixture_report_unmanaged("A-clone"),
+ fixture_report_unmanaged("A"),
+ ]
+ )
+
+ def test_group_unmanaged(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_disabled_group,
+ fixture_clone_group_status_unmanaged,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_clone_group_cib_enabled,
+ reports=[
+ fixture_report_unmanaged("A"),
+ fixture_report_unmanaged("A-clone"),
+ ]
+ )
+
+ def test_primitive_unmanaged(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_disabled_primitive,
+ fixture_clone_group_status_unmanaged,
+ lambda: resource.enable(self.env, ["A1"], False),
+ fixture_clone_group_cib_enabled,
+ reports=[
+ fixture_report_unmanaged("A1"),
+ ]
+ )
+
+
+@skip_unless_pacemaker_supports_bundle
+class DisableBundle(ResourceWithStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_bundle_cib_enabled,
+ fixture_bundle_status_managed,
+ lambda: resource.disable(self.env, ["A"], False),
+ fixture_bundle_cib_disabled_primitive
+ )
+
+ def test_bundle(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(fixture_bundle_cib_enabled)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.disable(self.env, ["A-bundle"], False),
+ fixture.report_not_for_bundles("A-bundle")
+ )
+ self.runner.assert_everything_launched()
+
+ def test_primitive_unmanaged(self):
+ self.assert_command_effect(
+ fixture_bundle_cib_enabled,
+ fixture_bundle_status_unmanaged,
+ lambda: resource.disable(self.env, ["A"], False),
+ fixture_bundle_cib_disabled_primitive,
+ reports=[
+ fixture_report_unmanaged("A"),
+ ]
+ )
+
+
+@skip_unless_pacemaker_supports_bundle
+class EnableBundle(ResourceWithStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_bundle_cib_disabled_primitive,
+ fixture_bundle_status_managed,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_bundle_cib_enabled
+ )
+
+ def test_bundle(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(fixture_bundle_cib_enabled)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.enable(self.env, ["A-bundle"], False),
+ fixture.report_not_for_bundles("A-bundle")
+ )
+ self.runner.assert_everything_launched()
+
+ def test_primitive_unmanaged(self):
+ self.assert_command_effect(
+ fixture_bundle_cib_disabled_primitive,
+ fixture_bundle_status_unmanaged,
+ lambda: resource.enable(self.env, ["A"], False),
+ fixture_bundle_cib_enabled,
+ reports=[
+ fixture_report_unmanaged("A"),
+ ]
+ )
diff --git a/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py b/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
new file mode 100644
index 0000000..6d8c787
--- /dev/null
+++ b/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
@@ -0,0 +1,1092 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+
+from pcs.common import report_codes
+from pcs.lib.commands import resource
+from pcs.lib.commands.test.resource.common import ResourceWithoutStateTest
+import pcs.lib.commands.test.resource.fixture as fixture
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import assert_raise_library_error
+
+
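+# CIB fixtures: "unmanaged" variants add an is-managed=false nvpair to the
+# meta_attributes of the affected element; "_op_disabled" variants additionally
+# set enabled="false" on the monitor operations.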
+fixture_primitive_cib_managed = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ </primitive>
+ </resources>
+"""
+fixture_primitive_cib_unmanaged = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ </resources>
+"""
+
+fixture_primitive_cib_managed_op_enabled = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Stateful">
+ <operations>
+ <op id="A-start" name="start" />
+ <op id="A-stop" name="stop" />
+ <op id="A-monitor-m" name="monitor" role="Master" />
+ <op id="A-monitor-s" name="monitor" role="Slave" />
+ </operations>
+ </primitive>
+ </resources>
+"""
+fixture_primitive_cib_managed_op_disabled = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Stateful">
+ <operations>
+ <op id="A-start" name="start" />
+ <op id="A-stop" name="stop" />
+ <op id="A-monitor-m" name="monitor" role="Master"
+ enabled="false" />
+ <op id="A-monitor-s" name="monitor" role="Slave"
+ enabled="false" />
+ </operations>
+ </primitive>
+ </resources>
+"""
+fixture_primitive_cib_unmanaged_op_enabled = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Stateful">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <operations>
+ <op id="A-start" name="start" />
+ <op id="A-stop" name="stop" />
+ <op id="A-monitor-m" name="monitor" role="Master" />
+ <op id="A-monitor-s" name="monitor" role="Slave" />
+ </operations>
+ </primitive>
+ </resources>
+"""
+fixture_primitive_cib_unmanaged_op_disabled = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Stateful">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <operations>
+ <op id="A-start" name="start" />
+ <op id="A-stop" name="stop" />
+ <op id="A-monitor-m" name="monitor" role="Master"
+ enabled="false" />
+ <op id="A-monitor-s" name="monitor" role="Slave"
+ enabled="false" />
+ </operations>
+ </primitive>
+ </resources>
+"""
+
+fixture_group_cib_managed = """
+ <resources>
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </group>
+ </resources>
+"""
+fixture_group_cib_unmanaged_resource = """
+ <resources>
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A1-meta_attributes">
+ <nvpair id="A1-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </group>
+ </resources>
+"""
+fixture_group_cib_unmanaged_resource_and_group = """
+ <resources>
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A1-meta_attributes">
+ <nvpair id="A1-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </group>
+ </resources>
+"""
+fixture_group_cib_unmanaged_all_resources = """
+ <resources>
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A1-meta_attributes">
+ <nvpair id="A1-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A2-meta_attributes">
+ <nvpair id="A2-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ </group>
+ </resources>
+"""
+
+fixture_clone_cib_managed = """
+ <resources>
+ <clone id="A-clone">
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </clone>
+ </resources>
+"""
+fixture_clone_cib_unmanaged_clone = """
+ <resources>
+ <clone id="A-clone">
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </clone>
+ </resources>
+"""
+fixture_clone_cib_unmanaged_primitive = """
+ <resources>
+ <clone id="A-clone">
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ </clone>
+ </resources>
+"""
+fixture_clone_cib_unmanaged_both = """
+ <resources>
+ <clone id="A-clone">
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ </clone>
+ </resources>
+"""
+
+fixture_clone_cib_managed_op_enabled = """
+ <resources>
+ <clone id="A-clone">
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-start" name="start" />
+ <op id="A-stop" name="stop" />
+ <op id="A-monitor" name="monitor"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+"""
+fixture_clone_cib_unmanaged_primitive_op_disabled = """
+ <resources>
+ <clone id="A-clone">
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <operations>
+ <op id="A-start" name="start" />
+ <op id="A-stop" name="stop" />
+ <op id="A-monitor" name="monitor" enabled="false"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+"""
+
+fixture_master_cib_managed = """
+ <resources>
+ <master id="A-master">
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </master>
+ </resources>
+"""
+fixture_master_cib_unmanaged_master = """
+ <resources>
+ <master id="A-master">
+ <meta_attributes id="A-master-meta_attributes">
+ <nvpair id="A-master-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </master>
+ </resources>
+"""
+fixture_master_cib_unmanaged_primitive = """
+ <resources>
+ <master id="A-master">
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ </master>
+ </resources>
+"""
+fixture_master_cib_unmanaged_both = """
+ <resources>
+ <master id="A-master">
+ <meta_attributes id="A-master-meta_attributes">
+ <nvpair id="A-master-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ </master>
+ </resources>
+"""
+
+fixture_master_cib_managed_op_enabled = """
+ <resources>
+ <master id="A-master">
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-start" name="start" />
+ <op id="A-stop" name="stop" />
+ <op id="A-monitor" name="monitor"/>
+ </operations>
+ </primitive>
+ </master>
+ </resources>
+"""
+fixture_master_cib_unmanaged_primitive_op_disabled = """
+ <resources>
+ <master id="A-master">
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <operations>
+ <op id="A-start" name="start" />
+ <op id="A-stop" name="stop" />
+ <op id="A-monitor" name="monitor" enabled="false"/>
+ </operations>
+ </primitive>
+ </master>
+ </resources>
+"""
+
+fixture_clone_group_cib_managed = """
+ <resources>
+ <clone id="A-clone">
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_cib_unmanaged_primitive = """
+ <resources>
+ <clone id="A-clone">
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="A1-meta_attributes">
+ <nvpair id="A1-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_cib_unmanaged_all_primitives = """
+ <resources>
+ <clone id="A-clone">
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="A1-meta_attributes">
+ <nvpair id="A1-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="A2-meta_attributes">
+ <nvpair id="A2-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_cib_unmanaged_clone = """
+ <resources>
+ <clone id="A-clone">
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_cib_unmanaged_everything = """
+ <resources>
+ <clone id="A-clone">
+ <meta_attributes id="A-clone-meta_attributes">
+ <nvpair id="A-clone-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <group id="A">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="A1-meta_attributes">
+ <nvpair id="A1-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="A2-meta_attributes">
+ <nvpair id="A2-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+
+fixture_clone_group_cib_managed_op_enabled = """
+ <resources>
+ <clone id="A-clone">
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A1-start" name="start" />
+ <op id="A1-stop" name="stop" />
+ <op id="A1-monitor" name="monitor" />
+ </operations>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A2-start" name="start" />
+ <op id="A2-stop" name="stop" />
+ <op id="A2-monitor" name="monitor" />
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_cib_unmanaged_primitive_op_disabled = """
+ <resources>
+ <clone id="A-clone">
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="A1-meta_attributes">
+ <nvpair id="A1-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <operations>
+ <op id="A1-start" name="start" />
+ <op id="A1-stop" name="stop" />
+ <op id="A1-monitor" name="monitor" enabled="false" />
+ </operations>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="A2-start" name="start" />
+ <op id="A2-stop" name="stop" />
+ <op id="A2-monitor" name="monitor" />
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+fixture_clone_group_cib_unmanaged_all_primitives_op_disabled = """
+ <resources>
+ <clone id="A-clone">
+ <group id="A">
+ <primitive id="A1" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="A1-meta_attributes">
+ <nvpair id="A1-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <operations>
+ <op id="A1-start" name="start" />
+ <op id="A1-stop" name="stop" />
+ <op id="A1-monitor" name="monitor" enabled="false" />
+ </operations>
+ </primitive>
+ <primitive id="A2" class="ocf" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="A2-meta_attributes">
+ <nvpair id="A2-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ <operations>
+ <op id="A2-start" name="start" />
+ <op id="A2-stop" name="stop" />
+ <op id="A2-monitor" name="monitor" enabled="false" />
+ </operations>
+ </primitive>
+ </group>
+ </clone>
+ </resources>
+"""
+
+fixture_bundle_cib_managed = """
+ <resources>
+ <bundle id="A-bundle">
+ <docker image="pcs:test" />
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ </primitive>
+ </bundle>
+ </resources>
+"""
+
+fixture_bundle_cib_unmanaged_primitive = """
+ <resources>
+ <bundle id="A-bundle">
+ <docker image="pcs:test" />
+ <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ </bundle>
+ </resources>
+"""
+
+def fixture_report_no_monitors(resource):
+ return (
+ severities.WARNING,
+ report_codes.RESOURCE_MANAGED_NO_MONITOR_ENABLED,
+ {
+ "resource_id": resource,
+ },
+ None
+ )
+
+
+class UnmanagePrimitive(ResourceWithoutStateTest):
+ def test_nonexistent_resource(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(fixture_primitive_cib_managed)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.unmanage(self.env, ["B"]),
+ fixture.report_not_found("B", "resources")
+ )
+ self.runner.assert_everything_launched()
+
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_primitive_cib_managed,
+ lambda: resource.unmanage(self.env, ["A"]),
+ fixture_primitive_cib_unmanaged
+ )
+
+ def test_primitive_unmanaged(self):
+ self.assert_command_effect(
+ fixture_primitive_cib_unmanaged,
+ lambda: resource.unmanage(self.env, ["A"]),
+ fixture_primitive_cib_unmanaged
+ )
+
+
+class ManagePrimitive(ResourceWithoutStateTest):
+ def test_nonexistent_resource(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(fixture_primitive_cib_unmanaged)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.manage(self.env, ["B"]),
+ fixture.report_not_found("B", "resources")
+ )
+ self.runner.assert_everything_launched()
+
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_primitive_cib_unmanaged,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_primitive_cib_managed
+ )
+
+ def test_primitive_managed(self):
+ self.assert_command_effect(
+ fixture_primitive_cib_managed,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_primitive_cib_managed
+ )
+
+
+class UnmanageGroup(ResourceWithoutStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_group_cib_managed,
+ lambda: resource.unmanage(self.env, ["A1"]),
+ fixture_group_cib_unmanaged_resource
+ )
+
+ def test_group(self):
+ self.assert_command_effect(
+ fixture_group_cib_managed,
+ lambda: resource.unmanage(self.env, ["A"]),
+ fixture_group_cib_unmanaged_all_resources
+ )
+
+
+class ManageGroup(ResourceWithoutStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_group_cib_unmanaged_all_resources,
+ lambda: resource.manage(self.env, ["A2"]),
+ fixture_group_cib_unmanaged_resource
+ )
+
+ def test_primitive_unmanaged_group(self):
+ self.assert_command_effect(
+ fixture_group_cib_unmanaged_resource_and_group,
+ lambda: resource.manage(self.env, ["A1"]),
+ fixture_group_cib_managed
+ )
+
+ def test_group(self):
+ self.assert_command_effect(
+ fixture_group_cib_unmanaged_all_resources,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_group_cib_managed
+ )
+
+ def test_group_unmanaged_group(self):
+ self.assert_command_effect(
+ fixture_group_cib_unmanaged_resource_and_group,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_group_cib_managed
+ )
+
+
+class UnmanageClone(ResourceWithoutStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_clone_cib_managed,
+ lambda: resource.unmanage(self.env, ["A"]),
+ fixture_clone_cib_unmanaged_primitive
+ )
+
+ def test_clone(self):
+ self.assert_command_effect(
+ fixture_clone_cib_managed,
+ lambda: resource.unmanage(self.env, ["A-clone"]),
+ fixture_clone_cib_unmanaged_primitive
+ )
+
+
+class ManageClone(ResourceWithoutStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_clone_cib_unmanaged_clone,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_clone_cib_managed
+ )
+
+ def test_primitive_unmanaged_primitive(self):
+ self.assert_command_effect(
+ fixture_clone_cib_unmanaged_primitive,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_clone_cib_managed
+ )
+
+ def test_primitive_unmanaged_both(self):
+ self.assert_command_effect(
+ fixture_clone_cib_unmanaged_both,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_clone_cib_managed
+ )
+
+ def test_clone(self):
+ self.assert_command_effect(
+ fixture_clone_cib_unmanaged_clone,
+ lambda: resource.manage(self.env, ["A-clone"]),
+ fixture_clone_cib_managed
+ )
+
+ def test_clone_unmanaged_primitive(self):
+ self.assert_command_effect(
+ fixture_clone_cib_unmanaged_primitive,
+ lambda: resource.manage(self.env, ["A-clone"]),
+ fixture_clone_cib_managed
+ )
+
+ def test_clone_unmanaged_both(self):
+ self.assert_command_effect(
+ fixture_clone_cib_unmanaged_both,
+ lambda: resource.manage(self.env, ["A-clone"]),
+ fixture_clone_cib_managed
+ )
+
+
+class UnmanageMaster(ResourceWithoutStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_master_cib_managed,
+ lambda: resource.unmanage(self.env, ["A"]),
+ fixture_master_cib_unmanaged_primitive
+ )
+
+ def test_master(self):
+ self.assert_command_effect(
+ fixture_master_cib_managed,
+ lambda: resource.unmanage(self.env, ["A-master"]),
+ fixture_master_cib_unmanaged_primitive
+ )
+
+
+class ManageMaster(ResourceWithoutStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_master_cib_unmanaged_primitive,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_master_cib_managed
+ )
+
+ def test_primitive_unmanaged_master(self):
+ self.assert_command_effect(
+ fixture_master_cib_unmanaged_master,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_master_cib_managed
+ )
+
+ def test_primitive_unmanaged_both(self):
+ self.assert_command_effect(
+ fixture_master_cib_unmanaged_both,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_master_cib_managed
+ )
+
+ def test_master(self):
+ self.assert_command_effect(
+ fixture_master_cib_unmanaged_master,
+ lambda: resource.manage(self.env, ["A-master"]),
+ fixture_master_cib_managed
+ )
+
+ def test_master_unmanaged_primitive(self):
+ self.assert_command_effect(
+ fixture_master_cib_unmanaged_primitive,
+ lambda: resource.manage(self.env, ["A-master"]),
+ fixture_master_cib_managed
+ )
+
+ def test_master_unmanaged_both(self):
+ self.assert_command_effect(
+ fixture_master_cib_unmanaged_both,
+ lambda: resource.manage(self.env, ["A-master"]),
+ fixture_master_cib_managed
+ )
+
+
+class UnmanageClonedGroup(ResourceWithoutStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_managed,
+ lambda: resource.unmanage(self.env, ["A1"]),
+ fixture_clone_group_cib_unmanaged_primitive
+ )
+
+ def test_group(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_managed,
+ lambda: resource.unmanage(self.env, ["A"]),
+ fixture_clone_group_cib_unmanaged_all_primitives
+ )
+
+ def test_clone(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_managed,
+ lambda: resource.unmanage(self.env, ["A-clone"]),
+ fixture_clone_group_cib_unmanaged_all_primitives
+ )
+
+
+class ManageClonedGroup(ResourceWithoutStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_unmanaged_primitive,
+ lambda: resource.manage(self.env, ["A1"]),
+ fixture_clone_group_cib_managed
+ )
+
+ def test_primitive_unmanaged_all(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_unmanaged_everything,
+ lambda: resource.manage(self.env, ["A2"]),
+ fixture_clone_group_cib_unmanaged_primitive
+ )
+
+ def test_group(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_unmanaged_all_primitives,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_clone_group_cib_managed
+ )
+
+ def test_group_unmanaged_all(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_unmanaged_everything,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_clone_group_cib_managed
+ )
+
+ def test_clone(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_unmanaged_clone,
+ lambda: resource.manage(self.env, ["A-clone"]),
+ fixture_clone_group_cib_managed
+ )
+
+ def test_clone_unmanaged_all(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_unmanaged_everything,
+ lambda: resource.manage(self.env, ["A-clone"]),
+ fixture_clone_group_cib_managed
+ )
+
+
+class UnmanageBundle(ResourceWithoutStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_bundle_cib_managed,
+ lambda: resource.unmanage(self.env, ["A"]),
+ fixture_bundle_cib_unmanaged_primitive
+ )
+
+ def test_bundle(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(fixture_bundle_cib_managed)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.unmanage(self.env, ["A-bundle"], False),
+ fixture.report_not_for_bundles("A-bundle")
+ )
+ self.runner.assert_everything_launched()
+
+
+class ManageBundle(ResourceWithoutStateTest):
+ def test_primitive(self):
+ self.assert_command_effect(
+ fixture_bundle_cib_unmanaged_primitive,
+ lambda: resource.manage(self.env, ["A"]),
+ fixture_bundle_cib_managed,
+ )
+
+ def test_bundle(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(fixture_bundle_cib_unmanaged_primitive)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.manage(self.env, ["A-bundle"], False),
+ fixture.report_not_for_bundles("A-bundle")
+ )
+ self.runner.assert_everything_launched()
+
+
+class MoreResources(ResourceWithoutStateTest):
+ fixture_cib_managed = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive class="ocf" id="C" provider="heartbeat" type="Dummy">
+ </primitive>
+ </resources>
+ """
+ fixture_cib_unmanaged = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ <meta_attributes id="B-meta_attributes">
+ <nvpair id="B-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ <primitive class="ocf" id="C" provider="heartbeat" type="Dummy">
+ <meta_attributes id="C-meta_attributes">
+ <nvpair id="C-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ </resources>
+ """
+
+ def test_success_unmanage(self):
+ fixture_cib_unmanaged = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive class="ocf" id="C" provider="heartbeat" type="Dummy">
+ <meta_attributes id="C-meta_attributes">
+ <nvpair id="C-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ </resources>
+ """
+ self.assert_command_effect(
+ self.fixture_cib_managed,
+ lambda: resource.unmanage(self.env, ["A", "C"]),
+ fixture_cib_unmanaged
+ )
+
+ def test_success_manage(self):
+ fixture_cib_managed = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ <meta_attributes id="B-meta_attributes">
+ <nvpair id="B-meta_attributes-is-managed"
+ name="is-managed" value="false" />
+ </meta_attributes>
+ </primitive>
+ <primitive class="ocf" id="C" provider="heartbeat" type="Dummy">
+ </primitive>
+ </resources>
+ """
+ self.assert_command_effect(
+ self.fixture_cib_unmanaged,
+ lambda: resource.manage(self.env, ["A", "C"]),
+ fixture_cib_managed
+ )
+
+ def test_bad_resource_unmanage(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(self.fixture_cib_managed)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.unmanage(self.env, ["B", "X", "Y", "A"]),
+ fixture.report_not_found("X", "resources"),
+ fixture.report_not_found("Y", "resources"),
+ )
+ self.runner.assert_everything_launched()
+
+ def test_bad_resource_enable(self):
+ self.runner.set_runs(
+ fixture.call_cib_load(
+ fixture.cib_resources(self.fixture_cib_unmanaged)
+ )
+ )
+
+ assert_raise_library_error(
+ lambda: resource.manage(self.env, ["B", "X", "Y", "A"]),
+ fixture.report_not_found("X", "resources"),
+ fixture.report_not_found("Y", "resources"),
+ )
+ self.runner.assert_everything_launched()
+
+
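+# When the last argument is True, unmanage also disables the resource's monitor
+# operations and manage re-enables them (compare the *_op_enabled and
+# *_op_disabled fixtures above).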
+class WithMonitor(ResourceWithoutStateTest):
+ def test_unmanage_noop(self):
+ self.assert_command_effect(
+ fixture_primitive_cib_managed,
+ lambda: resource.unmanage(self.env, ["A"], True),
+ fixture_primitive_cib_unmanaged
+ )
+
+ def test_manage_noop(self):
+ self.assert_command_effect(
+ fixture_primitive_cib_unmanaged,
+ lambda: resource.manage(self.env, ["A"], True),
+ fixture_primitive_cib_managed
+ )
+
+ def test_unmanage(self):
+ self.assert_command_effect(
+ fixture_primitive_cib_managed_op_enabled,
+ lambda: resource.unmanage(self.env, ["A"], True),
+ fixture_primitive_cib_unmanaged_op_disabled
+ )
+
+ def test_manage(self):
+ self.assert_command_effect(
+ fixture_primitive_cib_unmanaged_op_disabled,
+ lambda: resource.manage(self.env, ["A"], True),
+ fixture_primitive_cib_managed_op_enabled
+ )
+
+ def test_unmanage_enabled_monitors(self):
+ self.assert_command_effect(
+ fixture_primitive_cib_managed_op_enabled,
+ lambda: resource.unmanage(self.env, ["A"], False),
+ fixture_primitive_cib_unmanaged_op_enabled
+ )
+
+ def test_manage_disabled_monitors(self):
+ self.assert_command_effect(
+ fixture_primitive_cib_unmanaged_op_disabled,
+ lambda: resource.manage(self.env, ["A"], False),
+ fixture_primitive_cib_managed_op_disabled,
+ [
+ fixture_report_no_monitors("A"),
+ ]
+ )
+
+ def test_unmanage_clone(self):
+ self.assert_command_effect(
+ fixture_clone_cib_managed_op_enabled,
+ lambda: resource.unmanage(self.env, ["A-clone"], True),
+ fixture_clone_cib_unmanaged_primitive_op_disabled
+ )
+
+ def test_unmanage_in_clone(self):
+ self.assert_command_effect(
+ fixture_clone_cib_managed_op_enabled,
+ lambda: resource.unmanage(self.env, ["A"], True),
+ fixture_clone_cib_unmanaged_primitive_op_disabled
+ )
+
+ def test_unmanage_master(self):
+ self.assert_command_effect(
+ fixture_master_cib_managed_op_enabled,
+ lambda: resource.unmanage(self.env, ["A-master"], True),
+ fixture_master_cib_unmanaged_primitive_op_disabled
+ )
+
+ def test_unmanage_in_master(self):
+ self.assert_command_effect(
+ fixture_master_cib_managed_op_enabled,
+ lambda: resource.unmanage(self.env, ["A"], True),
+ fixture_master_cib_unmanaged_primitive_op_disabled
+ )
+
+ def test_unmanage_clone_with_group(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_managed_op_enabled,
+ lambda: resource.unmanage(self.env, ["A-clone"], True),
+ fixture_clone_group_cib_unmanaged_all_primitives_op_disabled
+ )
+
+ def test_unmanage_group_in_clone(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_managed_op_enabled,
+ lambda: resource.unmanage(self.env, ["A"], True),
+ fixture_clone_group_cib_unmanaged_all_primitives_op_disabled
+ )
+
+ def test_unmanage_in_cloned_group(self):
+ self.assert_command_effect(
+ fixture_clone_group_cib_managed_op_enabled,
+ lambda: resource.unmanage(self.env, ["A1"], True),
+ fixture_clone_group_cib_unmanaged_primitive_op_disabled
+ )
diff --git a/pcs/lib/commands/test/test_acl.py b/pcs/lib/commands/test/test_acl.py
index e756c6e..c18f63c 100644
--- a/pcs/lib/commands/test/test_acl.py
+++ b/pcs/lib/commands/test/test_acl.py
@@ -5,24 +5,12 @@ from __future__ import (
unicode_literals,
)
-
-from pcs.test.tools.assertions import (
- assert_raise_library_error,
- ExtendedAssertionsMixin,
-)
+import pcs.lib.commands.acl as cmd_acl
+from pcs.lib.env import LibraryEnvironment
+from pcs.test.tools.assertions import ExtendedAssertionsMixin
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.test.tools.pcs_unittest import mock, TestCase
-from pcs.common import report_codes
-from pcs.lib.errors import (
- LibraryError,
- ReportItemSeverity as Severities,
-)
-from pcs.lib.env import LibraryEnvironment
-
-import pcs.lib.commands.acl as cmd_acl
-import pcs.lib.cib.acl as acl_lib
-
REQUIRED_CIB_VERSION = (2, 0, 0)
@@ -44,8 +32,26 @@ class AclCommandsTest(TestCase, ExtendedAssertionsMixin):
def assert_cib_not_pushed(self):
self.assertEqual(0, self.mock_env.push_cib.call_count)
-
-
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
+class CibAclSection(TestCase):
+ def test_push_cib_on_success(self):
+ env = mock.MagicMock()
+ env.get_cib = mock.Mock(return_value="cib")
+ with cmd_acl.cib_acl_section(env):
+ pass
+ env.get_cib.assert_called_once_with(cmd_acl.REQUIRED_CIB_VERSION)
+ env.push_cib.assert_called_once_with("cib")
+
+ def test_does_not_push_cib_on_exception(self):
+ env = mock.MagicMock()
+ def run():
+ with cmd_acl.cib_acl_section(env):
+ raise AssertionError()
+ self.assertRaises(AssertionError, run)
+ env.get_cib.assert_called_once_with(cmd_acl.REQUIRED_CIB_VERSION)
+ env.push_cib.assert_not_called()
+
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.validate_permissions")
@mock.patch("pcs.lib.cib.acl.create_role")
@mock.patch("pcs.lib.cib.acl.add_permissions_to_role")
@@ -72,358 +78,99 @@ class CreateRoleTest(AclCommandsTest):
self.assert_same_cib_pushed()
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.remove_role")
class RemoveRoleTest(AclCommandsTest):
- def test_success_no_autodelete(self, mock_remove):
+ def test_success(self, mock_remove):
cmd_acl.remove_role(self.mock_env, "role_id", False)
self.assert_get_cib_called()
mock_remove.assert_called_once_with(self.cib, "role_id", False)
self.assert_same_cib_pushed()
- def test_success_autodelete(self, mock_remove):
- cmd_acl.remove_role(self.mock_env, "role_id", True)
- self.assert_get_cib_called()
- mock_remove.assert_called_once_with(self.cib, "role_id", True)
- self.assert_same_cib_pushed()
- def test_role_not_found(self, mock_remove):
- mock_remove.side_effect = acl_lib.AclRoleNotFound("role_id")
- assert_raise_library_error(
- lambda: cmd_acl.remove_role(self.mock_env, "role_id", True),
- (
- Severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "role_id",
- "id_description": "role",
- }
- )
- )
- self.assert_get_cib_called()
- mock_remove.assert_called_once_with(self.cib, "role_id", True)
- self.assert_cib_not_pushed()
-
-
-@mock.patch("pcs.lib.commands.acl._get_target_or_group")
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
+@mock.patch("pcs.lib.cib.acl.find_target_or_group")
@mock.patch("pcs.lib.cib.acl.assign_role")
-@mock.patch("pcs.lib.cib.acl.find_role")
-@mock.patch("pcs.lib.cib.acl.acl_error_to_report_item")
class AssignRoleNotSpecific(AclCommandsTest, ExtendedAssertionsMixin):
- def test_success(
- self, mock_error_convert, mock_find_role, mock_assign, mock_get_tg
- ):
- mock_get_tg.return_value = "target_el"
- mock_find_role.return_value = "role_el"
+ def test_success(self, mock_assign, find_target_or_group):
+ find_target_or_group.return_value = "target_el"
cmd_acl.assign_role_not_specific(self.mock_env, "role_id", "target_id")
self.assert_get_cib_called()
- mock_get_tg.assert_called_once_with(self.cib, "target_id")
- mock_find_role.assert_called_once_with(self.cib, "role_id")
- mock_assign.assert_called_once_with("target_el", "role_el")
- self.assertEqual(0, mock_error_convert.call_count)
+ find_target_or_group.assert_called_once_with(self.cib, "target_id")
+ mock_assign.assert_called_once_with(self.cib, "role_id", "target_el")
self.assert_same_cib_pushed()
- def test_failure(
- self, mock_error_convert, mock_find_role, mock_assign, mock_get_tg
- ):
- mock_get_tg.return_value = "target_el"
- exception_obj = acl_lib.AclRoleNotFound("role_id")
- mock_find_role.side_effect = exception_obj
- self.assert_raises(
- LibraryError,
- lambda: cmd_acl.assign_role_not_specific(
- self.mock_env, "role_id", "target_id"
- )
- )
- self.assert_get_cib_called()
- self.assertEqual(0, mock_assign.call_count)
- mock_error_convert.assert_called_once_with(exception_obj)
- self.assert_cib_not_pushed()
-
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.find_target")
-@mock.patch("pcs.lib.cib.acl.find_group")
-class GetTargetOrGroupTest(AclCommandsTest):
- def test_target(self, mock_find_group, mock_find_target):
- mock_find_target.return_value = "target_el"
- self.assertEqual(
- "target_el", cmd_acl._get_target_or_group(self.cib, "target_id")
- )
- mock_find_target.assert_called_once_with(self.cib, "target_id")
- self.assertEqual(0, mock_find_group.call_count)
-
- def test_group(self, mock_find_group, mock_find_target):
- mock_find_target.side_effect = acl_lib.AclTargetNotFound("group_id")
- mock_find_group.return_value = "group_el"
- self.assertEqual(
- "group_el", cmd_acl._get_target_or_group(self.cib, "group_id")
- )
- mock_find_target.assert_called_once_with(self.cib, "group_id")
- mock_find_group.assert_called_once_with(self.cib, "group_id")
-
- def test_not_found(self, mock_find_group, mock_find_target):
- mock_find_target.side_effect = acl_lib.AclTargetNotFound("id")
- mock_find_group.side_effect = acl_lib.AclGroupNotFound("id")
- assert_raise_library_error(
- lambda: cmd_acl._get_target_or_group(self.cib, "id"),
- (
- Severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "id",
- "id_description": "user/group",
- }
- )
- )
- mock_find_target.assert_called_once_with(self.cib, "id")
- mock_find_group.assert_called_once_with(self.cib, "id")
-
-
@mock.patch("pcs.lib.cib.acl.assign_role")
-@mock.patch("pcs.lib.cib.acl.find_role")
-@mock.patch("pcs.lib.cib.acl.find_target")
-@mock.patch("pcs.lib.cib.acl.acl_error_to_report_item")
class AssignRoleToTargetTest(AclCommandsTest):
- def test_success(
- self, mock_error_convert, mock_target, mock_role, mock_assign
- ):
- mock_target.return_value = "target_el"
- mock_role.return_value = "role_el"
+ def test_success(self, mock_assign, find_target):
+ find_target.return_value = "target_el"
cmd_acl.assign_role_to_target(self.mock_env, "role_id", "target_id")
self.assert_get_cib_called()
- mock_target.assert_called_once_with(self.cib, "target_id")
- mock_role.assert_called_once_with(self.cib, "role_id")
- mock_assign.assert_called_once_with("target_el", "role_el")
+ mock_assign.assert_called_once_with(self.cib, "role_id", "target_el")
self.assert_same_cib_pushed()
- self.assertEqual(0, mock_error_convert.call_count)
-
- def test_failure(
- self, mock_error_convert, mock_target, mock_role, mock_assign
- ):
- exception_obj = acl_lib.AclTargetNotFound("target_id")
- mock_target.side_effect = exception_obj
- mock_role.return_value = "role_el"
- self.assert_raises(
- LibraryError,
- lambda: cmd_acl.assign_role_to_target(
- self.mock_env, "role_id", "target_id"
- )
- )
- self.assert_get_cib_called()
- mock_target.assert_called_once_with(self.cib, "target_id")
- mock_error_convert.assert_called_once_with(exception_obj)
- self.assertEqual(0, mock_assign.call_count)
- self.assert_cib_not_pushed()
-
-@mock.patch("pcs.lib.cib.acl.assign_role")
-@mock.patch("pcs.lib.cib.acl.find_role")
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.find_group")
-@mock.patch("pcs.lib.cib.acl.acl_error_to_report_item")
+@mock.patch("pcs.lib.cib.acl.assign_role")
class AssignRoleToGroupTest(AclCommandsTest):
- def test_success(
- self, mock_error_convert, mock_group, mock_role, mock_assign
- ):
- mock_group.return_value = "group_el"
- mock_role.return_value = "role_el"
+ def test_success(self, mock_assign, find_group):
+ find_group.return_value = "group_el"
cmd_acl.assign_role_to_group(self.mock_env, "role_id", "group_id")
self.assert_get_cib_called()
- mock_group.assert_called_once_with(self.cib, "group_id")
- mock_role.assert_called_once_with(self.cib, "role_id")
- mock_assign.assert_called_once_with("group_el", "role_el")
+ mock_assign.assert_called_once_with(self.cib, "role_id", "group_el")
self.assert_same_cib_pushed()
- self.assertEqual(0, mock_error_convert.call_count)
-
- def test_failure(
- self, mock_error_convert, mock_group, mock_role, mock_assign
- ):
- exception_obj = acl_lib.AclGroupNotFound("group_id")
- mock_group.side_effect = exception_obj
- mock_role.return_value = "role_el"
- self.assert_raises(
- LibraryError,
- lambda: cmd_acl.assign_role_to_group(
- self.mock_env, "role_id", "group_id"
- )
- )
- self.assert_get_cib_called()
- mock_group.assert_called_once_with(self.cib, "group_id")
- mock_error_convert.assert_called_once_with(exception_obj)
- self.assertEqual(0, mock_assign.call_count)
- self.assert_cib_not_pushed()
-
-@mock.patch("pcs.lib.commands.acl._get_target_or_group")
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.unassign_role")
+@mock.patch("pcs.lib.cib.acl.find_target_or_group")
class UnassignRoleNotSpecificTest(AclCommandsTest):
- def test_success(self, mock_unassign, mock_tg):
- mock_tg.return_value = "target_el"
+ def test_success(self, find_target_or_group, mock_unassign):
+ find_target_or_group.return_value = "target_el"
cmd_acl.unassign_role_not_specific(
self.mock_env, "role_id", "target_id", False
)
self.assert_get_cib_called()
- mock_tg.assert_called_once_with(self.cib, "target_id")
+ find_target_or_group.assert_called_once_with(self.cib, "target_id")
mock_unassign.assert_called_once_with("target_el", "role_id", False)
self.assert_same_cib_pushed()
- def test_success_with_autodelete(self, mock_unassign, mock_tg):
- mock_tg.return_value = "target_el"
- cmd_acl.unassign_role_not_specific(
- self.mock_env, "role_id", "target_id", True
- )
- self.assert_get_cib_called()
- mock_tg.assert_called_once_with(self.cib, "target_id")
- mock_unassign.assert_called_once_with("target_el", "role_id", True)
- self.assert_same_cib_pushed()
-
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.unassign_role")
@mock.patch("pcs.lib.cib.acl.find_target")
-@mock.patch("pcs.lib.cib.acl.acl_error_to_report_item")
class UnassignRoleFromTargetTest(AclCommandsTest):
- def test_success(self, mock_error_convert, mock_find_el, mock_unassign):
- mock_find_el.return_value = "el"
+ def test_success(self, find_target, mock_unassign):
+ find_target.return_value = "el"
cmd_acl.unassign_role_from_target(
self.mock_env, "role_id", "el_id", False
)
self.assert_get_cib_called()
- mock_find_el.assert_called_once_with(self.cib, "el_id")
+ find_target.assert_called_once_with(self.cib, "el_id")
mock_unassign.assert_called_once_with("el", "role_id", False)
self.assert_same_cib_pushed()
- self.assertEqual(0, mock_error_convert.call_count)
-
- def test_success_autodelete(
- self, mock_error_convert, mock_find_el, mock_unassign
- ):
- mock_find_el.return_value = "el"
- cmd_acl.unassign_role_from_target(
- self.mock_env, "role_id", "el_id", True
- )
- self.assert_get_cib_called()
- mock_find_el.assert_called_once_with(self.cib, "el_id")
- mock_unassign.assert_called_once_with("el", "role_id", True)
- self.assert_same_cib_pushed()
- self.assertEqual(0, mock_error_convert.call_count)
-
- def test_failure(self, mock_error_convert, mock_find_el, mock_unassign):
- exception_obj = acl_lib.AclTargetNotFound("el_id")
- mock_find_el.side_effect = exception_obj
- self.assert_raises(
- LibraryError,
- lambda: cmd_acl.unassign_role_from_target(
- self.mock_env, "role_id", "el_id", False
- )
- )
- self.assert_get_cib_called()
- mock_find_el.assert_called_once_with(self.cib, "el_id")
- self.assertEqual(0, mock_unassign.call_count)
- self.assert_cib_not_pushed()
- mock_error_convert.assert_called_once_with(exception_obj)
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.unassign_role")
@mock.patch("pcs.lib.cib.acl.find_group")
-@mock.patch("pcs.lib.cib.acl.acl_error_to_report_item")
class UnassignRoleFromGroupTest(AclCommandsTest):
- def test_success(self, mock_error_convert, mock_find_el, mock_unassign):
- mock_find_el.return_value = "el"
+ def test_success(self, find_group, mock_unassign):
+ find_group.return_value = "el"
cmd_acl.unassign_role_from_group(
self.mock_env, "role_id", "el_id", False
)
self.assert_get_cib_called()
- mock_find_el.assert_called_once_with(self.cib, "el_id")
+ find_group.assert_called_once_with(self.cib, "el_id")
mock_unassign.assert_called_once_with("el", "role_id", False)
self.assert_same_cib_pushed()
- self.assertEqual(0, mock_error_convert.call_count)
-
- def test_success_autodelete(
- self, mock_error_convert, mock_find_el, mock_unassign
- ):
- mock_find_el.return_value = "el"
- cmd_acl.unassign_role_from_group(
- self.mock_env, "role_id", "el_id", True
- )
- self.assert_get_cib_called()
- mock_find_el.assert_called_once_with(self.cib, "el_id")
- mock_unassign.assert_called_once_with("el", "role_id", True)
- self.assert_same_cib_pushed()
- self.assertEqual(0, mock_error_convert.call_count)
-
- def test_failure(self, mock_error_convert, mock_find_el, mock_unassign):
- exception_obj = acl_lib.AclGroupNotFound("el_id")
- mock_find_el.side_effect = exception_obj
- self.assert_raises(
- LibraryError,
- lambda: cmd_acl.unassign_role_from_group(
- self.mock_env, "role_id", "el_id", False
- )
- )
- self.assert_get_cib_called()
- mock_find_el.assert_called_once_with(self.cib, "el_id")
- self.assertEqual(0, mock_unassign.call_count)
- self.assert_cib_not_pushed()
- mock_error_convert.assert_called_once_with(exception_obj)
-
-
-@mock.patch("pcs.lib.cib.acl.assign_role")
-@mock.patch("pcs.lib.cib.acl.find_role")
-class AssignRolesToElement(AclCommandsTest):
- def test_success(self, mock_role, mock_assign):
- mock_role.side_effect = lambda _, el_id: "{0}_el".format(el_id)
- cmd_acl._assign_roles_to_element(
- self.cib, "el", ["role1", "role2", "role3"]
- )
- mock_role.assert_has_calls([
- mock.call(self.cib, "role1"),
- mock.call(self.cib, "role2"),
- mock.call(self.cib, "role3")
- ])
- mock_assign.assert_has_calls([
- mock.call("el", "role1_el"),
- mock.call("el", "role2_el"),
- mock.call("el", "role3_el")
- ])
-
- def test_failure(self, mock_role, mock_assign):
- def _mock_role(_, el_id):
- if el_id in ["role1", "role3"]:
- raise acl_lib.AclRoleNotFound(el_id)
- elif el_id == "role2":
- return "role2_el"
- else:
- raise AssertionError("unexpected input")
-
- mock_role.side_effect = _mock_role
- assert_raise_library_error(
- lambda: cmd_acl._assign_roles_to_element(
- self.cib, "el", ["role1", "role2", "role3"]
- ),
- (
- Severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "role1",
- "id_description": "role",
- }
- ),
- (
- Severities.ERROR,
- report_codes.ID_NOT_FOUND,
- {
- "id": "role3",
- "id_description": "role",
- }
- )
- )
- mock_role.assert_has_calls([
- mock.call(self.cib, "role1"),
- mock.call(self.cib, "role2"),
- mock.call(self.cib, "role3")
- ])
- mock_assign.assert_called_once_with("el", "role2_el")
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.create_target")
-@mock.patch("pcs.lib.commands.acl._assign_roles_to_element")
+@mock.patch("pcs.lib.cib.acl.assign_all_roles")
class CreateTargetTest(AclCommandsTest):
def test_success(self, mock_assign, mock_create):
mock_create.return_value = "el"
@@ -436,8 +183,9 @@ class CreateTargetTest(AclCommandsTest):
self.assert_same_cib_pushed()
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.create_group")
-@mock.patch("pcs.lib.commands.acl._assign_roles_to_element")
+@mock.patch("pcs.lib.cib.acl.assign_all_roles")
class CreateGroupTest(AclCommandsTest):
def test_success(self, mock_assign, mock_create):
mock_create.return_value = "el"
@@ -450,6 +198,7 @@ class CreateGroupTest(AclCommandsTest):
self.assert_same_cib_pushed()
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.remove_target")
class RemoveTargetTest(AclCommandsTest):
def test_success(self, mock_remove):
@@ -459,6 +208,7 @@ class RemoveTargetTest(AclCommandsTest):
self.assert_same_cib_pushed()
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.remove_group")
class RemoveGroupTest(AclCommandsTest):
def test_success(self, mock_remove):
@@ -468,6 +218,7 @@ class RemoveGroupTest(AclCommandsTest):
self.assert_same_cib_pushed()
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.validate_permissions")
@mock.patch("pcs.lib.cib.acl.provide_role")
@mock.patch("pcs.lib.cib.acl.add_permissions_to_role")
@@ -482,6 +233,7 @@ class AddPermissionTest(AclCommandsTest):
self.assert_same_cib_pushed()
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
@mock.patch("pcs.lib.cib.acl.remove_permission")
class RemovePermission(AclCommandsTest):
def test_success(self, mock_remove):
@@ -494,6 +246,7 @@ class RemovePermission(AclCommandsTest):
@mock.patch("pcs.lib.cib.acl.get_target_list")
@mock.patch("pcs.lib.cib.acl.get_group_list")
@mock.patch("pcs.lib.cib.acl.get_role_list")
+@mock.patch("pcs.lib.commands.acl.get_acls", mock.Mock(side_effect=lambda x:x))
class GetConfigTest(AclCommandsTest):
def test_success(self, mock_role, mock_group, mock_target):
mock_role.return_value = "role"
@@ -507,4 +260,3 @@ class GetConfigTest(AclCommandsTest):
},
cmd_acl.get_config(self.mock_env)
)
-
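[Editor's note] The test_acl.py changes above drop the removed _assign_roles_to_element helper in favour of patching pcs.lib.cib.acl.assign_all_roles directly, and add a pass-through patch of get_acls (mock.Mock(side_effect=lambda x: x)) so the commands operate on the test CIB object itself. A minimal, self-contained sketch of that pass-through pattern follows; get_section and command are illustrative stand-ins, not pcs code.

```python
# Minimal sketch (not pcs code) of the pass-through patching pattern;
# get_section/command are illustrative stand-ins.
from unittest import TestCase, main, mock  # pcs wraps this via pcs.test.tools.pcs_unittest

def get_section(cib):
    # stand-in for pcs.lib.commands.acl.get_acls; replaced by the patch below
    raise NotImplementedError()

def command(cib):
    # stand-in for a library command that first narrows the CIB to a section
    return get_section(cib)

class PassThroughPatchTest(TestCase):
    # Passing a ready-made mock as the second argument means the decorator
    # injects no extra parameter into the test method; side_effect=lambda x: x
    # makes the patched helper return its argument unchanged.
    @mock.patch(__name__ + ".get_section", mock.Mock(side_effect=lambda x: x))
    def test_command_sees_the_cib_itself(self):
        self.assertEqual("mocked cib", command("mocked cib"))

if __name__ == "__main__":
    main()
```

Because a replacement object is supplied to mock.patch, no extra mock argument is injected into the test methods, which is why the patched get_acls does not appear in the test signatures above.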
diff --git a/pcs/lib/commands/test/test_alert.py b/pcs/lib/commands/test/test_alert.py
index 440c230..294fe00 100644
--- a/pcs/lib/commands/test/test_alert.py
+++ b/pcs/lib/commands/test/test_alert.py
@@ -26,7 +26,7 @@ from pcs.lib.external import CommandRunner
import pcs.lib.commands.alert as cmd_alert
-@mock.patch("pcs.lib.cib.tools.upgrade_cib")
+@mock.patch("pcs.lib.env.ensure_cib_version")
class CreateAlertTest(TestCase):
def setUp(self):
self.mock_log = mock.MagicMock(spec_set=logging.Logger)
@@ -36,7 +36,7 @@ class CreateAlertTest(TestCase):
self.mock_log, self.mock_rep, cib_data="<cib/>"
)
- def test_no_path(self, mock_upgrade_cib):
+ def test_no_path(self, mock_ensure_cib_version):
assert_raise_library_error(
lambda: cmd_alert.create_alert(
self.mock_env, None, None, None, None
@@ -44,21 +44,20 @@ class CreateAlertTest(TestCase):
(
Severities.ERROR,
report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "path"}
+ {"option_names": ["path"]}
)
)
- self.assertEqual(0, mock_upgrade_cib.call_count)
+ mock_ensure_cib_version.assert_not_called()
- def test_upgrade_needed(self, mock_upgrade_cib):
- self.mock_env._push_cib_xml(
- """
+ def test_upgrade_needed(self, mock_ensure_cib_version):
+ original_cib_xml = """
<cib validate-with="pacemaker-2.4.1">
<configuration>
</configuration>
</cib>
- """
- )
- mock_upgrade_cib.return_value = etree.XML(
+ """
+ self.mock_env._push_cib_xml(original_cib_xml)
+ mock_ensure_cib_version.return_value = etree.XML(
"""
<cib validate-with="pacemaker-2.5.0">
<configuration>
@@ -109,7 +108,7 @@ class CreateAlertTest(TestCase):
""",
self.mock_env._get_cib_xml()
)
- self.assertEqual(1, mock_upgrade_cib.call_count)
+ self.assertEqual(1, mock_ensure_cib_version.call_count)
class UpdateAlertTest(TestCase):
@@ -264,8 +263,8 @@ class UpdateAlertTest(TestCase):
),
(
Severities.ERROR,
- report_codes.CIB_ALERT_NOT_FOUND,
- {"alert": "unknown"}
+ report_codes.ID_NOT_FOUND,
+ {"id": "unknown"}
)
)
@@ -348,13 +347,13 @@ class RemoveAlertTest(TestCase):
report_list = [
(
Severities.ERROR,
- report_codes.CIB_ALERT_NOT_FOUND,
- {"alert": "unknown"}
+ report_codes.ID_NOT_FOUND,
+ {"id": "unknown"}
),
(
Severities.ERROR,
- report_codes.CIB_ALERT_NOT_FOUND,
- {"alert": "unknown2"}
+ report_codes.ID_NOT_FOUND,
+ {"id": "unknown2"}
)
]
assert_raise_library_error(
@@ -388,18 +387,6 @@ class AddRecipientTest(TestCase):
self.mock_log, self.mock_rep, cib_data=cib
)
- def test_alert_not_found(self):
- assert_raise_library_error(
- lambda: cmd_alert.add_recipient(
- self.mock_env, "unknown", "recipient", {}, {}
- ),
- (
- Severities.ERROR,
- report_codes.CIB_ALERT_NOT_FOUND,
- {"alert": "unknown"}
- )
- )
-
def test_value_not_defined(self):
assert_raise_library_error(
lambda: cmd_alert.add_recipient(
@@ -408,7 +395,7 @@ class AddRecipientTest(TestCase):
(
Severities.ERROR,
report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "value"}
+ {"option_names": ["value"]}
)
)
@@ -596,7 +583,7 @@ class UpdateRecipientTest(TestCase):
report_codes.ID_NOT_FOUND,
{
"id": "recipient",
- "id_description": "Recipient"
+ "id_description": "recipient"
}
)
)
diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py
index c24eea2..4e091a2 100644
--- a/pcs/lib/commands/test/test_booth.py
+++ b/pcs/lib/commands/test/test_booth.py
@@ -35,7 +35,7 @@ from pcs.lib.external import (
patch_commands = create_patcher("pcs.lib.commands.booth")
-@mock.patch("pcs.lib.booth.config_files.generate_key", return_value="key value")
+@mock.patch("pcs.lib.tools.generate_key", return_value="key value")
@mock.patch("pcs.lib.commands.booth.build", return_value="config content")
@mock.patch("pcs.lib.booth.config_structure.validate_peers")
class ConfigSetupTest(TestCase):
@@ -111,6 +111,7 @@ class ConfigDestroyTest(TestCase):
@patch_commands("parse", mock.Mock(side_effect=LibraryError()))
def test_raises_when_cannot_get_content_of_config(self):
env = mock.MagicMock()
+ env.booth.name = "somename"
assert_raise_library_error(
lambda: commands.config_destroy(env),
(
@@ -126,6 +127,7 @@ class ConfigDestroyTest(TestCase):
@patch_commands("parse", mock.Mock(side_effect=LibraryError()))
def test_remove_config_even_if_cannot_get_its_content_when_forced(self):
env = mock.MagicMock()
+ env.booth.name = "somename"
env.report_processor = MockLibraryReportProcessor()
commands.config_destroy(env, ignore_config_load_problems=True)
env.booth.remove_config.assert_called_once_with()
@@ -544,7 +546,6 @@ class CreateInClusterTest(TestCase):
assert_raise_library_error(
lambda: commands.create_in_cluster(
mock.MagicMock(), "somename", ip="1.2.3.4",
- resource_create=None, resource_remove=None,
),
(
Severities.ERROR,
diff --git a/pcs/lib/commands/test/test_fencing_topology.py b/pcs/lib/commands/test/test_fencing_topology.py
new file mode 100644
index 0000000..a6139c4
--- /dev/null
+++ b/pcs/lib/commands/test/test_fencing_topology.py
@@ -0,0 +1,257 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from functools import partial
+import logging
+
+from pcs.common.fencing_topology import (
+ TARGET_TYPE_REGEXP,
+ TARGET_TYPE_ATTRIBUTE,
+)
+from pcs.lib.env import LibraryEnvironment
+from pcs.test.tools.misc import create_patcher
+from pcs.test.tools.pcs_unittest import mock, TestCase
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+
+from pcs.lib.commands import fencing_topology as lib
+
+
+create_lib_env = partial(
+ LibraryEnvironment,
+ mock.MagicMock(logging.Logger),
+ MockLibraryReportProcessor()
+)
+patch_env = partial(mock.patch.object, LibraryEnvironment)
+patch_command = create_patcher("pcs.lib.commands.fencing_topology")
+
+
+@patch_command("cib_fencing_topology.add_level")
+@patch_command("get_resources")
+@patch_command("get_fencing_topology")
+@patch_env("push_cib")
+@patch_command("ClusterState")
+@patch_command("get_cluster_status_xml")
+@patch_env("get_cib")
+@patch_env("cmd_runner", lambda self: "mocked cmd_runner")
+class AddLevel(TestCase):
+ def prepare_mocks(
+ self, mock_get_cib, mock_status_xml, mock_status, mock_get_topology,
+ mock_get_resources
+ ):
+ mock_get_cib.return_value = "mocked cib"
+ mock_status_xml.return_value = "mock get_cluster_status_xml"
+ mock_status.return_value = mock.MagicMock(
+ node_section=mock.MagicMock(nodes="nodes")
+ )
+ mock_get_topology.return_value = "topology el"
+ mock_get_resources.return_value = "resources_el"
+
+ def assert_mocks(
+ self, mock_status_xml, mock_status, mock_get_topology,
+ mock_get_resources, mock_push_cib
+ ):
+ mock_status_xml.assert_called_once_with("mocked cmd_runner")
+ mock_status.assert_called_once_with("mock get_cluster_status_xml")
+ mock_get_topology.assert_called_once_with("mocked cib")
+ mock_get_resources.assert_called_once_with("mocked cib")
+ mock_push_cib.assert_called_once_with("mocked cib")
+
+ def test_success(
+ self, mock_get_cib, mock_status_xml, mock_status, mock_push_cib,
+ mock_get_topology, mock_get_resources, mock_add_level
+ ):
+ self.prepare_mocks(
+ mock_get_cib, mock_status_xml, mock_status, mock_get_topology,
+ mock_get_resources
+ )
+ lib_env = create_lib_env()
+
+ lib.add_level(
+ lib_env, "level", "target type", "target value", "devices",
+ "force device", "force node"
+ )
+
+ mock_add_level.assert_called_once_with(
+ lib_env.report_processor,
+ "topology el",
+ "resources_el",
+ "level",
+ "target type",
+ "target value",
+ "devices",
+ "nodes",
+ "force device",
+ "force node"
+ )
+ mock_get_cib.assert_called_once_with(None)
+ self.assert_mocks(
+ mock_status_xml, mock_status, mock_get_topology, mock_get_resources,
+ mock_push_cib
+ )
+
+ def test_target_attribute_updates_cib(
+ self, mock_get_cib, mock_status_xml, mock_status, mock_push_cib,
+ mock_get_topology, mock_get_resources, mock_add_level
+ ):
+ self.prepare_mocks(
+ mock_get_cib, mock_status_xml, mock_status, mock_get_topology,
+ mock_get_resources
+ )
+ lib_env = create_lib_env()
+
+ lib.add_level(
+ lib_env, "level", TARGET_TYPE_ATTRIBUTE, "target value", "devices",
+ "force device", "force node"
+ )
+
+ mock_add_level.assert_called_once_with(
+ lib_env.report_processor,
+ "topology el",
+ "resources_el",
+ "level",
+ TARGET_TYPE_ATTRIBUTE,
+ "target value",
+ "devices",
+ "nodes",
+ "force device",
+ "force node"
+ )
+ mock_get_cib.assert_called_once_with((2, 4, 0))
+ self.assert_mocks(
+ mock_status_xml, mock_status, mock_get_topology, mock_get_resources,
+ mock_push_cib
+ )
+
+ def test_target_regexp_updates_cib(
+ self, mock_get_cib, mock_status_xml, mock_status, mock_push_cib,
+ mock_get_topology, mock_get_resources, mock_add_level
+ ):
+ self.prepare_mocks(
+ mock_get_cib, mock_status_xml, mock_status, mock_get_topology,
+ mock_get_resources
+ )
+ lib_env = create_lib_env()
+
+ lib.add_level(
+ lib_env, "level", TARGET_TYPE_REGEXP, "target value", "devices",
+ "force device", "force node"
+ )
+
+ mock_add_level.assert_called_once_with(
+ lib_env.report_processor,
+ "topology el",
+ "resources_el",
+ "level",
+ TARGET_TYPE_REGEXP,
+ "target value",
+ "devices",
+ "nodes",
+ "force device",
+ "force node"
+ )
+ mock_get_cib.assert_called_once_with((2, 3, 0))
+ self.assert_mocks(
+ mock_status_xml, mock_status, mock_get_topology, mock_get_resources,
+ mock_push_cib
+ )
+
+@patch_command("cib_fencing_topology.export")
+@patch_command("get_fencing_topology")
+@patch_env("push_cib")
+@patch_env("get_cib", lambda self: "mocked cib")
+class GetConfig(TestCase):
+ def test_success(self, mock_push_cib, mock_get_topology, mock_export):
+ mock_get_topology.return_value = "topology el"
+ mock_export.return_value = "exported config"
+ lib_env = create_lib_env()
+
+ self.assertEqual(
+ "exported config",
+ lib.get_config(lib_env)
+ )
+
+ mock_export.assert_called_once_with("topology el")
+ mock_get_topology.assert_called_once_with("mocked cib")
+ mock_push_cib.assert_not_called()
+
+
+@patch_command("cib_fencing_topology.remove_all_levels")
+@patch_command("get_fencing_topology")
+@patch_env("push_cib")
+@patch_env("get_cib", lambda self: "mocked cib")
+class RemoveAllLevels(TestCase):
+ def test_success(self, mock_push_cib, mock_get_topology, mock_remove):
+ mock_get_topology.return_value = "topology el"
+ lib_env = create_lib_env()
+
+ lib.remove_all_levels(lib_env)
+
+ mock_remove.assert_called_once_with("topology el")
+ mock_get_topology.assert_called_once_with("mocked cib")
+ mock_push_cib.assert_called_once_with("mocked cib")
+
+
+@patch_command("cib_fencing_topology.remove_levels_by_params")
+@patch_command("get_fencing_topology")
+@patch_env("push_cib")
+@patch_env("get_cib", lambda self: "mocked cib")
+class RemoveLevelsByParams(TestCase):
+ def test_success(self, mock_push_cib, mock_get_topology, mock_remove):
+ mock_get_topology.return_value = "topology el"
+ lib_env = create_lib_env()
+
+ lib.remove_levels_by_params(
+ lib_env, "level", "target type", "target value", "devices", "ignore"
+ )
+
+ mock_remove.assert_called_once_with(
+ lib_env.report_processor,
+ "topology el",
+ "level",
+ "target type",
+ "target value",
+ "devices",
+ "ignore"
+ )
+ mock_get_topology.assert_called_once_with("mocked cib")
+ mock_push_cib.assert_called_once_with("mocked cib")
+
+
+@patch_command("cib_fencing_topology.verify")
+@patch_command("get_resources")
+@patch_command("get_fencing_topology")
+@patch_env("push_cib")
+@patch_command("ClusterState")
+@patch_command("get_cluster_status_xml")
+@patch_env("get_cib", lambda self: "mocked cib")
+@patch_env("cmd_runner", lambda self: "mocked cmd_runner")
+class Verify(TestCase):
+ def test_success(
+ self, mock_status_xml, mock_status, mock_push_cib, mock_get_topology,
+ mock_get_resources, mock_verify
+ ):
+ mock_status_xml.return_value = "mock get_cluster_status_xml"
+ mock_status.return_value = mock.MagicMock(
+ node_section=mock.MagicMock(nodes="nodes")
+ )
+ mock_get_topology.return_value = "topology el"
+ mock_get_resources.return_value = "resources_el"
+ lib_env = create_lib_env()
+
+ lib.verify(lib_env)
+
+ mock_verify.assert_called_once_with(
+ lib_env.report_processor,
+ "topology el",
+ "resources_el",
+ "nodes"
+ )
+ mock_status_xml.assert_called_once_with("mocked cmd_runner")
+ mock_status.assert_called_once_with("mock get_cluster_status_xml")
+ mock_get_topology.assert_called_once_with("mocked cib")
+ mock_get_resources.assert_called_once_with("mocked cib")
+ mock_push_cib.assert_not_called()
diff --git a/pcs/lib/commands/test/test_node.py b/pcs/lib/commands/test/test_node.py
new file mode 100644
index 0000000..13f25dc
--- /dev/null
+++ b/pcs/lib/commands/test/test_node.py
@@ -0,0 +1,296 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from functools import partial
+from contextlib import contextmanager
+
+from lxml import etree
+import logging
+
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_unittest import mock, TestCase
+from pcs.test.tools.misc import create_patcher
+
+from pcs.common import report_codes
+from pcs.lib.env import LibraryEnvironment
+from pcs.lib.errors import ReportItemSeverity as severity, LibraryError
+
+from pcs.lib.commands import node as lib
+
+
+mocked_cib = etree.fromstring("<cib />")
+
+patch_env = partial(mock.patch.object, LibraryEnvironment)
+patch_command = create_patcher("pcs.lib.commands.node")
+
+create_env = partial(
+ LibraryEnvironment,
+ mock.MagicMock(logging.Logger),
+ MockLibraryReportProcessor()
+)
+
+def fixture_node(order_num):
+ node = mock.MagicMock(attrs=mock.MagicMock())
+ node.attrs.name = "node-{0}".format(order_num)
+ return node
+
+class StandbyMaintenancePassParameters(TestCase):
+ def setUp(self):
+ self.lib_env = "lib_env"
+ self.nodes = "nodes"
+ self.wait = "wait"
+ self.standby_on = {"standby": "on"}
+ self.standby_off = {"standby": ""}
+ self.maintenance_on = {"maintenance": "on"}
+ self.maintenance_off = {"maintenance": ""}
+
+@patch_command("_set_instance_attrs_local_node")
+class StandbyMaintenancePassParametersLocal(StandbyMaintenancePassParameters):
+ def test_standby(self, mock_doer):
+ lib.standby_unstandby_local(self.lib_env, True, self.wait)
+ mock_doer.assert_called_once_with(
+ self.lib_env,
+ self.standby_on,
+ self.wait
+ )
+
+ def test_unstandby(self, mock_doer):
+ lib.standby_unstandby_local(self.lib_env, False, self.wait)
+ mock_doer.assert_called_once_with(
+ self.lib_env,
+ self.standby_off,
+ self.wait
+ )
+
+ def test_maintenance(self, mock_doer):
+ lib.maintenance_unmaintenance_local(self.lib_env, True, self.wait)
+ mock_doer.assert_called_once_with(
+ self.lib_env,
+ self.maintenance_on,
+ self.wait
+ )
+
+ def test_unmaintenance(self, mock_doer):
+ lib.maintenance_unmaintenance_local(self.lib_env, False, self.wait)
+ mock_doer.assert_called_once_with(
+ self.lib_env,
+ self.maintenance_off,
+ self.wait
+ )
+
+@patch_command("_set_instance_attrs_node_list")
+class StandbyMaintenancePassParametersList(StandbyMaintenancePassParameters):
+ def test_standby(self, mock_doer):
+ lib.standby_unstandby_list(self.lib_env, True, self.nodes, self.wait)
+ mock_doer.assert_called_once_with(
+ self.lib_env,
+ self.standby_on,
+ self.nodes,
+ self.wait
+ )
+
+ def test_unstandby(self, mock_doer):
+ lib.standby_unstandby_list(self.lib_env, False, self.nodes, self.wait)
+ mock_doer.assert_called_once_with(
+ self.lib_env,
+ self.standby_off,
+ self.nodes,
+ self.wait
+ )
+
+ def test_maintenance(self, mock_doer):
+ lib.maintenance_unmaintenance_list(
+ self.lib_env, True, self.nodes, self.wait
+ )
+ mock_doer.assert_called_once_with(
+ self.lib_env,
+ self.maintenance_on,
+ self.nodes,
+ self.wait
+ )
+
+ def test_unmaintenance(self, mock_doer):
+ lib.maintenance_unmaintenance_list(
+ self.lib_env, False, self.nodes, self.wait
+ )
+ mock_doer.assert_called_once_with(
+ self.lib_env,
+ self.maintenance_off,
+ self.nodes,
+ self.wait
+ )
+
+@patch_command("_set_instance_attrs_all_nodes")
+class StandbyMaintenancePassParametersAll(StandbyMaintenancePassParameters):
+ def test_standby(self, mock_doer):
+ lib.standby_unstandby_all(self.lib_env, True, self.wait)
+ mock_doer.assert_called_once_with(
+ self.lib_env,
+ self.standby_on,
+ self.wait
+ )
+
+ def test_unstandby(self, mock_doer):
+ lib.standby_unstandby_all(self.lib_env, False, self.wait)
+ mock_doer.assert_called_once_with(
+ self.lib_env,
+ self.standby_off,
+ self.wait
+ )
+
+ def test_maintenance(self, mock_doer):
+ lib.maintenance_unmaintenance_all(self.lib_env, True, self.wait)
+ mock_doer.assert_called_once_with(
+ self.lib_env,
+ self.maintenance_on,
+ self.wait
+ )
+
+ def test_unmaintenance(self, mock_doer):
+ lib.maintenance_unmaintenance_all(self.lib_env, False, self.wait)
+ mock_doer.assert_called_once_with(
+ self.lib_env,
+ self.maintenance_off,
+ self.wait
+ )
+
+class SetInstaceAttrsBase(TestCase):
+ node_count = 2
+ def setUp(self):
+ self.cluster_nodes = [fixture_node(i) for i in range(self.node_count)]
+
+ self.launch = {"pre": False, "post": False}
+ @contextmanager
+ def cib_runner_nodes_contextmanager(env, wait):
+ self.launch["pre"] = True
+ yield ("cib", "mock_runner", self.cluster_nodes)
+ self.launch["post"] = True
+
+ patcher = patch_command('cib_runner_nodes')
+ self.addCleanup(patcher.stop)
+ patcher.start().side_effect = cib_runner_nodes_contextmanager
+
+ def assert_context_manager_launched(self, pre=False, post=False):
+ self.assertEqual(self.launch, {"pre": pre, "post": post})
+
+@patch_command("update_node_instance_attrs")
+@patch_command("get_local_node_name")
+class SetInstaceAttrsLocal(SetInstaceAttrsBase):
+ node_count = 2
+
+ def test_not_possible_with_cib_file(self, mock_name, mock_attrs):
+ assert_raise_library_error(
+ lambda: lib._set_instance_attrs_local_node(
+ create_env(cib_data="<cib />"),
+ "attrs",
+ "wait"
+ ),
+ (
+ severity.ERROR,
+ report_codes.LIVE_ENVIRONMENT_REQUIRED_FOR_LOCAL_NODE,
+ {}
+ )
+ )
+ self.assert_context_manager_launched(pre=False, post=False)
+ mock_name.assert_not_called()
+ mock_attrs.assert_not_called()
+
+ def test_success(self, mock_name, mock_attrs):
+ mock_name.return_value = "node-1"
+
+ lib._set_instance_attrs_local_node(create_env(), "attrs", False)
+
+ self.assert_context_manager_launched(pre=True, post=True)
+ mock_name.assert_called_once_with("mock_runner")
+ mock_attrs.assert_called_once_with(
+ "cib", "node-1", "attrs", self.cluster_nodes
+ )
+
+@patch_command("update_node_instance_attrs")
+class SetInstaceAttrsAll(SetInstaceAttrsBase):
+ node_count = 2
+
+ def test_success(self, mock_attrs):
+ lib._set_instance_attrs_all_nodes(create_env(), "attrs", False)
+
+ self.assertEqual(2, len(mock_attrs.mock_calls))
+ mock_attrs.assert_has_calls([
+ mock.call("cib", "node-0", "attrs", self.cluster_nodes),
+ mock.call("cib", "node-1", "attrs", self.cluster_nodes),
+ ])
+
+@patch_command("update_node_instance_attrs")
+class SetInstaceAttrsList(SetInstaceAttrsBase):
+ node_count = 4
+
+ def test_success(self, mock_attrs):
+ lib._set_instance_attrs_node_list(
+ create_env(), "attrs", ["node-1", "node-2"], False
+ )
+
+ self.assert_context_manager_launched(pre=True, post=True)
+ self.assertEqual(2, len(mock_attrs.mock_calls))
+ mock_attrs.assert_has_calls([
+ mock.call("cib", "node-1", "attrs", self.cluster_nodes),
+ mock.call("cib", "node-2", "attrs", self.cluster_nodes),
+ ])
+
+ def test_bad_node(self, mock_attrs):
+ assert_raise_library_error(
+ lambda: lib._set_instance_attrs_node_list(
+ create_env(), "attrs", ["node-1", "node-9"], False
+ ),
+ (
+ severity.ERROR,
+ report_codes.NODE_NOT_FOUND,
+ {
+ "node": "node-9",
+ }
+ )
+ )
+ mock_attrs.assert_not_called()
+
+@patch_env("push_cib")
+class CibRunnerNodes(TestCase):
+ def setUp(self):
+ self.env = create_env()
+
+ @patch_env("get_cib", lambda self: "mocked cib")
+ @patch_env("cmd_runner", lambda self: "mocked cmd_runner")
+ @patch_env("ensure_wait_satisfiable")
+ @patch_command("ClusterState")
+ @patch_command("get_cluster_status_xml")
+ def test_wire_together_all_expected_dependecies(
+ self, get_cluster_status_xml, ClusterState, ensure_wait_satisfiable,
+ push_cib
+ ):
+ ClusterState.return_value = mock.MagicMock(
+ node_section=mock.MagicMock(nodes="nodes")
+ )
+ get_cluster_status_xml.return_value = "mock get_cluster_status_xml"
+ wait = 10
+
+ with lib.cib_runner_nodes(self.env, wait) as (cib, runner, nodes):
+ self.assertEqual(cib, "mocked cib")
+ self.assertEqual(runner, "mocked cmd_runner")
+ self.assertEqual(nodes, "nodes")
+ ensure_wait_satisfiable.assert_called_once_with(wait)
+ get_cluster_status_xml.assert_called_once_with("mocked cmd_runner")
+ ClusterState.assert_called_once_with("mock get_cluster_status_xml")
+
+ push_cib.assert_called_once_with("mocked cib", wait)
+
+ @patch_env("ensure_wait_satisfiable", mock.Mock(side_effect=LibraryError))
+ def test_raises_when_wait_is_not_satisfiable(self, push_cib):
+ def run():
+ #pylint: disable=unused-variable
+ with lib.cib_runner_nodes(self.env, "wait") as (cib, runner, nodes):
+ pass
+
+ self.assertRaises(LibraryError, run)
+ push_cib.assert_not_called()
diff --git a/pcs/lib/commands/test/test_resource_agent.py b/pcs/lib/commands/test/test_resource_agent.py
index 9652591..fd1c2bb 100644
--- a/pcs/lib/commands/test/test_resource_agent.py
+++ b/pcs/lib/commands/test/test_resource_agent.py
@@ -8,7 +8,7 @@ from __future__ import (
import logging
from lxml import etree
-from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.assertions import assert_raise_library_error, start_tag_error_text
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.test.tools.pcs_unittest import mock, TestCase
@@ -238,9 +238,9 @@ class TestListAgents(TestCase):
@mock.patch.object(lib_ra.Agent, "_get_metadata", autospec=True)
def test_describe(self, mock_metadata):
def mock_metadata_func(self):
- if self._full_agent_name == "ocf:test:Stateful":
+ if self.get_name() == "ocf:test:Stateful":
raise lib_ra.UnableToGetAgentMetadata(
- self._full_agent_name,
+ self.get_name(),
"test exception"
)
return etree.XML("""
@@ -252,7 +252,7 @@ class TestListAgents(TestCase):
<actions>
</actions>
</resource-agent>
- """.format(name=self._full_agent_name))
+ """.format(name=self.get_name()))
mock_metadata.side_effect = mock_metadata_func
# Stateful is missing as it does not provide valid metadata - see above
@@ -284,6 +284,26 @@ class TestListAgents(TestCase):
)
+class CompleteAgentList(TestCase):
+ def test_skip_agent_name_when_InvalidResourceAgentName_raised(self):
+ invalid_agent_name = "systemd:lvm2-pvscan@252:2"#suppose it is invalid
+ class Agent(object):
+ def __init__(self, runner, name):
+ if name == invalid_agent_name:
+ raise lib_ra.InvalidResourceAgentName(name)
+ self.name = name
+
+ def get_name_info(self):
+ return self.name
+
+ self.assertEqual(["ocf:heartbeat:Dummy"], lib._complete_agent_list(
+ mock.MagicMock(),
+ ["ocf:heartbeat:Dummy", invalid_agent_name],
+ describe=False,
+ search=False,
+ metadata_class=Agent,
+ ))
+
@mock.patch.object(lib_ra.ResourceAgent, "_load_metadata", autospec=True)
@mock.patch("pcs.lib.resource_agent.guess_exactly_one_resource_agent_full_name")
@mock.patch.object(
@@ -312,6 +332,7 @@ class TestDescribeAgent(TestCase):
"longdesc": "long desc",
"parameters": [],
"actions": [],
+ "default_actions": [{"interval": "60s", "name": "monitor"}],
}
@@ -353,7 +374,7 @@ class TestDescribeAgent(TestCase):
report_codes.UNABLE_TO_GET_AGENT_METADATA,
{
"agent": "ocf:test:Dummy",
- "reason": "Start tag expected, '<' not found, line 1, column 1",
+ "reason": start_tag_error_text(),
}
)
)
diff --git a/pcs/lib/commands/test/test_stonith_agent.py b/pcs/lib/commands/test/test_stonith_agent.py
index eaf5f93..1bbbcb1 100644
--- a/pcs/lib/commands/test/test_stonith_agent.py
+++ b/pcs/lib/commands/test/test_stonith_agent.py
@@ -8,7 +8,7 @@ from __future__ import (
import logging
from lxml import etree
-from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.assertions import assert_raise_library_error, start_tag_error_text
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.test.tools.pcs_unittest import mock, TestCase
@@ -97,10 +97,11 @@ class TestListAgents(TestCase):
@mock.patch.object(lib_ra.Agent, "_get_metadata", autospec=True)
def test_describe(self, mock_metadata):
+ self.maxDiff = None
def mock_metadata_func(self):
- if self._full_agent_name == "ocf:test:Stateful":
+ if self.get_name() == "ocf:test:Stateful":
raise lib_ra.UnableToGetAgentMetadata(
- self._full_agent_name,
+ self.get_name(),
"test exception"
)
return etree.XML("""
@@ -112,7 +113,7 @@ class TestListAgents(TestCase):
<actions>
</actions>
</resource-agent>
- """.format(name=self._full_agent_name))
+ """.format(name=self.get_name()))
mock_metadata.side_effect = mock_metadata_func
# Stateful is missing as it does not provide valid metadata - see above
@@ -121,22 +122,22 @@ class TestListAgents(TestCase):
[
{
"name": "fence_apc",
- "shortdesc": "short stonith:fence_apc",
- "longdesc": "long stonith:fence_apc",
+ "shortdesc": "short fence_apc",
+ "longdesc": "long fence_apc",
"parameters": [],
"actions": [],
},
{
"name": "fence_dummy",
- "shortdesc": "short stonith:fence_dummy",
- "longdesc": "long stonith:fence_dummy",
+ "shortdesc": "short fence_dummy",
+ "longdesc": "long fence_dummy",
"parameters": [],
"actions": [],
},
{
"name": "fence_xvm",
- "shortdesc": "short stonith:fence_xvm",
- "longdesc": "long stonith:fence_xvm",
+ "shortdesc": "short fence_xvm",
+ "longdesc": "long fence_xvm",
"parameters": [],
"actions": [],
},
@@ -176,6 +177,7 @@ class TestDescribeAgent(TestCase):
"longdesc": "long desc",
"parameters": [],
"actions": [],
+ "default_actions": [{"name": "monitor", "interval": "60s"}],
}
@@ -204,7 +206,7 @@ class TestDescribeAgent(TestCase):
report_codes.UNABLE_TO_GET_AGENT_METADATA,
{
"agent": "fence_dummy",
- "reason": "Start tag expected, '<' not found, line 1, column 1",
+ "reason": start_tag_error_text(),
}
)
)
diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py
index edf592a..7287bc6 100644
--- a/pcs/lib/commands/test/test_ticket.py
+++ b/pcs/lib/commands/test/test_ticket.py
@@ -64,8 +64,13 @@ class CreateTest(TestCase):
),
(
severities.ERROR,
- report_codes.RESOURCE_DOES_NOT_EXIST,
- {"resource_id": "resourceA"},
+ report_codes.ID_NOT_FOUND,
+ {
+ "context_type": "cib",
+ "context_id": "",
+ "id": "resourceA",
+ "id_description": "resource"
+ },
),
)
diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py
index be621c0..23b8e4e 100644
--- a/pcs/lib/corosync/config_facade.py
+++ b/pcs/lib/corosync/config_facade.py
@@ -147,7 +147,7 @@ class ConfigFacade(object):
allowed_names = self.__class__.QUORUM_OPTIONS
if name not in allowed_names:
report_items.append(
- reports.invalid_option(name, allowed_names, "quorum")
+ reports.invalid_option([name], allowed_names, "quorum")
)
continue
@@ -386,6 +386,7 @@ class ConfigFacade(object):
])
allowed_options = required_options | optional_options
model_options_names = frozenset(model_options.keys())
+ missing_options = []
report_items = []
severity = (
ReportItemSeverity.WARNING if force else ReportItemSeverity.ERROR
@@ -393,13 +394,12 @@ class ConfigFacade(object):
forceable = None if force else report_codes.FORCE_OPTIONS
if need_required:
- for missing in sorted(required_options - model_options_names):
- report_items.append(reports.required_option_is_missing(missing))
+ missing_options += required_options - model_options_names
for name, value in sorted(model_options.items()):
if name not in allowed_options:
report_items.append(reports.invalid_option(
- name,
+ [name],
allowed_options,
"quorum device model",
severity,
@@ -410,9 +410,7 @@ class ConfigFacade(object):
if value == "":
# do not allow to remove required options
if name in required_options:
- report_items.append(
- reports.required_option_is_missing(name)
- )
+ missing_options.append(name)
else:
continue
@@ -455,6 +453,11 @@ class ConfigFacade(object):
name, value, allowed_values, severity, forceable
))
+ if missing_options:
+ report_items.append(
+ reports.required_option_is_missing(sorted(missing_options))
+ )
+
return report_items
def __validate_quorum_device_generic_options(
@@ -476,7 +479,7 @@ class ConfigFacade(object):
# model is never allowed in generic options, it is passed
# in its own argument
report_items.append(reports.invalid_option(
- name,
+ [name],
allowed_options,
"quorum device",
severity if name != "model" else ReportItemSeverity.ERROR,
diff --git a/pcs/lib/env.py b/pcs/lib/env.py
index f453be6..c41685b 100644
--- a/pcs/lib/env.py
+++ b/pcs/lib/env.py
@@ -5,14 +5,14 @@ from __future__ import (
unicode_literals,
)
-import os.path
-
from lxml import etree
+import os.path
+import tempfile
from pcs import settings
from pcs.lib import reports
from pcs.lib.booth.env import BoothEnv
-from pcs.lib.cib.tools import ensure_cib_version
+from pcs.lib.pacemaker.env import PacemakerEnv
from pcs.lib.cluster_conf_facade import ClusterConfFacade
from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
from pcs.lib.corosync.live import (
@@ -33,12 +33,17 @@ from pcs.lib.nodes_task import (
check_corosync_offline_on_nodes,
qdevice_reload_on_nodes,
)
-from pcs.lib.pacemaker import (
+from pcs.lib.pacemaker.live import (
+ ensure_wait_for_idle_support,
+ ensure_cib_version,
get_cib,
get_cib_xml,
replace_cib_configuration_xml,
+ wait_for_idle,
+ get_cluster_status_xml,
)
-
+from pcs.lib.pacemaker.state import get_cluster_state_dom
+from pcs.lib.pacemaker.values import get_valid_timeout_seconds
class LibraryEnvironment(object):
# pylint: disable=too-many-instance-attributes
@@ -52,8 +57,10 @@ class LibraryEnvironment(object):
cib_data=None,
corosync_conf_data=None,
booth=None,
+ pacemaker=None,
auth_tokens_getter=None,
cluster_conf_data=None,
+ request_timeout=None,
):
self._logger = logger
self._report_processor = report_processor
@@ -65,6 +72,10 @@ class LibraryEnvironment(object):
self._booth = (
BoothEnv(report_processor, booth) if booth is not None else None
)
+ #pacemaker is currently not mocked and it provides only access to
+ #the authkey
+ self._pacemaker = PacemakerEnv()
+ self._request_timeout = request_timeout
self._is_cman_cluster = None
# TODO tokens probably should not be inserted from outside, but we're
# postponing dealing with them, because it's not that easy to move
@@ -72,6 +83,9 @@ class LibraryEnvironment(object):
self._auth_tokens_getter = auth_tokens_getter
self._auth_tokens = None
self._cib_upgraded = False
+ self._cib_data_tmp_file = None
+
+ self.__timeout_cache = {}
@property
def logger(self):
@@ -109,32 +123,60 @@ class LibraryEnvironment(object):
cib = get_cib(self._get_cib_xml())
if minimal_version is not None:
upgraded_cib = ensure_cib_version(
- self.cmd_runner(), cib, minimal_version
+ self.cmd_runner(),
+ cib,
+ minimal_version
)
if upgraded_cib is not None:
cib = upgraded_cib
+ if self.is_cib_live and not self._cib_upgraded:
+ self.report_processor.process(
+ reports.cib_upgrade_successful()
+ )
self._cib_upgraded = True
return cib
+ def get_cluster_state(self):
+ return get_cluster_state_dom(get_cluster_status_xml(self.cmd_runner()))
+
def _push_cib_xml(self, cib_data):
if self.is_cib_live:
- replace_cib_configuration_xml(
- self.cmd_runner(), cib_data, self._cib_upgraded
- )
- if self._cib_upgraded:
- self._cib_upgraded = False
- self.report_processor.process(reports.cib_upgrade_successful())
+ replace_cib_configuration_xml(self.cmd_runner(), cib_data)
+ self._cib_upgraded = False
else:
self._cib_data = cib_data
+ def _get_wait_timeout(self, wait):
+ if wait is False:
+ return False
+
+ if wait not in self.__timeout_cache:
+ if not self.is_cib_live:
+ raise LibraryError(reports.wait_for_idle_not_live_cluster())
+ ensure_wait_for_idle_support(self.cmd_runner())
+ self.__timeout_cache[wait] = get_valid_timeout_seconds(wait)
+ return self.__timeout_cache[wait]
+
- def push_cib(self, cib):
+ def ensure_wait_satisfiable(self, wait):
+ """
+ Raise when wait is not supported or when wait is not a valid wait value.
+
+ mixed wait can be False when waiting is not required, or a valid timeout value
+ """
+ self._get_wait_timeout(wait)
+
+ def push_cib(self, cib, wait=False):
+ timeout = self._get_wait_timeout(wait)
#etree returns bytes: b'xml'
#python 3 removed .encode() from bytes
#run(...) calls subprocess.Popen.communicate which calls encode...
#so here is bytes to str conversion
self._push_cib_xml(etree.tostring(cib).decode())
+ if timeout is not False:
+ wait_for_idle(self.cmd_runner(), timeout)
+
@property
def is_cib_live(self):
return self._cib_data is None
@@ -213,6 +255,7 @@ class LibraryEnvironment(object):
return exists_local_corosync_conf()
def command_expect_live_corosync_env(self):
+ # TODO get rid of cli knowledge
if not self.is_corosync_conf_live:
raise LibraryError(reports.live_environment_required([
"--corosync_conf"
@@ -227,8 +270,26 @@ class LibraryEnvironment(object):
# make sure to get output of external processes in English and ASCII
"LC_ALL": "C",
}
+
if self.user_login:
runner_env["CIB_user"] = self.user_login
+
+ if not self.is_cib_live:
+ # Dump CIB data to a temporary file and set it up in the runner.
+ # This way every called pacemaker tool can access the CIB and we
+ # don't need to take care of it every time the runner is called.
+ if not self._cib_data_tmp_file:
+ try:
+ self._cib_data_tmp_file = tempfile.NamedTemporaryFile(
+ "w+",
+ suffix=".pcs"
+ )
+ self._cib_data_tmp_file.write(self._get_cib_xml())
+ self._cib_data_tmp_file.flush()
+ except EnvironmentError as e:
+ raise LibraryError(reports.cib_save_tmp_error(str(e)))
+ runner_env["CIB_file"] = self._cib_data_tmp_file.name
+
return CommandRunner(self.logger, self.report_processor, runner_env)
def node_communicator(self):
@@ -237,7 +298,8 @@ class LibraryEnvironment(object):
self.report_processor,
self.__get_auth_tokens(),
self.user_login,
- self.user_groups
+ self.user_groups,
+ self._request_timeout
)
def __get_auth_tokens(self):
@@ -251,3 +313,7 @@ class LibraryEnvironment(object):
@property
def booth(self):
return self._booth
+
+ @property
+ def pacemaker(self):
+ return self._pacemaker
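[Editor's note] The cmd_runner() change above dumps a non-live CIB into a NamedTemporaryFile and exports its path through the CIB_file environment variable, so every pacemaker tool started by the runner reads that file instead of the live cluster. A rough standalone illustration of the same mechanism (not pcs code; it assumes the pacemaker CLI tools are installed and that cib-backup.xml holds a previously saved CIB dump):

```python
# Rough standalone illustration of steering pacemaker CLI tools to a
# file-based CIB via the CIB_file environment variable (not pcs code).
import os
import subprocess
import tempfile

with open("cib-backup.xml") as backup:           # any previously saved CIB dump
    cib_xml = backup.read()

with tempfile.NamedTemporaryFile("w+", suffix=".pcs") as tmp:
    tmp.write(cib_xml)
    tmp.flush()                                  # make the content visible to the tool
    env = dict(os.environ, CIB_file=tmp.name)
    # cibadmin now queries the file named by CIB_file instead of the live CIB
    print(subprocess.check_output(["cibadmin", "--query"], env=env).decode("utf-8"))
```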
diff --git a/pcs/lib/env_file.py b/pcs/lib/env_file.py
index e683a57..92b4124 100644
--- a/pcs/lib/env_file.py
+++ b/pcs/lib/env_file.py
@@ -15,12 +15,12 @@ from pcs.lib.errors import ReportItemSeverity, LibraryError, LibraryEnvError
class GhostFile(object):
is_live = False
- def __init__(self, file_role, content=None):
+ def __init__(self, file_role, content=None, is_binary=False):
self.__file_role = file_role
self.__content = content
self.__no_existing_file_expected = False
self.__can_overwrite_existing_file = False
- self.__is_binary = False
+ self.__is_binary = is_binary
def read(self):
if self.__content is None:
@@ -30,15 +30,20 @@ class GhostFile(object):
return self.__content
+ @property
+ def exists(self):
+ #file will be considered to exist after writing: it is symmetrical with
+ #RealFile
+ return self.__content is not None
+
def remove(self, silence_no_existence):
raise AssertionError("Remove GhostFile is not supported.")
- def write(self, content, file_operation=None, is_binary=False):
+ def write(self, content, file_operation=None):
"""
callable file_operation is there only for RealFile compatible interface
it has no effect
"""
- self.__is_binary = is_binary
self.__content = content
def assert_no_conflict_with_existing(
@@ -58,32 +63,33 @@ class GhostFile(object):
class RealFile(object):
is_live = True
- def __init__(
- self, file_role, file_path,
- overwrite_code=report_codes.FORCE_FILE_OVERWRITE
- ):
+ def __init__(self, file_role, file_path, is_binary=False):
self.__file_role = file_role
self.__file_path = file_path
- self.__overwrite_code = overwrite_code
+ self.__is_binary=is_binary
def assert_no_conflict_with_existing(
self, report_processor, can_overwrite_existing=False
):
- if os.path.exists(self.__file_path):
+ if self.exists:
report_processor.process(reports.file_already_exists(
self.__file_role,
self.__file_path,
ReportItemSeverity.WARNING if can_overwrite_existing
else ReportItemSeverity.ERROR,
forceable=None if can_overwrite_existing
- else self.__overwrite_code,
+ else report_codes.FORCE_FILE_OVERWRITE,
))
- def write(self, content, file_operation=None, is_binary=False):
+ @property
+ def exists(self):
+ return os.path.exists(self.__file_path)
+
+ def write(self, content, file_operation=None):
"""
callable file_operation takes a path and performs an operation on it, e.g. chmod
"""
- mode = "wb" if is_binary else "w"
+ mode = "wb" if self.__is_binary else "w"
try:
with open(self.__file_path, mode) as config_file:
config_file.write(content)
@@ -94,13 +100,14 @@ class RealFile(object):
def read(self):
try:
- with open(self.__file_path, "r") as file:
+ mode = "rb" if self.__is_binary else "r"
+ with open(self.__file_path, mode) as file:
return file.read()
except EnvironmentError as e:
raise self.__report_io_error(e, "read")
def remove(self, silence_no_existence=False):
- if os.path.exists(self.__file_path):
+ if self.exists:
try:
os.remove(self.__file_path)
except EnvironmentError as e:
diff --git a/pcs/lib/env_tools.py b/pcs/lib/env_tools.py
new file mode 100644
index 0000000..bc2c7a4
--- /dev/null
+++ b/pcs/lib/env_tools.py
@@ -0,0 +1,35 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.lib.cib.resource import remote_node, guest_node
+from pcs.lib.xml_tools import get_root
+from pcs.lib.node import NodeAddressesList
+
+
+def get_nodes(corosync_conf=None, tree=None):
+ return NodeAddressesList(
+ (
+ corosync_conf.get_nodes() if corosync_conf
+ else NodeAddressesList([])
+ )
+ +
+ (
+ get_nodes_remote(tree) if tree is not None
+ else NodeAddressesList([])
+ )
+ +
+ (
+ get_nodes_guest(tree) if tree is not None
+ else NodeAddressesList([])
+ )
+ )
+
+def get_nodes_remote(tree):
+ return NodeAddressesList(remote_node.find_node_list(get_root(tree)))
+
+def get_nodes_guest(tree):
+ return NodeAddressesList(guest_node.find_node_list(get_root(tree)))
diff --git a/pcs/lib/errors.py b/pcs/lib/errors.py
index c4263f0..110e896 100644
--- a/pcs/lib/errors.py
+++ b/pcs/lib/errors.py
@@ -59,3 +59,26 @@ class ReportItem(object):
code=self.code,
info=self.info
)
+
+class ReportListAnalyzer(object):
+ def __init__(self, report_list):
+ self.__error_list = None
+ self.__report_list = report_list
+
+ def reports_with_severities(self, severity_list):
+ return [
+ report_item for report_item in self.report_list
+ if report_item.severity in severity_list
+ ]
+
+ @property
+ def report_list(self):
+ return self.__report_list
+
+ @property
+ def error_list(self):
+ if self.__error_list is None:
+ self.__error_list = self.reports_with_severities(
+ [ReportItemSeverity.ERROR]
+ )
+ return self.__error_list
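[Editor's note] ReportListAnalyzer added above computes its error_list lazily on first access and caches the result. A hedged usage sketch; the FakeReport items below mimic only the severity attribute the analyzer actually reads, they are not real pcs ReportItem objects.

```python
# Hedged usage sketch for ReportListAnalyzer; FakeReport mimics only the
# severity attribute that the analyzer reads.
from collections import namedtuple

from pcs.lib.errors import ReportItemSeverity, ReportListAnalyzer

FakeReport = namedtuple("FakeReport", "severity code")

analyzer = ReportListAnalyzer([
    FakeReport(ReportItemSeverity.WARNING, "SOME_WARNING"),
    FakeReport(ReportItemSeverity.ERROR, "SOME_ERROR"),
])

# error_list is computed on first access and cached for later accesses
print([item.code for item in analyzer.error_list])   # -> ['SOME_ERROR']
```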
diff --git a/pcs/lib/exchange_formats.md b/pcs/lib/exchange_formats.md
index ebfc288..56bd914 100644
--- a/pcs/lib/exchange_formats.md
+++ b/pcs/lib/exchange_formats.md
@@ -32,3 +32,15 @@ On the key "resource_sets" is a dictionary of resource sets (see Resource set).
"resource_sets": {"options": {"id": "id"}, "ids": ["resourceA", "resourceB"]},
}
```
+
+Resource operation interval duplication
+---------------------------------------
+Dictionary. Key is an operation name. Value is a list of lists of intervals.
+```python
+{
+ "monitor": [
+ ["3600s", "60m", "1h"],
+ ["60s", "1m"],
+ ],
+},
+```
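[Editor's note] A hedged sketch of how a structure in the format documented above might be assembled from a flat list of operations; the normalize() helper is a simplified stand-in, not the interval normalization pcs itself uses.

```python
# Hedged sketch of building the "interval duplication" structure above.
# normalize() is a crude stand-in; pcs has its own interval handling.
from collections import defaultdict

def normalize(interval):
    # seconds-based normalization: "1h" -> 3600, "60s" -> 60, "1m" -> 60
    units = {"s": 1, "m": 60, "h": 3600}
    if interval and interval[-1] in units:
        return int(interval[:-1]) * units[interval[-1]]
    return int(interval)

def find_interval_duplications(operations):
    # operations: list of (operation name, interval) pairs
    by_name = defaultdict(lambda: defaultdict(list))
    for name, interval in operations:
        by_name[name][normalize(interval)].append(interval)
    return {
        name: [dups for dups in groups.values() if len(dups) > 1]
        for name, groups in by_name.items()
        if any(len(dups) > 1 for dups in groups.values())
    }

print(find_interval_duplications([
    ("monitor", "3600s"), ("monitor", "60m"), ("monitor", "1h"),
    ("monitor", "60s"), ("monitor", "1m"), ("start", "0s"),
]))
# -> {'monitor': [['3600s', '60m', '1h'], ['60s', '1m']]}
```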
diff --git a/pcs/lib/external.py b/pcs/lib/external.py
index 160586a..5e3133d 100644
--- a/pcs/lib/external.py
+++ b/pcs/lib/external.py
@@ -6,9 +6,10 @@ from __future__ import (
)
import base64
-import inspect
+import io
import json
import os
+
try:
# python 2
from pipes import quote as shell_quote
@@ -17,7 +18,6 @@ except ImportError:
from shlex import quote as shell_quote
import re
import signal
-import ssl
import subprocess
import sys
try:
@@ -26,29 +26,12 @@ try:
except ImportError:
# python3
from urllib.parse import urlencode as urllib_urlencode
-try:
- # python2
- from urllib2 import (
- build_opener as urllib_build_opener,
- HTTPCookieProcessor as urllib_HTTPCookieProcessor,
- HTTPSHandler as urllib_HTTPSHandler,
- HTTPError as urllib_HTTPError,
- URLError as urllib_URLError
- )
-except ImportError:
- # python3
- from urllib.request import (
- build_opener as urllib_build_opener,
- HTTPCookieProcessor as urllib_HTTPCookieProcessor,
- HTTPSHandler as urllib_HTTPSHandler
- )
- from urllib.error import (
- HTTPError as urllib_HTTPError,
- URLError as urllib_URLError
- )
from pcs import settings
-from pcs.common import report_codes
+from pcs.common import (
+ pcs_pycurl as pycurl,
+ report_codes,
+)
from pcs.common.tools import (
join_multilines,
simple_cache,
@@ -138,7 +121,7 @@ def disable_service(runner, service, instance=None):
instance -- instance name, it has no effect on non-systemd systems.
If None no instance name will be used.
"""
- if not is_service_installed(runner, service):
+ if not is_service_installed(runner, service, instance):
return
if is_systemctl():
stdout, stderr, retval = runner.run([
@@ -279,15 +262,17 @@ def is_service_running(runner, service, instance=None):
return retval == 0
-def is_service_installed(runner, service):
+def is_service_installed(runner, service, instance=None):
"""
Check if specified service is installed on local system.
runner -- CommandRunner
service -- name of service
+ instance -- systemd service instance
"""
if is_systemctl():
- return service in get_systemd_services(runner)
+ service_name = "{0}{1}".format(service, "" if instance is None else "@")
+ return service_name in get_systemd_services(runner)
else:
return service in get_non_systemd_services(runner)
@@ -356,6 +341,20 @@ def is_cman_cluster(runner):
return match is not None and match.group(1) == "1"
+def is_proxy_set(env_dict):
+ """
+ Returns True whenever any of proxy environment variables (https_proxy,
+ HTTPS_PROXY, all_proxy, ALL_PROXY) are set in env_dict. False otherwise.
+
+ env_dict -- environment variables in dict
+ """
+ proxy_list = ["https_proxy", "all_proxy"]
+ for var in proxy_list + [v.upper() for v in proxy_list]:
+ if env_dict.get(var, "") != "":
+ return True
+ return False
+
+
class CommandRunner(object):
def __init__(self, logger, reporter, env_vars=None):
self._logger = logger
@@ -375,18 +374,31 @@ class CommandRunner(object):
# set own PATH or CIB_file, we must allow it. I.e. it wants to run
# a pacemaker tool on a CIB in a file but cannot afford the risk of
# changing the CIB in the file specified by the user.
- env_vars = self._env_vars
+ env_vars = self._env_vars.copy()
env_vars.update(
dict(env_extend) if env_extend else dict()
)
log_args = " ".join([shell_quote(x) for x in args])
- msg = "Running: {args}"
- if stdin_string:
- msg += "\n--Debug Input Start--\n{stdin}\n--Debug Input End--"
- self._logger.debug(msg.format(args=log_args, stdin=stdin_string))
+ self._logger.debug(
+ "Running: {args}\nEnvironment:{env_vars}{stdin_string}".format(
+ args=log_args,
+ stdin_string=("" if not stdin_string else (
+ "\n--Debug Input Start--\n{0}\n--Debug Input End--"
+ .format(stdin_string)
+ )),
+ env_vars=("" if not env_vars else (
+ "\n" + "\n".join([
+ " {0}={1}".format(key, val)
+ for key, val in sorted(env_vars.items())
+ ])
+ ))
+ )
+ )
self._reporter.process(
- reports.run_external_process_started(log_args, stdin_string)
+ reports.run_external_process_started(
+ log_args, stdin_string, env_vars
+ )
)
try:
@@ -456,6 +468,10 @@ class NodeUnsupportedCommandException(NodeCommunicationException):
pass
+class NodeConnectionTimedOutException(NodeCommunicationException):
+ pass
+
+
def node_communicator_exception_to_report_item(
e, severity=ReportItemSeverity.ERROR, forceable=None
):
@@ -479,6 +495,8 @@ def node_communicator_exception_to_report_item(
reports.node_communication_error_other_error,
NodeConnectionException:
reports.node_communication_error_unable_to_connect,
+ NodeConnectionTimedOutException:
+ reports.node_communication_error_timed_out,
}
if e.__class__ in exception_to_report:
return exception_to_report[e.__class__](
@@ -509,42 +527,90 @@ class NodeCommunicator(object):
"""
return json.dumps(data)
- def __init__(self, logger, reporter, auth_tokens, user=None, groups=None):
+ def __init__(
+ self, logger, reporter, auth_tokens, user=None, groups=None,
+ request_timeout=None
+ ):
"""
auth_tokens authorization tokens for nodes: {node: token}
user username
groups groups the user is member of
+ request_timeout -- positive integer, time for one request in seconds
"""
self._logger = logger
self._reporter = reporter
self._auth_tokens = auth_tokens
self._user = user
self._groups = groups
+ self._request_timeout = request_timeout
+
+ @property
+ def request_timeout(self):
+ return (
+ settings.default_request_timeout
+ if self._request_timeout is None
+ else self._request_timeout
+ )
- def call_node(self, node_addr, request, data):
+ def call_node(self, node_addr, request, data, request_timeout=None):
"""
Send a request to a node
node_addr destination node, instance of NodeAddresses
request command to be run on the node
data command parameters, encoded by format_data_* method
"""
- return self.call_host(node_addr.ring0, request, data)
+ return self.call_host(node_addr.ring0, request, data, request_timeout)
- def call_host(self, host, request, data):
+ def call_host(self, host, request, data, request_timeout=None):
"""
Send a request to a host
host host address
request command to be run on the host
data command parameters, encoded by format_data_* method
+ request_timeout float timeout for the request; if not set, the object
+ property will be used
"""
- opener = self.__get_opener()
+ def __debug_callback(data_type, debug_data):
+ prefixes = {
+ pycurl.DEBUG_TEXT: b"* ",
+ pycurl.DEBUG_HEADER_IN: b"< ",
+ pycurl.DEBUG_HEADER_OUT: b"> ",
+ pycurl.DEBUG_DATA_IN: b"<< ",
+ pycurl.DEBUG_DATA_OUT: b">> ",
+ }
+ if data_type in prefixes:
+ debug_output.write(prefixes[data_type])
+ debug_output.write(debug_data)
+ if not debug_data.endswith(b"\n"):
+ debug_output.write(b"\n")
+
+ output = io.BytesIO()
+ debug_output = io.BytesIO()
+ cookies = self.__prepare_cookies(host)
+ timeout = (
+ request_timeout
+ if request_timeout is not None
+ else self.request_timeout
+ )
url = "https://{host}:2224/{request}".format(
host=("[{0}]".format(host) if ":" in host else host),
request=request
)
- cookies = self.__prepare_cookies(host)
+
+ handler = pycurl.Curl()
+ handler.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTPS)
+ handler.setopt(pycurl.TIMEOUT_MS, int(timeout * 1000))
+ handler.setopt(pycurl.URL, url.encode("utf-8"))
+ handler.setopt(pycurl.WRITEFUNCTION, output.write)
+ handler.setopt(pycurl.VERBOSE, 1)
+ handler.setopt(pycurl.DEBUGFUNCTION, __debug_callback)
+ handler.setopt(pycurl.SSL_VERIFYHOST, 0)
+ handler.setopt(pycurl.SSL_VERIFYPEER, 0)
+ handler.setopt(pycurl.NOSIGNAL, 1) # required for multi-threading
if cookies:
- opener.addheaders.append(("Cookie", ";".join(cookies)))
+ handler.setopt(pycurl.COOKIE, ";".join(cookies).encode("utf-8"))
+ if data:
+ handler.setopt(pycurl.COPYPOSTFIELDS, data.encode("utf-8"))
msg = "Sending HTTP Request to: {url}"
if data:
@@ -559,81 +625,73 @@ class NodeCommunicator(object):
)
try:
- # python3 requires data to be bytes not str
- if data:
- data = data.encode("utf-8")
- result = opener.open(url, data)
- # python3 returns bytes not str
- response_data = result.read().decode("utf-8")
+ handler.perform()
+ response_data = output.getvalue().decode("utf-8")
+ response_code = handler.getinfo(pycurl.RESPONSE_CODE)
self._logger.debug(result_msg.format(
url=url,
- code=result.getcode(),
+ code=response_code,
response=response_data
))
- self._reporter.process(
- reports.node_communication_finished(
- url, result.getcode(), response_data
- )
- )
- return response_data
- except urllib_HTTPError as e:
- # python3 returns bytes not str
- response_data = e.read().decode("utf-8")
- self._logger.debug(result_msg.format(
- url=url,
- code=e.code,
- response=response_data
+ self._reporter.process(reports.node_communication_finished(
+ url, response_code, response_data
))
- self._reporter.process(
- reports.node_communication_finished(url, e.code, response_data)
- )
- if e.code == 400:
+ if response_code == 400:
# old pcsd protocol: error messages are commonly passed in plain
# text in response body with HTTP code 400
# we need to be backward compatible with that
raise NodeCommandUnsuccessfulException(
host, request, response_data.rstrip()
)
- elif e.code == 401:
+ elif response_code == 401:
raise NodeAuthenticationException(
- host, request, "HTTP error: {0}".format(e.code)
+ host, request, "HTTP error: {0}".format(response_code)
)
- elif e.code == 403:
+ elif response_code == 403:
raise NodePermissionDeniedException(
- host, request, "HTTP error: {0}".format(e.code)
+ host, request, "HTTP error: {0}".format(response_code)
)
- elif e.code == 404:
+ elif response_code == 404:
raise NodeUnsupportedCommandException(
- host, request, "HTTP error: {0}".format(e.code)
+ host, request, "HTTP error: {0}".format(response_code)
)
- else:
+ elif response_code >= 400:
raise NodeCommunicationException(
- host, request, "HTTP error: {0}".format(e.code)
+ host, request, "HTTP error: {0}".format(response_code)
)
- except urllib_URLError as e:
+ return response_data
+ except pycurl.error as e:
+ # In pycurl versions older than 7.19.3 it is not possible to set the
+ # NOPROXY option. Therefore, to properly support proxy settings,
+ # we have to use environment variables.
+ if is_proxy_set(os.environ):
+ self._logger.warning("Proxy is set")
+ self._reporter.process(
+ reports.node_communication_proxy_is_set()
+ )
+ errno, reason = e.args
msg = "Unable to connect to {node} ({reason})"
- self._logger.debug(msg.format(node=host, reason=e.reason))
+ self._logger.debug(msg.format(node=host, reason=reason))
self._reporter.process(
- reports.node_communication_not_connected(host, e.reason)
+ reports.node_communication_not_connected(host, reason)
)
- raise NodeConnectionException(host, request, e.reason)
-
- def __get_opener(self):
- # enable self-signed certificates
- # https://www.python.org/dev/peps/pep-0476/
- # http://bugs.python.org/issue21308
- if (
- hasattr(ssl, "_create_unverified_context")
- and
- "context" in inspect.getargspec(urllib_HTTPSHandler.__init__).args
- ):
- opener = urllib_build_opener(
- urllib_HTTPSHandler(context=ssl._create_unverified_context()),
- urllib_HTTPCookieProcessor()
+ if errno == pycurl.E_OPERATION_TIMEDOUT:
+ raise NodeConnectionTimedOutException(host, request, reason)
+ else:
+ raise NodeConnectionException(host, request, reason)
+ finally:
+ debug_data = debug_output.getvalue().decode("utf-8", "ignore")
+ self._logger.debug(
+ (
+ "Communication debug info for calling: {url}\n"
+ "--Debug Communication Info Start--\n"
+ "{data}\n"
+ "--Debug Communication Info End--"
+ ).format(url=url, data=debug_data)
+ )
+ self._reporter.process(
+ reports.node_communication_debug_info(url, debug_data)
)
- else:
- opener = urllib_build_opener(urllib_HTTPCookieProcessor())
- return opener
def __prepare_cookies(self, host):
# Let's be safe about characters in variables (they can come from env)
@@ -649,7 +707,9 @@ class NodeCommunicator(object):
if self._groups:
cookies.append("CIB_user_groups={0}".format(
# python3 requires the value to be bytes not str
- base64.b64encode(" ".join(self._groups).encode("utf-8"))
+ base64.b64encode(
+ " ".join(self._groups).encode("utf-8")
+ ).decode("utf-8")
))
return cookies
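[Editor's note] call_host() above now drives the request through pycurl, collecting the body via WRITEFUNCTION and mapping curl's E_OPERATION_TIMEDOUT to the new NodeConnectionTimedOutException. A minimal standalone sketch of the same pycurl pattern (plain pycurl rather than pcs.common.pcs_pycurl; the URL and timeout are illustrative):

```python
# Minimal standalone sketch of the pycurl request pattern used above
# (plain pycurl, not pcs.common.pcs_pycurl); URL and timeout are examples.
import io
import pycurl

output = io.BytesIO()
handler = pycurl.Curl()
handler.setopt(pycurl.URL, b"https://localhost:2224/remote/status")
handler.setopt(pycurl.TIMEOUT_MS, 10 * 1000)      # 10 second timeout
handler.setopt(pycurl.WRITEFUNCTION, output.write)
handler.setopt(pycurl.SSL_VERIFYHOST, 0)          # pcsd uses a self-signed cert
handler.setopt(pycurl.SSL_VERIFYPEER, 0)
try:
    handler.perform()
    print(handler.getinfo(pycurl.RESPONSE_CODE), output.getvalue().decode("utf-8"))
except pycurl.error as e:
    errno, reason = e.args
    if errno == pycurl.E_OPERATION_TIMEDOUT:
        print("timed out:", reason)
    else:
        print("connection failed:", reason)
finally:
    handler.close()
```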
diff --git a/pcs/lib/node.py b/pcs/lib/node.py
index f3bfe57..3de4843 100644
--- a/pcs/lib/node.py
+++ b/pcs/lib/node.py
@@ -9,6 +9,16 @@ from __future__ import (
class NodeNotFound(Exception):
pass
+def node_addresses_contain_host(node_addresses_list, host):
+ return (
+ host in [node.ring0 for node in node_addresses_list]
+ or
+ host in [node.ring1 for node in node_addresses_list if node.ring1]
+ )
+
+def node_addresses_contain_name(node_addresses_list, name):
+ return name in [node.name for node in node_addresses_list]
+
class NodeAddresses(object):
def __init__(self, ring0, ring1=None, name=None, id=None):
@@ -29,6 +39,20 @@ class NodeAddresses(object):
def __lt__(self, other):
return self.label < other.label
+ def __repr__(self):
+ #the "dict" with name and id is "written" inside string because in
+ #python3 the order is not
+ return str("<{0}.{1} {2}, {{'name': {3}, 'id': {4}}}>").format(
+ self.__module__,
+ self.__class__.__name__,
+ repr(
+ [self.ring0] if self.ring1 is None
+ else [self.ring0, self.ring1]
+ ),
+ repr(self.name),
+ repr(self.id),
+ )
+
@property
def ring0(self):
return self._ring0
@@ -71,6 +95,13 @@ class NodeAddressesList(object):
def __reversed__(self):
return self._list.__reversed__()
+ def __add__(self, other):
+ if isinstance(other, NodeAddressesList):
+ return NodeAddressesList(self._list + other._list)
+ #Assume that the other operand is a list. If it is not a list, this
+ #correctly raises.
+ return NodeAddressesList(self._list + other)
+
def find_by_label(self, label):
for node in self._list:
if node.label == label:
diff --git a/pcs/lib/node_communication_format.py b/pcs/lib/node_communication_format.py
new file mode 100644
index 0000000..f05d9b2
--- /dev/null
+++ b/pcs/lib/node_communication_format.py
@@ -0,0 +1,161 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+from collections import namedtuple
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+import base64
+
+def create_pcmk_remote_actions(action_list):
+ return dict([
+ (
+ "pacemaker_remote {0}".format(action),
+ service_cmd_format(
+ "pacemaker_remote",
+ action
+ )
+ )
+ for action in action_list
+ ])
+
+def pcmk_authkey_format(authkey_content):
+ """
+ Return a dict usable in the communication with a remote/put_file
+ authkey_content is raw authkey content
+ """
+ return {
+ "data": base64.b64encode(authkey_content).decode("utf-8"),
+ "type": "pcmk_remote_authkey",
+ "rewrite_existing": True,
+ }
+
+def corosync_authkey_format(authkey_content):
+ """
+ Return a dict usable in the communication with a remote/put_file
+ authkey_content is raw authkey content
+ """
+ return {
+ "data": base64.b64encode(authkey_content).decode("utf-8"),
+ "type": "corosync_authkey",
+ "rewrite_existing": True,
+ }
+
+def pcmk_authkey_file(authkey_content):
+ return {
+ "pacemaker_remote authkey": pcmk_authkey_format(authkey_content)
+ }
+
+def corosync_authkey_file(authkey_content):
+ return {
+ "corosync authkey": corosync_authkey_format(authkey_content)
+ }
+
+def service_cmd_format(service, command):
+ """
+ Return a dict usable in the communication with a remote/run_action
+ string service is name of requested service (eg. pacemaker_remote)
+ string command specifies an action on service (eg. start)
+ """
+ return {
+ "type": "service_command",
+ "service": service,
+ "command": command,
+ }
+
+class Result(namedtuple("Result", "code message")):
+ """ Wrapper over some call results """
+
+def unpack_items_from_response(main_response, main_key, node_label):
+ """
+ Check format of main_response and return main_response[main_key].
+ dict main_response has on the key 'main_key' dict with item name as key and
+ dict with result as value. E.g.
+ {
+ "files": {
+ "file1": {"code": "success", "message": ""}
+ }
+ }
+ string main_key is the name of the key under which the dict with results is stored
+ string node_label is a node label for reporting an invalid format
+ """
+ is_in_expected_format = (
+ isinstance(main_response, dict)
+ and
+ main_key in main_response
+ and
+ isinstance(main_response[main_key], dict)
+ )
+
+ if not is_in_expected_format:
+ raise LibraryError(reports.invalid_response_format(node_label))
+
+ return main_response[main_key]
+
+def response_items_to_result(response_items, expected_keys, node_label):
+ """
+ Check format of response_items and return a dict with the values transformed
+ to Result. E.g.
+ {"file1": {"code": "success", "message": ""}}
+ ->
+ {"file1": Result("success", "")}}
+
+ dict response_items has item name as key and a result dict as value.
+ list expected_keys contains the keys expected in the dict main_response[main_key]
+ string node_label is a node label for reporting an invalid format
+ """
+ if set(expected_keys) != set(response_items.keys()):
+ raise LibraryError(reports.invalid_response_format(node_label))
+
+ for result in response_items.values():
+ if(
+ not isinstance(result, dict)
+ or
+ "code" not in result
+ or
+ "message" not in result
+ ):
+ raise LibraryError(reports.invalid_response_format(node_label))
+
+ return dict([
+ (
+ file_key,
+ Result(raw_result["code"], raw_result["message"])
+ )
+ for file_key, raw_result in response_items.items()
+ ])
+
+
+def response_to_result(
+ main_response, main_key, expected_keys, node_label
+):
+ """
+ Validate response (from remote/put_file or remote/run_action) and transform
+ results from dict to Result.
+
+ dict main_response contains, under the key 'main_key', a dict with item
+ name as key and a result dict as value. E.g.
+ {
+ "files": {
+ "file1": {"code": "success", "message": ""}
+ }
+ }
+ string main_key is the name of the key under which the results dict is stored
+ list expected_keys contains the keys expected in the dict main_response[main_key]
+ string node_label is a node label for reporting an invalid format
+ """
+ return response_items_to_result(
+ unpack_items_from_response(main_response, main_key, node_label),
+ expected_keys,
+ node_label
+ )
+
+def get_format_result(code_message_map):
+ def format_result(result):
+ if result.code in code_message_map:
+ return code_message_map[result.code]
+
+ return result.message
+ return format_result
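For illustration, a short sketch of how these helpers combine: build the action payload, validate the node's JSON reply, and turn it into Result tuples. The sample response dict is invented; the key "actions" matches the response_key used for remote/manage_services in pcs/lib/nodes_task.py below.

    from pcs.lib import node_communication_format as fmt

    # Request payload: one entry per pacemaker_remote action.
    actions = fmt.create_pcmk_remote_actions(["start", "enable"])

    # A made-up reply in the expected format.
    response = {
        "actions": {
            "pacemaker_remote start": {"code": "success", "message": ""},
            "pacemaker_remote enable": {"code": "fail", "message": "not installed"},
        }
    }

    results = fmt.response_to_result(response, "actions", actions.keys(), "node1")
    format_result = fmt.get_format_result({"fail": "Operation failed."})
    for action, result in sorted(results.items()):
        print(action, result.code, format_result(result))
    # -> pacemaker_remote enable fail Operation failed.
    #    pacemaker_remote start success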
diff --git a/pcs/lib/nodes_task.py b/pcs/lib/nodes_task.py
index e94d327..703609b 100644
--- a/pcs/lib/nodes_task.py
+++ b/pcs/lib/nodes_task.py
@@ -5,15 +5,17 @@ from __future__ import (
unicode_literals,
)
+from collections import defaultdict
import json
from pcs.common import report_codes
from pcs.common.tools import run_parallel as tools_run_parallel
-from pcs.lib import reports
-from pcs.lib.errors import LibraryError, ReportItemSeverity
+from pcs.lib import reports, node_communication_format
+from pcs.lib.errors import LibraryError, ReportItemSeverity, ReportListAnalyzer
from pcs.lib.external import (
NodeCommunicator,
NodeCommunicationException,
+ NodeCommandUnsuccessfulException,
node_communicator_exception_to_report_item,
parallel_nodes_communication_helper,
)
@@ -23,6 +25,56 @@ from pcs.lib.corosync import (
)
+def _call_for_json(
+ node_communicator, node, request_path, report_items,
+ data=None, request_timeout=None, warn_on_communication_exception=False
+):
+ """
+ Return python object parsed from a json call response.
+ """
+ try:
+ return json.loads(node_communicator.call_node(
+ node,
+ request_path,
+ data=None if data is None
+ else NodeCommunicator.format_data_dict(data)
+ ,
+ request_timeout=request_timeout
+ ))
+ except NodeCommandUnsuccessfulException as e:
+ report_items.append(
+ reports.node_communication_command_unsuccessful(
+ e.node,
+ e.command,
+ e.reason,
+ severity=(
+ ReportItemSeverity.WARNING
+ if warn_on_communication_exception else
+ ReportItemSeverity.ERROR
+ ),
+ forceable=(
+ None if warn_on_communication_exception
+ else report_codes.SKIP_OFFLINE_NODES
+ ),
+ )
+ )
+
+ except NodeCommunicationException as e:
+ report_items.append(
+ node_communicator_exception_to_report_item(
+ e,
+ ReportItemSeverity.WARNING if warn_on_communication_exception
+ else ReportItemSeverity.ERROR
+ ,
+ forceable=None if warn_on_communication_exception
+ else report_codes.SKIP_OFFLINE_NODES
+ )
+ )
+ except ValueError:
+ #e.g. response is not in json format
+ report_items.append(reports.invalid_response_format(node.label))
+
+
def distribute_corosync_conf(
node_communicator, reporter, node_addr_list, config_text,
skip_offline_nodes=False
@@ -177,3 +229,276 @@ def node_check_auth(communicator, node):
"remote/check_auth",
NodeCommunicator.format_data_dict({"check_auth_only": 1})
)
+
+def availability_checker_node(availability_info, report_items, node_label):
+ """
+ Check if availability_info means that the node is suitable as a cluster
+ (corosync) node.
+ """
+ if availability_info["node_available"]:
+ return
+
+ if availability_info.get("pacemaker_running", False):
+ report_items.append(reports.cannot_add_node_is_running_service(
+ node_label,
+ "pacemaker"
+ ))
+ return
+
+ if availability_info.get("pacemaker_remote", False):
+ report_items.append(reports.cannot_add_node_is_running_service(
+ node_label,
+ "pacemaker_remote"
+ ))
+ return
+
+ report_items.append(reports.cannot_add_node_is_in_cluster(node_label))
+
+def availability_checker_remote_node(
+ availability_info, report_items, node_label
+):
+ """
+ Check if availability_info means that the node is suitable as a remote node.
+ """
+ if availability_info["node_available"]:
+ return
+
+ if availability_info.get("pacemaker_running", False):
+ report_items.append(reports.cannot_add_node_is_running_service(
+ node_label,
+ "pacemaker"
+ ))
+ return
+
+ if not availability_info.get("pacemaker_remote", False):
+ report_items.append(reports.cannot_add_node_is_in_cluster(node_label))
+ return
+
+
+def check_can_add_node_to_cluster(
+ node_communicator, node, report_items,
+ check_response=availability_checker_node
+):
+ """
+ Analyze the result of the node_available check to find out whether it is
+ possible to use the node as a cluster node.
+
+ NodeCommunicator node_communicator is an object for making the http request
+ NodeAddresses node specifies the destination url
+ list report_items is a place where report items should be collected
+ callable check_response -- makes a decision about availability based on
+ the response info
+ """
+ safe_report_items = []
+ availability_info = _call_for_json(
+ node_communicator,
+ node,
+ "remote/node_available",
+ safe_report_items
+ )
+ report_items.extend(safe_report_items)
+
+ if ReportListAnalyzer(safe_report_items).error_list:
+ return
+
+ is_in_expected_format = (
+ isinstance(availability_info, dict)
+ and
+ #node_available is a mandatory field
+ "node_available" in availability_info
+ )
+
+ if not is_in_expected_format:
+ report_items.append(reports.invalid_response_format(node.label))
+ return
+
+ check_response(availability_info, report_items, node.label)
+
+def run_actions_on_node(
+ node_communicator, path, response_key, report_processor, node, actions,
+ warn_on_communication_exception=False
+):
+ """
+ NodeCommunicator node_communicator is an object for making the http request
+ NodeAddresses node specifies the destination url
+ dict actions has a key that identifies the action and a value that is a dict
+ with data specific to the action type. Mandatory keys there are:
+ * type - the type of the action (eg. "service_command")
+ For type == 'service_command' the mandatory keys are:
+ * service - specifies the service (eg. pacemaker_remote)
+ * command - specifies the command to be applied on the service
+ (eg. enable or start)
+ """
+ report_items = []
+ action_results = _call_for_json(
+ node_communicator,
+ node,
+ path,
+ report_items,
+ [("data_json", json.dumps(actions))],
+ warn_on_communication_exception=warn_on_communication_exception
+ )
+
+ #can raise
+ report_processor.process_list(report_items)
+ #If there was a communication error and --skip-offline is in effect, no
+ #exception was raised. If there is no result, we cannot process it.
+ #Note: the error may be caused by an older pcsd daemon not supporting
+ #commands sent by a newer client.
+ if not action_results:
+ return
+
+
+ return node_communication_format.response_to_result(
+ action_results,
+ response_key,
+ actions.keys(),
+ node.label,
+ )
+
+def _run_actions_on_multiple_nodes(
+ node_communicator, url, response_key, report_processor, create_start_report,
+ actions, node_addresses_list, is_success,
+ create_success_report, create_error_report, force_code, format_result,
+ allow_incomplete_distribution=False, description=""
+):
+ error_map = defaultdict(dict)
+ def worker(node_addresses):
+ result = run_actions_on_node(
+ node_communicator,
+ url,
+ response_key,
+ report_processor,
+ node_addresses,
+ actions,
+ warn_on_communication_exception=allow_incomplete_distribution,
+ )
+ #If there was a communication error and --skip-offline is in effect, no
+ #exception was raised. If there is no result, we cannot process it.
+ #Note: the error may be caused by an older pcsd daemon not supporting
+ #commands sent by a newer client.
+ if not result:
+ return
+
+ for key, item_response in sorted(result.items()):
+ if is_success(key, item_response):
+ #only successes are processed individually
+ report_processor.process(
+ create_success_report(node_addresses.label, key)
+ )
+ else:
+ error_map[node_addresses.label][key] = format_result(
+ item_response
+ )
+
+ report_processor.process(create_start_report(
+ actions.keys(),
+ [node.label for node in node_addresses_list],
+ description
+ ))
+
+ parallel_nodes_communication_helper(
+ worker,
+ [([node_addresses], {}) for node_addresses in node_addresses_list],
+ report_processor,
+ allow_incomplete_distribution,
+ )
+
+ #now we process errors
+ if error_map:
+ make_report = reports.get_problem_creator(
+ force_code,
+ allow_incomplete_distribution
+ )
+ report_processor.process_list([
+ make_report(create_error_report, node_name, action_key, message)
+ for node_name, errors in error_map.items()
+ for action_key, message in errors.items()
+ ])
+
+def distribute_files(
+ node_communicator, report_processor, file_definitions, node_addresses_list,
+ allow_incomplete_distribution=False, description=""
+):
+ """
+ Put files specified in file_definitions to nodes specified in
+ node_addresses_list.
+
+ NodeCommunicator node_communicator is an object for making the http request
+ NodeAddresses node specifies the destination url
+ dict file_definitions has a key that identifies the file and a value that is
+ a dict with data specific to the file type. Mandatory keys there are:
+ * type - the type of the file (eg. "booth_authfile" or "pcmk_remote_authkey")
+ * data - the content of the file in a file specific format (e.g.
+ binary content is encoded by base64)
+ A common optional key is "rewrite_existing" (True/False) which specifies
+ the behaviour when the file already exists.
+ bool allow_incomplete_distribution -- report success even if some node(s)
+ are unavailable
+ """
+ _run_actions_on_multiple_nodes(
+ node_communicator,
+ "remote/put_file",
+ "files",
+ report_processor,
+ reports.files_distribution_started,
+ file_definitions,
+ node_addresses_list,
+ lambda key, response: response.code in [
+ "written",
+ "rewritten",
+ "same_content",
+ ],
+ reports.file_distribution_success,
+ reports.file_distribution_error,
+ report_codes.SKIP_FILE_DISTRIBUTION_ERRORS,
+ node_communication_format.get_format_result({
+ "conflict": "File already exists",
+ }),
+ allow_incomplete_distribution,
+ description,
+ )
+
+def remove_files(
+ node_communicator, report_processor, file_definitions, node_addresses_list,
+ allow_incomplete_distribution=False, description=""
+):
+ _run_actions_on_multiple_nodes(
+ node_communicator,
+ "remote/remove_file",
+ "files",
+ report_processor,
+ reports.files_remove_from_node_started,
+ file_definitions,
+ node_addresses_list,
+ lambda key, response: response.code in ["deleted", "not_found"],
+ reports.file_remove_from_node_success,
+ reports.file_remove_from_node_error,
+ report_codes.SKIP_FILE_DISTRIBUTION_ERRORS,
+ node_communication_format.get_format_result({}),
+ allow_incomplete_distribution,
+ description,
+ )
+
+def run_actions_on_multiple_nodes(
+ node_communicator, report_processor, action_definitions, is_success,
+ node_addresses_list, allow_fails=False, description=""
+):
+ _run_actions_on_multiple_nodes(
+ node_communicator,
+ "remote/manage_services",
+ "actions",
+ report_processor,
+ reports.service_commands_on_nodes_started,
+ action_definitions,
+ node_addresses_list,
+ is_success,
+ reports.service_command_on_node_success,
+ reports.service_command_on_node_error,
+ report_codes.SKIP_ACTION_ON_NODES_ERRORS,
+ node_communication_format.get_format_result({
+ "fail": "Operation failed.",
+ }),
+ allow_fails,
+ description,
+ )
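As a usage sketch of the helpers above (not the exact call site used by the upstream commands): the pacemaker authkey is read through PacemakerEnv, added below in pcs/lib/pacemaker/env.py, wrapped by pcmk_authkey_file and pushed to the nodes with distribute_files. The communicator, report processor, node list and description string are assumed to come from the caller.

    from pcs.lib import node_communication_format, nodes_task
    from pcs.lib.pacemaker.env import PacemakerEnv

    def share_pcmk_authkey(node_communicator, report_processor, node_addresses_list):
        # Sketch only: the three arguments are provided by the library environment.
        env = PacemakerEnv()
        if not env.has_authkey:
            return
        nodes_task.distribute_files(
            node_communicator,
            report_processor,
            node_communication_format.pcmk_authkey_file(env.get_authkey_content()),
            node_addresses_list,
            allow_incomplete_distribution=False,
            description="remote node configuration files",  # arbitrary example text
        )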
diff --git a/pcs/test/tools/test/__init__.py b/pcs/lib/pacemaker/__init__.py
similarity index 100%
copy from pcs/test/tools/test/__init__.py
copy to pcs/lib/pacemaker/__init__.py
diff --git a/pcs/lib/pacemaker/env.py b/pcs/lib/pacemaker/env.py
new file mode 100644
index 0000000..d852ba4
--- /dev/null
+++ b/pcs/lib/pacemaker/env.py
@@ -0,0 +1,28 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.common import env_file_role_codes
+from pcs.lib.env_file import RealFile
+from pcs import settings
+
+
+class PacemakerEnv(object):
+ def __init__(self):
+ """
+ callable get_cib should return cib as lxml tree
+ """
+ self.__authkey = RealFile(
+ file_role=env_file_role_codes.PACEMAKER_AUTHKEY,
+ file_path=settings.pacemaker_authkey_file,
+ )
+
+ @property
+ def has_authkey(self):
+ return self.__authkey.exists
+
+ def get_authkey_content(self):
+ return self.__authkey.read()
diff --git a/pcs/lib/pacemaker.py b/pcs/lib/pacemaker/live.py
similarity index 62%
rename from pcs/lib/pacemaker.py
rename to pcs/lib/pacemaker/live.py
index 6747b22..e58a264 100644
--- a/pcs/lib/pacemaker.py
+++ b/pcs/lib/pacemaker/live.py
@@ -9,22 +9,26 @@ import os.path
from lxml import etree
from pcs import settings
-from pcs.common.tools import join_multilines
+from pcs.common.tools import (
+ join_multilines,
+ xml_fromstring
+)
from pcs.lib import reports
+from pcs.lib.cib.tools import get_pacemaker_version_by_which_cib_was_validated
from pcs.lib.errors import LibraryError
-from pcs.lib.pacemaker_state import ClusterState
+from pcs.lib.pacemaker.state import ClusterState
__EXITCODE_WAIT_TIMEOUT = 62
__EXITCODE_CIB_SCOPE_VALID_BUT_NOT_PRESENT = 6
+__EXITCODE_CIB_SCHEMA_IS_THE_LATEST_AVAILABLE = 211
__RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD = 100
class CrmMonErrorException(LibraryError):
pass
-# syntactic sugar for getting a full path to a pacemaker executable
-def __exec(name):
- return os.path.join(settings.pacemaker_binaries, name)
+
+### status
def get_cluster_status_xml(runner):
stdout, stderr, retval = runner.run(
@@ -36,6 +40,8 @@ def get_cluster_status_xml(runner):
)
return stdout
+### cib
+
def get_cib_xml(runner, scope=None):
command = [__exec("cibadmin"), "--local", "--query"]
if scope:
@@ -55,85 +61,86 @@ def get_cib_xml(runner, scope=None):
)
return stdout
+def parse_cib_xml(xml):
+ return xml_fromstring(xml)
+
def get_cib(xml):
try:
- return etree.fromstring(xml)
+ return parse_cib_xml(xml)
except (etree.XMLSyntaxError, etree.DocumentInvalid):
raise LibraryError(reports.cib_load_error_invalid_format())
-def replace_cib_configuration_xml(runner, xml, cib_upgraded=False):
- cmd = [__exec("cibadmin"), "--replace", "--verbose", "--xml-pipe"]
- if not cib_upgraded:
- cmd += ["--scope", "configuration"]
+def replace_cib_configuration_xml(runner, xml):
+ cmd = [
+ __exec("cibadmin"),
+ "--replace",
+ "--verbose",
+ "--xml-pipe",
+ "--scope", "configuration",
+ ]
stdout, stderr, retval = runner.run(cmd, stdin_string=xml)
if retval != 0:
raise LibraryError(reports.cib_push_error(stderr, stdout))
-def replace_cib_configuration(runner, tree, cib_upgraded=False):
+def replace_cib_configuration(runner, tree):
#etree returns bytes: b'xml'
#python 3 removed .encode() from bytes
#run(...) calls subprocess.Popen.communicate which calls encode...
#so here is bytes to str conversion
xml = etree.tostring(tree).decode()
- return replace_cib_configuration_xml(runner, xml, cib_upgraded)
+ return replace_cib_configuration_xml(runner, xml)
-def get_local_node_status(runner):
- try:
- cluster_status = ClusterState(get_cluster_status_xml(runner))
- except CrmMonErrorException:
- return {"offline": True}
- node_name = __get_local_node_name(runner)
- for node_status in cluster_status.node_section.nodes:
- if node_status.attrs.name == node_name:
- result = {
- "offline": False,
- }
- for attr in (
- 'id', 'name', 'type', 'online', 'standby', 'standby_onfail',
- 'maintenance', 'pending', 'unclean', 'shutdown', 'expected_up',
- 'is_dc', 'resources_running',
- ):
- result[attr] = getattr(node_status.attrs, attr)
- return result
- raise LibraryError(reports.node_not_found(node_name))
+def ensure_cib_version(runner, cib, version):
+ """
+ This method ensures that the specified cib was validated by pacemaker with
+ version 'version' or newer. If the cib does not correspond to this version,
+ the method will try to upgrade the cib.
+ Returns the cib which was validated by pacemaker version 'version' or later,
+ or None if the given cib already meets the required version.
+ Raises LibraryError on any failure.
-def resource_cleanup(runner, resource=None, node=None, force=False):
- if not force and not node and not resource:
- summary = ClusterState(get_cluster_status_xml(runner)).summary
- operations = summary.nodes.attrs.count * summary.resources.attrs.count
- if operations > __RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD:
- raise LibraryError(
- reports.resource_cleanup_too_time_consuming(
- __RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD
- )
- )
+ CommandRunner runner
+ etree cib cib tree
+ tuple version tuple of integers (<major>, <minor>, <revision>)
+ """
+ current_version = get_pacemaker_version_by_which_cib_was_validated(cib)
+ if current_version >= version:
+ return None
- cmd = [__exec("crm_resource"), "--cleanup"]
- if resource:
- cmd.extend(["--resource", resource])
- if node:
- cmd.extend(["--node", node])
+ _upgrade_cib(runner)
+ new_cib_xml = get_cib_xml(runner)
- stdout, stderr, retval = runner.run(cmd)
+ try:
+ new_cib = parse_cib_xml(new_cib_xml)
+ except (etree.XMLSyntaxError, etree.DocumentInvalid) as e:
+ raise LibraryError(reports.cib_upgrade_failed(str(e)))
- if retval != 0:
+ current_version = get_pacemaker_version_by_which_cib_was_validated(new_cib)
+ if current_version >= version:
+ return new_cib
+
+ raise LibraryError(reports.unable_to_upgrade_cib_to_required_version(
+ current_version, version
+ ))
+
+def _upgrade_cib(runner):
+ """
+ Upgrade CIB to the latest schema available locally or clusterwise.
+ CommandRunner runner
+ """
+ stdout, stderr, retval = runner.run(
+ [__exec("cibadmin"), "--upgrade", "--force"]
+ )
+ # If we are already on the latest schema available, do not consider it an
+ # error. We do not know here what version is required. The caller however
+ # knows and is responsible for dealing with it.
+ if retval not in (0, __EXITCODE_CIB_SCHEMA_IS_THE_LATEST_AVAILABLE):
raise LibraryError(
- reports.resource_cleanup_error(
- join_multilines([stderr, stdout]),
- resource,
- node
- )
+ reports.cib_upgrade_failed(join_multilines([stderr, stdout]))
)
- # usefull output (what has been done) goes to stderr
- return join_multilines([stdout, stderr])
-def nodes_standby(runner, node_list=None, all_nodes=False):
- return __nodes_standby_unstandby(runner, True, node_list, all_nodes)
+### wait for idle
-def nodes_unstandby(runner, node_list=None, all_nodes=False):
- return __nodes_standby_unstandby(runner, False, node_list, all_nodes)
-
-def has_resource_wait_support(runner):
+def has_wait_for_idle_support(runner):
# returns 1 on success so we don't care about retval
stdout, stderr, dummy_retval = runner.run(
[__exec("crm_resource"), "-?"]
@@ -141,11 +148,17 @@ def has_resource_wait_support(runner):
# help goes to stderr but we check stdout as well if that gets changed
return "--wait" in stderr or "--wait" in stdout
-def ensure_resource_wait_support(runner):
- if not has_resource_wait_support(runner):
- raise LibraryError(reports.resource_wait_not_supported())
+def ensure_wait_for_idle_support(runner):
+ if not has_wait_for_idle_support(runner):
+ raise LibraryError(reports.wait_for_idle_not_supported())
+
+def wait_for_idle(runner, timeout=None):
+ """
+ Run waiting command. Raise LibraryError if command failed.
-def wait_for_resources(runner, timeout=None):
+ runner is a preconfigured object for running external programs
+ string timeout is the waiting timeout
+ """
args = [__exec("crm_resource"), "--wait"]
if timeout is not None:
args.append("--timeout={0}".format(timeout))
@@ -156,58 +169,20 @@ def wait_for_resources(runner, timeout=None):
# We use stdout just to be sure if that's get changed.
if retval == __EXITCODE_WAIT_TIMEOUT:
raise LibraryError(
- reports.resource_wait_timed_out(
+ reports.wait_for_idle_timed_out(
join_multilines([stderr, stdout])
)
)
else:
raise LibraryError(
- reports.resource_wait_error(
+ reports.wait_for_idle_error(
join_multilines([stderr, stdout])
)
)
-def __nodes_standby_unstandby(
- runner, standby=True, node_list=None, all_nodes=False
-):
- if node_list or all_nodes:
- # TODO once we switch to editing CIB instead of running crm_stanby, we
- # cannot always relly on getClusterState. If we're not editing a CIB
- # from a live cluster, there is no status.
- state = ClusterState(get_cluster_status_xml(runner)).node_section.nodes
- known_nodes = [node.attrs.name for node in state]
-
- if all_nodes:
- node_list = known_nodes
- elif node_list:
- report = []
- for node in node_list:
- if node not in known_nodes:
- report.append(reports.node_not_found(node))
- if report:
- raise LibraryError(*report)
-
- # TODO Edit CIB directly instead of running commands for each node; be aware
- # remote nodes might not be in the CIB yet so we need to put them there.
- cmd_template = [__exec("crm_standby")]
- cmd_template.extend(["-v", "on"] if standby else ["-D"])
- cmd_list = []
- if node_list:
- for node in node_list:
- cmd_list.append(cmd_template + ["-N", node])
- else:
- cmd_list.append(cmd_template)
- report = []
- for cmd in cmd_list:
- stdout, stderr, retval = runner.run(cmd)
- if retval != 0:
- report.append(
- reports.common_error(join_multilines([stderr, stdout]))
- )
- if report:
- raise LibraryError(*report)
+### nodes
-def __get_local_node_name(runner):
+def get_local_node_name(runner):
# It would be possible to run "crm_node --name" to get the name in one call,
# but it returns false names when cluster is not running (or we are on
# a remote node). Getting node id first is reliable since it fails in those
@@ -235,3 +210,76 @@ def __get_local_node_name(runner):
reports.pacemaker_local_node_name_not_found("node name is null")
)
return node_name
+
+def get_local_node_status(runner):
+ try:
+ cluster_status = ClusterState(get_cluster_status_xml(runner))
+ except CrmMonErrorException:
+ return {"offline": True}
+ node_name = get_local_node_name(runner)
+ for node_status in cluster_status.node_section.nodes:
+ if node_status.attrs.name == node_name:
+ result = {
+ "offline": False,
+ }
+ for attr in (
+ 'id', 'name', 'type', 'online', 'standby', 'standby_onfail',
+ 'maintenance', 'pending', 'unclean', 'shutdown', 'expected_up',
+ 'is_dc', 'resources_running',
+ ):
+ result[attr] = getattr(node_status.attrs, attr)
+ return result
+ raise LibraryError(reports.node_not_found(node_name))
+
+def remove_node(runner, node_name):
+ stdout, stderr, retval = runner.run([
+ __exec("crm_node"),
+ "--force",
+ "--remove",
+ node_name,
+ ])
+ if retval != 0:
+ raise LibraryError(
+ reports.node_remove_in_pacemaker_failed(
+ node_name,
+ reason=join_multilines([stderr, stdout])
+ )
+ )
+
+### resources
+
+def resource_cleanup(runner, resource=None, node=None, force=False):
+ if not force and not node and not resource:
+ summary = ClusterState(get_cluster_status_xml(runner)).summary
+ operations = summary.nodes.attrs.count * summary.resources.attrs.count
+ if operations > __RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD:
+ raise LibraryError(
+ reports.resource_cleanup_too_time_consuming(
+ __RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD
+ )
+ )
+
+ cmd = [__exec("crm_resource"), "--cleanup"]
+ if resource:
+ cmd.extend(["--resource", resource])
+ if node:
+ cmd.extend(["--node", node])
+
+ stdout, stderr, retval = runner.run(cmd)
+
+ if retval != 0:
+ raise LibraryError(
+ reports.resource_cleanup_error(
+ join_multilines([stderr, stdout]),
+ resource,
+ node
+ )
+ )
+ # useful output (what has been done) goes to stderr
+ return join_multilines([stdout, stderr])
+
+### tools
+
+# shortcut for getting a full path to a pacemaker executable
+def __exec(name):
+ return os.path.join(settings.pacemaker_binaries, name)
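A small usage sketch of ensure_cib_version, assuming a CommandRunner set up elsewhere; the (2, 5, 0) threshold is an arbitrary example of the documented (major, minor, revision) tuple:

    from pcs.lib.pacemaker.live import ensure_cib_version, get_cib, get_cib_xml

    def get_cib_validated_for(runner, min_version=(2, 5, 0)):
        cib = get_cib(get_cib_xml(runner))
        upgraded_cib = ensure_cib_version(runner, cib, min_version)
        # ensure_cib_version returns None when the CIB already satisfies the
        # required schema, so fall back to the original tree in that case.
        return upgraded_cib if upgraded_cib is not None else cib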
diff --git a/pcs/lib/pacemaker_state.py b/pcs/lib/pacemaker/state.py
similarity index 53%
rename from pcs/lib/pacemaker_state.py
rename to pcs/lib/pacemaker/state.py
index b413b90..71809db 100644
--- a/pcs/lib/pacemaker_state.py
+++ b/pcs/lib/pacemaker/state.py
@@ -11,13 +11,22 @@ from __future__ import (
)
import os.path
+from collections import defaultdict
from lxml import etree
from pcs import settings
+from pcs.common.tools import xml_fromstring
from pcs.lib import reports
-from pcs.lib.errors import LibraryError
-from pcs.lib.pacemaker_values import is_true
+from pcs.lib.errors import LibraryError, ReportItemSeverity as severities
+from pcs.lib.pacemaker.values import (
+ is_false,
+ is_true,
+)
+from pcs.lib.xml_tools import find_parent
+
+class ResourceNotFound(Exception):
+ pass
class _Attrs(object):
def __init__(self, owner_name, attrib, required_attrs):
@@ -133,9 +142,9 @@ class _NodeSection(_Element):
'nodes': ('node', _Node),
}
-def _get_valid_cluster_state_dom(xml):
+def get_cluster_state_dom(xml):
try:
- dom = etree.fromstring(xml)
+ dom = xml_fromstring(xml)
if os.path.isfile(settings.crm_mon_schema):
etree.RelaxNG(file=settings.crm_mon_schema).assertValid(dom)
return dom
@@ -149,5 +158,106 @@ class ClusterState(_Element):
}
def __init__(self, xml):
- self.dom = _get_valid_cluster_state_dom(xml)
+ self.dom = get_cluster_state_dom(xml)
super(ClusterState, self).__init__(self.dom)
+
+def _id_xpath_predicate(resource_id):
+ return """(@id="{0}" or starts-with(@id, "{0}:"))""".format(resource_id)
+
+def _get_primitives_for_state_check(
+ cluster_state, resource_id, expected_running
+):
+ primitives = cluster_state.xpath("""
+ .//resource[{predicate_id}]
+ |
+ .//group[{predicate_id}]/resource[{predicate_position}]
+ |
+ .//clone[@id="{id}"]/resource
+ |
+ .//clone[@id="{id}"]/group/resource[{predicate_position}]
+ |
+ .//bundle[@id="{id}"]/replica/resource
+ """.format(
+ id=resource_id,
+ predicate_id=_id_xpath_predicate(resource_id),
+ predicate_position=("last()" if expected_running else "1")
+ ))
+ return [
+ element for element in primitives
+ if not is_true(element.attrib.get("failed", ""))
+ ]
+
+def _get_primitive_roles_with_nodes(primitive_el_list):
+ # Clone resources are represented by multiple primitive elements.
+ roles_with_nodes = defaultdict(set)
+ for resource_element in primitive_el_list:
+ if resource_element.attrib["role"] in ["Started", "Master", "Slave"]:
+ roles_with_nodes[resource_element.attrib["role"]].update([
+ node.attrib["name"]
+ for node in resource_element.findall(".//node")
+ ])
+ return dict([
+ (role, sorted(nodes))
+ for role, nodes in roles_with_nodes.items()
+ ])
+
+def ensure_resource_state(expected_running, cluster_state, resource_id):
+ roles_with_nodes = _get_primitive_roles_with_nodes(
+ _get_primitives_for_state_check(
+ cluster_state,
+ resource_id,
+ expected_running
+ )
+ )
+ if not roles_with_nodes:
+ return reports.resource_does_not_run(
+ resource_id,
+ severities.INFO if not expected_running else severities.ERROR
+ )
+ return reports.resource_running_on_nodes(
+ resource_id,
+ roles_with_nodes,
+ severities.INFO if expected_running else severities.ERROR
+ )
+
+def ensure_resource_running(cluster_state, resource_id):
+ return ensure_resource_state(
+ expected_running=True,
+ cluster_state=cluster_state,
+ resource_id=resource_id,
+ )
+
+def is_resource_managed(cluster_state, resource_id):
+ """
+ Check if the resource is managed
+
+ etree cluster_state -- status of the cluster
+ string resource_id -- id of the resource
+ """
+ primitive_list = cluster_state.xpath("""
+ .//resource[{predicate_id}]
+ |
+ .//group[{predicate_id}]/resource
+ """.format(predicate_id=_id_xpath_predicate(resource_id))
+ )
+ if primitive_list:
+ for primitive in primitive_list:
+ if is_false(primitive.attrib.get("managed", "")):
+ return False
+ clone = find_parent(primitive, ["clone"])
+ if clone is not None and is_false(clone.attrib.get("managed", "")):
+ return False
+ return True
+
+ clone_list = cluster_state.xpath(
+ """.//clone[@id="{0}"]""".format(resource_id)
+ )
+ for clone in clone_list:
+ if is_false(clone.attrib.get("managed", "")):
+ return False
+ for primitive in clone.xpath(".//resource"):
+ if is_false(primitive.attrib.get("managed", "")):
+ return False
+ return True
+
+ raise ResourceNotFound(resource_id)
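To illustrate the xpath lookups above, a made-up status fragment run through the module-internal helpers and is_resource_managed; the element layout mirrors the fixtures in the tests below and the resource names are invented:

    from lxml import etree
    from pcs.lib.pacemaker import state

    resources = etree.fromstring("""
        <resources>
            <resource id="A" role="Started" failed="false" managed="true">
                <node name="node1" id="1"/>
            </resource>
            <clone id="C-clone" managed="false">
                <resource id="C:0" role="Started" failed="false" managed="true"/>
            </clone>
        </resources>
    """)

    primitives = state._get_primitives_for_state_check(resources, "A", True)
    print(state._get_primitive_roles_with_nodes(primitives))
    # {'Started': ['node1']}

    # The clone itself is unmanaged, so the whole C-clone counts as unmanaged.
    print(state.is_resource_managed(resources, "C-clone"))
    # False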
diff --git a/pcs/test/tools/test/__init__.py b/pcs/lib/pacemaker/test/__init__.py
similarity index 100%
copy from pcs/test/tools/test/__init__.py
copy to pcs/lib/pacemaker/test/__init__.py
diff --git a/pcs/test/test_lib_pacemaker.py b/pcs/lib/pacemaker/test/test_live.py
similarity index 74%
rename from pcs/test/test_lib_pacemaker.py
rename to pcs/lib/pacemaker/test/test_live.py
index 7ca7b77..7a53389 100644
--- a/pcs/test/test_lib_pacemaker.py
+++ b/pcs/lib/pacemaker/test/test_live.py
@@ -5,20 +5,21 @@ from __future__ import (
unicode_literals,
)
-from pcs.test.tools.pcs_unittest import TestCase
+from lxml import etree
import os.path
from pcs.test.tools.assertions import (
assert_raise_library_error,
assert_xml_equal,
+ start_tag_error_text,
)
from pcs.test.tools.misc import get_test_resource as rc
-from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.pcs_unittest import TestCase, mock
from pcs.test.tools.xml import XmlManipulation
from pcs import settings
from pcs.common import report_codes
-from pcs.lib import pacemaker as lib
+import pcs.lib.pacemaker.live as lib
from pcs.lib.errors import ReportItemSeverity as Severity
from pcs.lib.external import CommandRunner
@@ -244,27 +245,6 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
stdin_string=xml
)
- def test_cib_upgraded(self):
- xml = "<xml/>"
- expected_stdout = "expected output"
- expected_stderr = ""
- expected_retval = 0
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
- expected_stdout,
- expected_stderr,
- expected_retval
- )
-
- lib.replace_cib_configuration(
- mock_runner, XmlManipulation.from_str(xml).tree, True
- )
-
- mock_runner.run.assert_called_once_with(
- [self.path("cibadmin"), "--replace", "--verbose", "--xml-pipe"],
- stdin_string=xml
- )
-
def test_error(self):
xml = "<xml/>"
expected_stdout = "expected output"
@@ -301,6 +281,132 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
stdin_string=xml
)
+class UpgradeCibTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+
+ def test_success(self):
+ self.mock_runner.run.return_value = "", "", 0
+ lib._upgrade_cib(self.mock_runner)
+ self.mock_runner.run.assert_called_once_with(
+ ["/usr/sbin/cibadmin", "--upgrade", "--force"]
+ )
+
+ def test_error(self):
+ error = "Call cib_upgrade failed (-62): Timer expired"
+ self.mock_runner.run.return_value = "", error, 62
+ assert_raise_library_error(
+ lambda: lib._upgrade_cib(self.mock_runner),
+ (
+ Severity.ERROR,
+ report_codes.CIB_UPGRADE_FAILED,
+ {
+ "reason": error,
+ }
+ )
+ )
+ self.mock_runner.run.assert_called_once_with(
+ ["/usr/sbin/cibadmin", "--upgrade", "--force"]
+ )
+
+ def test_already_at_latest_schema(self):
+ error = ("Call cib_upgrade failed (-211): Schema is already "
+ "the latest available")
+ self.mock_runner.run.return_value = "", error, 211
+ lib._upgrade_cib(self.mock_runner)
+ self.mock_runner.run.assert_called_once_with(
+ ["/usr/sbin/cibadmin", "--upgrade", "--force"]
+ )
+
+ at mock.patch("pcs.lib.pacemaker.live.get_cib_xml")
+ at mock.patch("pcs.lib.pacemaker.live._upgrade_cib")
+class EnsureCibVersionTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+ self.cib = etree.XML('<cib validate-with="pacemaker-2.3.4"/>')
+
+ def test_same_version(self, mock_upgrade, mock_get_cib):
+ self.assertTrue(
+ lib.ensure_cib_version(
+ self.mock_runner, self.cib, (2, 3, 4)
+ ) is None
+ )
+ mock_upgrade.assert_not_called()
+ mock_get_cib.assert_not_called()
+
+ def test_higher_version(self, mock_upgrade, mock_get_cib):
+ self.assertTrue(
+ lib.ensure_cib_version(
+ self.mock_runner, self.cib, (2, 3, 3)
+ ) is None
+ )
+ mock_upgrade.assert_not_called()
+ mock_get_cib.assert_not_called()
+
+ def test_upgraded_same_version(self, mock_upgrade, mock_get_cib):
+ upgraded_cib = '<cib validate-with="pacemaker-2.3.5"/>'
+ mock_get_cib.return_value = upgraded_cib
+ assert_xml_equal(
+ upgraded_cib,
+ etree.tostring(
+ lib.ensure_cib_version(
+ self.mock_runner, self.cib, (2, 3, 5)
+ )
+ ).decode()
+ )
+ mock_upgrade.assert_called_once_with(self.mock_runner)
+ mock_get_cib.assert_called_once_with(self.mock_runner)
+
+ def test_upgraded_higher_version(self, mock_upgrade, mock_get_cib):
+ upgraded_cib = '<cib validate-with="pacemaker-2.3.6"/>'
+ mock_get_cib.return_value = upgraded_cib
+ assert_xml_equal(
+ upgraded_cib,
+ etree.tostring(
+ lib.ensure_cib_version(
+ self.mock_runner, self.cib, (2, 3, 5)
+ )
+ ).decode()
+ )
+ mock_upgrade.assert_called_once_with(self.mock_runner)
+ mock_get_cib.assert_called_once_with(self.mock_runner)
+
+ def test_upgraded_lower_version(self, mock_upgrade, mock_get_cib):
+ mock_get_cib.return_value = etree.tostring(self.cib).decode()
+ assert_raise_library_error(
+ lambda: lib.ensure_cib_version(
+ self.mock_runner, self.cib, (2, 3, 5)
+ ),
+ (
+ Severity.ERROR,
+ report_codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION,
+ {
+ "required_version": "2.3.5",
+ "current_version": "2.3.4"
+ }
+ )
+ )
+ mock_upgrade.assert_called_once_with(self.mock_runner)
+ mock_get_cib.assert_called_once_with(self.mock_runner)
+
+ def test_cib_parse_error(self, mock_upgrade, mock_get_cib):
+ mock_get_cib.return_value = "not xml"
+ assert_raise_library_error(
+ lambda: lib.ensure_cib_version(
+ self.mock_runner, self.cib, (2, 3, 5)
+ ),
+ (
+ Severity.ERROR,
+ report_codes.CIB_UPGRADE_FAILED,
+ {
+ "reason":
+ start_tag_error_text(),
+ }
+ )
+ )
+ mock_upgrade.assert_called_once_with(self.mock_runner)
+ mock_get_cib.assert_called_once_with(self.mock_runner)
+
class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
def test_offline(self):
expected_stdout = "some info"
@@ -508,6 +614,37 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
self.assertEqual(len(return_value_list), mock_runner.run.call_count)
mock_runner.run.assert_has_calls(call_list)
+class RemoveNode(LibraryPacemakerTest):
+ def test_success(self):
+ mock_runner = mock.MagicMock(spec_set=CommandRunner)
+ mock_runner.run.return_value = ("", "", 0)
+ lib.remove_node(
+ mock_runner,
+ "NODE_NAME"
+ )
+ mock_runner.run.assert_called_once_with([
+ self.path("crm_node"),
+ "--force",
+ "--remove",
+ "NODE_NAME",
+ ])
+
+ def test_error(self):
+ mock_runner = mock.MagicMock(spec_set=CommandRunner)
+ expected_stderr = "expected stderr"
+ mock_runner.run.return_value = ("", expected_stderr, 1)
+ assert_raise_library_error(
+ lambda: lib.remove_node(mock_runner, "NODE_NAME") ,
+ (
+ Severity.ERROR,
+ report_codes.NODE_REMOVE_IN_PACEMAKER_FAILED,
+ {
+ "node_name": "NODE_NAME",
+ "reason": expected_stderr,
+ }
+ )
+ )
+
class ResourceCleanupTest(LibraryPacemakerTest):
def fixture_status_xml(self, nodes, resources):
xml_man = XmlManipulation.from_file(rc("crm_mon.minimal.xml"))
@@ -700,7 +837,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
)
self.assertTrue(
- lib.has_resource_wait_support(mock_runner)
+ lib.has_wait_for_idle_support(mock_runner)
)
mock_runner.run.assert_called_once_with(
[self.path("crm_resource"), "-?"]
@@ -718,7 +855,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
)
self.assertTrue(
- lib.has_resource_wait_support(mock_runner)
+ lib.has_wait_for_idle_support(mock_runner)
)
mock_runner.run.assert_called_once_with(
[self.path("crm_resource"), "-?"]
@@ -736,25 +873,31 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
)
self.assertFalse(
- lib.has_resource_wait_support(mock_runner)
+ lib.has_wait_for_idle_support(mock_runner)
)
mock_runner.run.assert_called_once_with(
[self.path("crm_resource"), "-?"]
)
- @mock.patch("pcs.lib.pacemaker.has_resource_wait_support", autospec=True)
+ @mock.patch(
+ "pcs.lib.pacemaker.live.has_wait_for_idle_support",
+ autospec=True
+ )
def test_ensure_support_success(self, mock_obj):
mock_obj.return_value = True
- self.assertEqual(None, lib.ensure_resource_wait_support(mock.Mock()))
+ self.assertEqual(None, lib.ensure_wait_for_idle_support(mock.Mock()))
- @mock.patch("pcs.lib.pacemaker.has_resource_wait_support", autospec=True)
+ @mock.patch(
+ "pcs.lib.pacemaker.live.has_wait_for_idle_support",
+ autospec=True
+ )
def test_ensure_support_error(self, mock_obj):
mock_obj.return_value = False
assert_raise_library_error(
- lambda: lib.ensure_resource_wait_support(mock.Mock()),
+ lambda: lib.ensure_wait_for_idle_support(mock.Mock()),
(
Severity.ERROR,
- report_codes.RESOURCE_WAIT_NOT_SUPPORTED,
+ report_codes.WAIT_FOR_IDLE_NOT_SUPPORTED,
{}
)
)
@@ -770,7 +913,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
expected_retval
)
- self.assertEqual(None, lib.wait_for_resources(mock_runner))
+ self.assertEqual(None, lib.wait_for_idle(mock_runner))
mock_runner.run.assert_called_once_with(
[self.path("crm_resource"), "--wait"]
@@ -788,7 +931,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
expected_retval
)
- self.assertEqual(None, lib.wait_for_resources(mock_runner, timeout))
+ self.assertEqual(None, lib.wait_for_idle(mock_runner, timeout))
mock_runner.run.assert_called_once_with(
[
@@ -809,10 +952,10 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
)
assert_raise_library_error(
- lambda: lib.wait_for_resources(mock_runner),
+ lambda: lib.wait_for_idle(mock_runner),
(
Severity.ERROR,
- report_codes.RESOURCE_WAIT_ERROR,
+ report_codes.WAIT_FOR_IDLE_ERROR,
{
"reason": expected_stderr + "\n" + expected_stdout,
}
@@ -835,10 +978,10 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
)
assert_raise_library_error(
- lambda: lib.wait_for_resources(mock_runner),
+ lambda: lib.wait_for_idle(mock_runner),
(
Severity.ERROR,
- report_codes.RESOURCE_WAIT_TIMED_OUT,
+ report_codes.WAIT_FOR_IDLE_TIMED_OUT,
{
"reason": expected_stderr + "\n" + expected_stdout,
}
@@ -848,231 +991,3 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
mock_runner.run.assert_called_once_with(
[self.path("crm_resource"), "--wait"]
)
-
-class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
- def test_standby_local(self):
- expected_retval = 0
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = ("dummy", "", expected_retval)
-
- output = lib.nodes_standby(mock_runner)
-
- mock_runner.run.assert_called_once_with(
- [self.path("crm_standby"), "-v", "on"]
- )
- self.assertEqual(None, output)
-
- def test_unstandby_local(self):
- expected_retval = 0
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = ("dummy", "", expected_retval)
-
- output = lib.nodes_unstandby(mock_runner)
-
- mock_runner.run.assert_called_once_with(
- [self.path("crm_standby"), "-D"]
- )
- self.assertEqual(None, output)
-
- def test_standby_all(self):
- nodes = ("node1", "node2", "node3")
- for i, n in enumerate(nodes, 1):
- self.fixture_add_node_status(
- self.fixture_get_node_status(n, i)
- )
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- call_list = [mock.call(self.crm_mon_cmd())]
- call_list += [
- mock.call([self.path("crm_standby"), "-v", "on", "-N", n])
- for n in nodes
- ]
- return_value_list = [(str(self.status), "", 0)]
- return_value_list += [("dummy", "", 0) for n in nodes]
- mock_runner.run.side_effect = return_value_list
-
- output = lib.nodes_standby(mock_runner, all_nodes=True)
-
- self.assertEqual(len(return_value_list), len(call_list))
- self.assertEqual(len(return_value_list), mock_runner.run.call_count)
- mock_runner.run.assert_has_calls(call_list)
- self.assertEqual(None, output)
-
- def test_unstandby_all(self):
- nodes = ("node1", "node2", "node3")
- for i, n in enumerate(nodes, 1):
- self.fixture_add_node_status(
- self.fixture_get_node_status(n, i)
- )
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- call_list = [mock.call(self.crm_mon_cmd())]
- call_list += [
- mock.call([self.path("crm_standby"), "-D", "-N", n])
- for n in nodes
- ]
- return_value_list = [(str(self.status), "", 0)]
- return_value_list += [("dummy", "", 0) for n in nodes]
- mock_runner.run.side_effect = return_value_list
-
- output = lib.nodes_unstandby(mock_runner, all_nodes=True)
-
- self.assertEqual(len(return_value_list), len(call_list))
- self.assertEqual(len(return_value_list), mock_runner.run.call_count)
- mock_runner.run.assert_has_calls(call_list)
- self.assertEqual(None, output)
-
- def test_standby_nodes(self):
- nodes = ("node1", "node2", "node3")
- for i, n in enumerate(nodes, 1):
- self.fixture_add_node_status(
- self.fixture_get_node_status(n, i)
- )
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- call_list = [mock.call(self.crm_mon_cmd())]
- call_list += [
- mock.call([self.path("crm_standby"), "-v", "on", "-N", n])
- for n in nodes[1:]
- ]
- return_value_list = [(str(self.status), "", 0)]
- return_value_list += [("dummy", "", 0) for n in nodes[1:]]
- mock_runner.run.side_effect = return_value_list
-
- output = lib.nodes_standby(mock_runner, node_list=nodes[1:])
-
- self.assertEqual(len(return_value_list), len(call_list))
- self.assertEqual(len(return_value_list), mock_runner.run.call_count)
- mock_runner.run.assert_has_calls(call_list)
- self.assertEqual(None, output)
-
- def test_unstandby_nodes(self):
- nodes = ("node1", "node2", "node3")
- for i, n in enumerate(nodes, 1):
- self.fixture_add_node_status(
- self.fixture_get_node_status(n, i)
- )
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- call_list = [mock.call(self.crm_mon_cmd())]
- call_list += [
- mock.call([self.path("crm_standby"), "-D", "-N", n])
- for n in nodes[:2]
- ]
- return_value_list = [(str(self.status), "", 0)]
- return_value_list += [("dummy", "", 0) for n in nodes[:2]]
- mock_runner.run.side_effect = return_value_list
-
- output = lib.nodes_unstandby(mock_runner, node_list=nodes[:2])
-
- self.assertEqual(len(return_value_list), len(call_list))
- self.assertEqual(len(return_value_list), mock_runner.run.call_count)
- mock_runner.run.assert_has_calls(call_list)
- self.assertEqual(None, output)
-
- def test_standby_unknown_node(self):
- self.fixture_add_node_status(
- self.fixture_get_node_status("node_1", "id_1")
- )
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (str(self.status), "", 0)
-
- assert_raise_library_error(
- lambda: lib.nodes_standby(mock_runner, ["node_2"]),
- (
- Severity.ERROR,
- report_codes.NODE_NOT_FOUND,
- {"node": "node_2"}
- )
- )
-
- mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
-
- def test_unstandby_unknown_node(self):
- self.fixture_add_node_status(
- self.fixture_get_node_status("node_1", "id_1")
- )
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (str(self.status), "", 0)
-
- assert_raise_library_error(
- lambda: lib.nodes_unstandby(mock_runner, ["node_2", "node_3"]),
- (
- Severity.ERROR,
- report_codes.NODE_NOT_FOUND,
- {"node": "node_2"}
- ),
- (
- Severity.ERROR,
- report_codes.NODE_NOT_FOUND,
- {"node": "node_3"}
- )
- )
-
- mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
-
- def test_error_one_node(self):
- expected_stdout = "some info"
- expected_stderr = "some error"
- expected_retval = 1
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- mock_runner.run.return_value = (
- expected_stdout,
- expected_stderr,
- expected_retval
- )
-
- assert_raise_library_error(
- lambda: lib.nodes_unstandby(mock_runner),
- (
- Severity.ERROR,
- report_codes.COMMON_ERROR,
- {
- "text": expected_stderr + "\n" + expected_stdout,
- }
- )
- )
-
- mock_runner.run.assert_called_once_with(
- [self.path("crm_standby"), "-D"]
- )
-
- def test_error_some_nodes(self):
- nodes = ("node1", "node2", "node3", "node4")
- for i, n in enumerate(nodes, 1):
- self.fixture_add_node_status(
- self.fixture_get_node_status(n, i)
- )
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- call_list = [mock.call(self.crm_mon_cmd())]
- call_list += [
- mock.call([self.path("crm_standby"), "-v", "on", "-N", n])
- for n in nodes
- ]
- return_value_list = [
- (str(self.status), "", 0),
- ("dummy1", "", 0),
- ("dummy2", "error2", 1),
- ("dummy3", "", 0),
- ("dummy4", "error4", 1),
- ]
- mock_runner.run.side_effect = return_value_list
-
- assert_raise_library_error(
- lambda: lib.nodes_standby(mock_runner, all_nodes=True),
- (
- Severity.ERROR,
- report_codes.COMMON_ERROR,
- {
- "text": "error2\ndummy2",
- }
- ),
- (
- Severity.ERROR,
- report_codes.COMMON_ERROR,
- {
- "text": "error4\ndummy4",
- }
- )
- )
-
- self.assertEqual(len(return_value_list), len(call_list))
- self.assertEqual(len(return_value_list), mock_runner.run.call_count)
- mock_runner.run.assert_has_calls(call_list)
-
diff --git a/pcs/lib/pacemaker/test/test_state.py b/pcs/lib/pacemaker/test/test_state.py
new file mode 100644
index 0000000..a29eddf
--- /dev/null
+++ b/pcs/lib/pacemaker/test/test_state.py
@@ -0,0 +1,858 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase, mock
+from lxml import etree
+
+from pcs.test.tools.assertions import (
+ assert_raise_library_error,
+ assert_report_item_equal,
+)
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
+from pcs.lib.pacemaker import state
+from pcs.lib.pacemaker.state import (
+ ClusterState,
+ _Attrs,
+ _Children,
+)
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severities
+
+class AttrsTest(TestCase):
+ def test_get_declared_attr(self):
+ attrs = _Attrs('test', {'node-name': 'node1'}, {'name': 'node-name'})
+ self.assertEqual('node1', attrs.name)
+
+ def test_raises_on_undeclared_attribute(self):
+ attrs = _Attrs('test', {'node-name': 'node1'}, {})
+ self.assertRaises(AttributeError, lambda: attrs.name)
+
+ def test_raises_on_missing_required_attribute(self):
+ attrs = _Attrs('test', {}, {'name': 'node-name'})
+ self.assertRaises(AttributeError, lambda: attrs.name)
+
+ def test_attr_transformation_success(self):
+ attrs = _Attrs('test', {'number': '7'}, {'count': ('number', int)})
+ self.assertEqual(7, attrs.count)
+
+ def test_attr_transformation_fail(self):
+ attrs = _Attrs('test', {'number': 'abc'}, {'count': ('number', int)})
+ self.assertRaises(ValueError, lambda: attrs.count)
+
+class ChildrenTest(TestCase):
+ def setUp(self):
+ self.dom = etree.fromstring(
+ '<main><some name="0"/><any name="1"/><any name="2"/></main>'
+ )
+
+ def wrap(self, element):
+ return '{0}.{1}'.format(element.tag, element.attrib['name'])
+
+ def test_get_declared_section(self):
+ children = _Children(
+ 'test', self.dom, {}, {'some_section': ('some', self.wrap)}
+ )
+ self.assertEqual('some.0', children.some_section)
+
+ def test_get_declared_children(self):
+ children = _Children('test', self.dom, {'anys': ('any', self.wrap)}, {})
+ self.assertEqual(['any.1', 'any.2'], children.anys)
+
+ def test_raises_on_undeclared_children(self):
+ children = _Children('test', self.dom, {}, {})
+ self.assertRaises(AttributeError, lambda: children.some_section)
+
+
+class TestBase(TestCase):
+ def setUp(self):
+ self.create_covered_status = get_xml_manipulation_creator_from_file(
+ rc('crm_mon.minimal.xml')
+ )
+ self.covered_status = self.create_covered_status()
+
+class ClusterStatusTest(TestBase):
+ def test_minimal_crm_mon_is_valid(self):
+ ClusterState(str(self.covered_status))
+
+ def test_refuse_invalid_xml(self):
+ assert_raise_library_error(
+ lambda: ClusterState('invalid xml'),
+ (severities.ERROR, report_codes.BAD_CLUSTER_STATE_FORMAT, {})
+ )
+
+ def test_refuse_invalid_document(self):
+ self.covered_status.append_to_first_tag_name(
+ 'nodes',
+ '<node without="required attributes" />'
+ )
+
+ assert_raise_library_error(
+ lambda: ClusterState(str(self.covered_status)),
+ (severities.ERROR, report_codes.BAD_CLUSTER_STATE_FORMAT, {})
+ )
+
+
+class WorkWithClusterStatusNodesTest(TestBase):
+ def fixture_node_string(self, **kwargs):
+ attrs = dict(name='name', id='id', type='member')
+ attrs.update(kwargs)
+ return '''<node
+ name="{name}"
+ id="{id}"
+ online="true"
+ standby="true"
+ standby_onfail="false"
+ maintenance="false"
+ pending="false"
+ unclean="false"
+ shutdown="false"
+ expected_up="false"
+ is_dc="false"
+ resources_running="0"
+ type="{type}"
+ />'''.format(**attrs)
+
+ def test_can_get_node_names(self):
+ self.covered_status.append_to_first_tag_name(
+ 'nodes',
+ self.fixture_node_string(name='node1', id='1'),
+ self.fixture_node_string(name='node2', id='2'),
+ )
+ xml = str(self.covered_status)
+ self.assertEqual(
+ ['node1', 'node2'],
+ [node.attrs.name for node in ClusterState(xml).node_section.nodes]
+ )
+
+ def test_can_filter_out_remote_nodes(self):
+ self.covered_status.append_to_first_tag_name(
+ 'nodes',
+ self.fixture_node_string(name='node1', id='1'),
+ self.fixture_node_string(name='node2', type='remote', id='2'),
+ )
+ xml = str(self.covered_status)
+ self.assertEqual(
+ ['node1'],
+ [
+ node.attrs.name
+ for node in ClusterState(xml).node_section.nodes
+ if node.attrs.type != 'remote'
+ ]
+ )
+
+
+class WorkWithClusterStatusSummaryTest(TestBase):
+ def test_nodes_count(self):
+ xml = str(self.covered_status)
+ self.assertEqual(0, ClusterState(xml).summary.nodes.attrs.count)
+
+ def test_resources_count(self):
+ xml = str(self.covered_status)
+ self.assertEqual(0, ClusterState(xml).summary.resources.attrs.count)
+
+
+class GetPrimitiveRolesWithNodes(TestCase):
+ def test_success(self):
+ primitives_xml = [
+ """
+ <resource id="A" role="Started">
+ <node name="node1" id="1"/>
+ </resource>
+ """,
+ """
+ <resource id="A" role="Master">
+ <node name="node2" id="2"/>
+ </resource>
+ """,
+ """
+ <resource id="A" role="Slave">
+ <node name="node4" id="4"/>
+ </resource>
+ """,
+ """
+ <resource id="A" role="Slave">
+ <node name="node3" id="3"/>
+ </resource>
+ """,
+ """
+ <resource id="A" role="Stopped">
+ </resource>
+ """,
+ """
+ <resource id="A" role="Started">
+ <node name="node5" id="5"/>
+ </resource>
+ """,
+ ]
+ primitives = [
+ etree.fromstring(xml) for xml in primitives_xml
+ ]
+
+ self.assertEqual(
+ state._get_primitive_roles_with_nodes(primitives),
+ {
+ "Started": ["node1", "node5"],
+ "Master": ["node2"],
+ "Slave": ["node3", "node4"]
+ }
+ )
+
+ def test_empty(self):
+ self.assertEqual(
+ state._get_primitive_roles_with_nodes([]),
+ {
+ }
+ )
+
+
+class GetPrimitivesForStateCheck(TestCase):
+ status_xml = etree.fromstring("""
+ <resources>
+ <resource id="R01" failed="false" />
+ <resource id="R02" failed="true" />
+
+ <group id="G1">
+ <resource id="R03" failed="false" />
+ <resource id="R04" failed="false" />
+ </group>
+ <group id="G2">
+ <resource id="R05" failed="true" />
+ <resource id="R06" failed="true" />
+ </group>
+
+ <clone id="R07-clone">
+ <resource id="R07" failed="false" />
+ <resource id="R07" failed="false" />
+ </clone>
+ <clone id="R08-clone">
+ <resource id="R08" failed="true" />
+ <resource id="R08" failed="false" />
+ </clone>
+ <clone id="R09-clone">
+ <resource id="R09" failed="true" />
+ <resource id="R09" failed="true" />
+ </clone>
+
+ <clone id="R10-clone">
+ <resource id="R10:0" failed="false" />
+ <resource id="R10:1" failed="false" />
+ </clone>
+ <clone id="R11-clone">
+ <resource id="R11:0" failed="false" />
+ <resource id="R11:1" failed="true" />
+ </clone>
+ <clone id="R12-clone">
+ <resource id="R12:0" failed="true" />
+ <resource id="R12:1" failed="true" />
+ </clone>
+
+ <clone id="G3-clone">
+ <group id="G3:0">
+ <resource id="R13" failed="false" />
+ <resource id="R14" failed="false" />
+ </group>
+ <group id="G3:1">
+ <resource id="R13" failed="false" />
+ <resource id="R14" failed="false" />
+ </group>
+ </clone>
+ <clone id="G4-clone">
+ <group id="G4:0">
+ <resource id="R15" failed="true" />
+ <resource id="R16" failed="true" />
+ </group>
+ <group id="G4:1">
+ <resource id="R15" failed="false" />
+ <resource id="R16" failed="false" />
+ </group>
+ </clone>
+ <clone id="G5-clone">
+ <group id="G5:0">
+ <resource id="R17" failed="true" />
+ <resource id="R18" failed="true" />
+ </group>
+ <group id="G5:1">
+ <resource id="R17" failed="true" />
+ <resource id="R18" failed="true" />
+ </group>
+ </clone>
+
+ <clone id="G6-clone" managed="true">
+ <group id="G6:0">
+ <resource id="R19:0" failed="false" />
+ <resource id="R20:0" failed="false" />
+ </group>
+ <group id="G6:1">
+ <resource id="R19:1" failed="false" />
+ <resource id="R20:1" failed="false" />
+ </group>
+ </clone>
+ <clone id="G7-clone" managed="true">
+ <group id="G7:0">
+ <resource id="R21:0" failed="true" />
+ <resource id="R22:0" failed="true" />
+ </group>
+ <group id="G7:1">
+ <resource id="R21:1" failed="false" />
+ <resource id="R22:1" failed="false" />
+ </group>
+ </clone>
+ <clone id="G8-clone" managed="true">
+ <group id="G8:0">
+ <resource id="R23:0" failed="true" />
+ <resource id="R24:0" failed="true" />
+ </group>
+ <group id="G8:1">
+ <resource id="R23:1" failed="true" />
+ <resource id="R24:1" failed="true" />
+ </group>
+ </clone>
+ <bundle id="B1" managed="true">
+ <replica id="0">
+ <resource id="B1-R1" failed="false" />
+ <resource id="B1-R2" failed="false" />
+ </replica>
+ </bundle>
+ <bundle id="B2" managed="true">
+ <replica id="0">
+ <resource id="B2-R1" failed="true" />
+ <resource id="B2-R2" failed="false" />
+ </replica>
+ <replica id="1">
+ <resource id="B2-R1" failed="false" />
+ <resource id="B2-R2" failed="false" />
+ </replica>
+ </bundle>
+ </resources>
+ """)
+
+ def setUp(self):
+ self.status = etree.parse(rc("crm_mon.minimal.xml")).getroot()
+ self.status.append(self.status_xml)
+ for resource in self.status.xpath(".//resource"):
+ resource.attrib.update({
+ "resource_agent": "ocf::pacemaker:Stateful",
+ "role": "Started",
+ "active": "true",
+ "orphaned": "false",
+ "blocked": "false",
+ "managed": "true",
+ "failure_ignored": "false",
+ "nodes_running_on": "1",
+ })
+
+ def assert_primitives(self, resource_id, primitive_ids, expected_running):
+ self.assertEqual(
+ [
+ elem.attrib["id"]
+ for elem in state._get_primitives_for_state_check(
+ self.status, resource_id, expected_running
+ )
+ ],
+ primitive_ids
+ )
+
+ def test_missing(self):
+ self.assert_primitives("Rxx", [], True)
+ self.assert_primitives("Rxx", [], False)
+
+ def test_primitive(self):
+ self.assert_primitives("R01", ["R01"], True)
+ self.assert_primitives("R01", ["R01"], False)
+
+ def test_primitive_failed(self):
+ self.assert_primitives("R02", [], True)
+ self.assert_primitives("R02", [], False)
+
+ def test_group(self):
+ self.assert_primitives("G1", ["R04"], True)
+ self.assert_primitives("G1", ["R03"], False)
+
+ def test_group_failed_primitive(self):
+ self.assert_primitives("G2", [], True)
+ self.assert_primitives("G2", [], False)
+
+ def test_primitive_in_group(self):
+ self.assert_primitives("R03", ["R03"], True)
+ self.assert_primitives("R03", ["R03"], False)
+
+ def test_primitive_in_group_failed(self):
+ self.assert_primitives("R05", [], True)
+ self.assert_primitives("R05", [], False)
+
+ def test_clone(self):
+ self.assert_primitives("R07-clone", ["R07", "R07"], True)
+ self.assert_primitives("R07-clone", ["R07", "R07"], False)
+ self.assert_primitives("R10-clone", ["R10:0", "R10:1"], True)
+ self.assert_primitives("R10-clone", ["R10:0", "R10:1"], False)
+
+ def test_clone_partially_failed(self):
+ self.assert_primitives("R08-clone", ["R08"], True)
+ self.assert_primitives("R08-clone", ["R08"], False)
+ self.assert_primitives("R11-clone", ["R11:0"], True)
+ self.assert_primitives("R11-clone", ["R11:0"], False)
+
+ def test_clone_failed(self):
+ self.assert_primitives("R09-clone", [], True)
+ self.assert_primitives("R09-clone", [], False)
+ self.assert_primitives("R12-clone", [], True)
+ self.assert_primitives("R12-clone", [], False)
+
+ def test_primitive_in_clone(self):
+ self.assert_primitives("R07", ["R07", "R07"], True)
+ self.assert_primitives("R07", ["R07", "R07"], False)
+ self.assert_primitives("R10", ["R10:0", "R10:1"], True)
+ self.assert_primitives("R10", ["R10:0", "R10:1"], False)
+
+ def test_primitive_in_clone_partially_failed(self):
+ self.assert_primitives("R08", ["R08"], True)
+ self.assert_primitives("R08", ["R08"], False)
+ self.assert_primitives("R11", ["R11:0"], True)
+ self.assert_primitives("R11", ["R11:0"], False)
+
+ def test_primitive_in_clone_failed(self):
+ self.assert_primitives("R09", [], True)
+ self.assert_primitives("R09", [], False)
+ self.assert_primitives("R12", [], True)
+ self.assert_primitives("R12", [], False)
+
+ def test_clone_containing_group(self):
+ self.assert_primitives("G3-clone", ["R14", "R14"], True)
+ self.assert_primitives("G3-clone", ["R13", "R13"], False)
+ self.assert_primitives("G6-clone", ["R20:0", "R20:1"], True)
+ self.assert_primitives("G6-clone", ["R19:0", "R19:1"], False)
+
+ def test_clone_containing_group_partially_failed(self):
+ self.assert_primitives("G4-clone", ["R16"], True)
+ self.assert_primitives("G4-clone", ["R15"], False)
+ self.assert_primitives("G7-clone", ["R22:1"], True)
+ self.assert_primitives("G7-clone", ["R21:1"], False)
+
+ def test_clone_containing_group_failed(self):
+ self.assert_primitives("G5-clone", [], True)
+ self.assert_primitives("G5-clone", [], False)
+ self.assert_primitives("G8-clone", [], True)
+ self.assert_primitives("G8-clone", [], False)
+
+ def test_group_in_clone_containing_group(self):
+ self.assert_primitives("G3", ["R14", "R14"], True)
+ self.assert_primitives("G3", ["R13", "R13"], False)
+ self.assert_primitives("G6", ["R20:0", "R20:1"], True)
+ self.assert_primitives("G6", ["R19:0", "R19:1"], False)
+
+ def test_group_in_clone_containing_group_partially_failed(self):
+ self.assert_primitives("G4", ["R16"], True)
+ self.assert_primitives("G4", ["R15"], False)
+ self.assert_primitives("G7", ["R22:1"], True)
+ self.assert_primitives("G7", ["R21:1"], False)
+
+ def test_group_in_clone_containing_group_failed(self):
+ self.assert_primitives("G5", [], True)
+ self.assert_primitives("G5", [], False)
+ self.assert_primitives("G8", [], True)
+ self.assert_primitives("G8", [], False)
+
+ def test_primitive_in_clone_containing_group(self):
+ self.assert_primitives("R14", ["R14", "R14"], True)
+ self.assert_primitives("R14", ["R14", "R14"], False)
+ self.assert_primitives("R20", ["R20:0", "R20:1"], True)
+ self.assert_primitives("R20", ["R20:0", "R20:1"], False)
+
+ def test_primitive_in_clone_containing_group_partially_failed(self):
+ self.assert_primitives("R16", ["R16"], True)
+ self.assert_primitives("R16", ["R16"], False)
+ self.assert_primitives("R22", ["R22:1"], True)
+ self.assert_primitives("R22", ["R22:1"], False)
+
+ def test_primitive_in_clone_containing_group_failed(self):
+ self.assert_primitives("R18", [], True)
+ self.assert_primitives("R18", [], False)
+ self.assert_primitives("R24", [], True)
+ self.assert_primitives("R24", [], False)
+
+ def test_bundle(self):
+ self.assert_primitives("B1", ["B1-R1", "B1-R2"], True)
+ self.assert_primitives("B1", ["B1-R1", "B1-R2"], False)
+ self.assert_primitives("B2", ["B2-R2", "B2-R1", "B2-R2"], True)
+ self.assert_primitives("B2", ["B2-R2", "B2-R1", "B2-R2"], False)
+
+ def test_primitive_in_bundle(self):
+ self.assert_primitives("B1-R1", ["B1-R1"], True)
+ self.assert_primitives("B1-R1", ["B1-R1"], False)
+ self.assert_primitives("B2-R1", ["B2-R1"], True)
+ self.assert_primitives("B2-R1", ["B2-R1"], False)
+ self.assert_primitives("B2-R2", ["B2-R2", "B2-R2"], True)
+ self.assert_primitives("B2-R2", ["B2-R2", "B2-R2"], False)
+
+
+class EnsureResourceState(TestCase):
+ resource_id = "R"
+ def setUp(self):
+ self.cluster_state = "state"
+
+ patcher_primitives = mock.patch(
+ "pcs.lib.pacemaker.state._get_primitives_for_state_check"
+ )
+ self.addCleanup(patcher_primitives.stop)
+ self.get_primitives_for_state_check = patcher_primitives.start()
+
+ patcher_roles = mock.patch(
+ "pcs.lib.pacemaker.state._get_primitive_roles_with_nodes"
+ )
+ self.addCleanup(patcher_roles.stop)
+ self.get_primitive_roles_with_nodes = patcher_roles.start()
+
+ def fixture_running_state_info(self):
+ return {
+ "Started": ["node1"],
+ "Master": ["node2"],
+ "Slave": ["node3", "node4"],
+ }
+
+ def fixture_running_report(self, severity):
+ return (severity, report_codes.RESOURCE_RUNNING_ON_NODES, {
+ "resource_id": self.resource_id,
+ "roles_with_nodes": self.fixture_running_state_info(),
+ })
+
+ def fixture_not_running_report(self, severity):
+ return (severity, report_codes.RESOURCE_DOES_NOT_RUN, {
+ "resource_id": self.resource_id
+ })
+
+ def assert_running_info_transform(self, run_info, report, expected_running):
+ self.get_primitives_for_state_check.return_value = ["elem1", "elem2"]
+ self.get_primitive_roles_with_nodes.return_value = run_info
+ assert_report_item_equal(
+ state.ensure_resource_state(
+ expected_running,
+ self.cluster_state,
+ self.resource_id
+ ),
+ report
+ )
+ self.get_primitives_for_state_check.assert_called_once_with(
+ self.cluster_state,
+ self.resource_id,
+ expected_running
+ )
+ self.get_primitive_roles_with_nodes.assert_called_once_with(
+ ["elem1", "elem2"]
+ )
+
+ def test_report_info_running(self):
+ self.assert_running_info_transform(
+ self.fixture_running_state_info(),
+ self.fixture_running_report(severities.INFO),
+ expected_running=True,
+ )
+
+ def test_report_error_running(self):
+ self.assert_running_info_transform(
+ self.fixture_running_state_info(),
+ self.fixture_running_report(severities.ERROR),
+ expected_running=False,
+ )
+
+ def test_report_error_not_running(self):
+ self.assert_running_info_transform(
+ [],
+ self.fixture_not_running_report(severities.ERROR),
+ expected_running=True,
+ )
+
+ def test_report_info_not_running(self):
+ self.assert_running_info_transform(
+ [],
+ self.fixture_not_running_report(severities.INFO),
+ expected_running=False,
+ )
+
+
+class IsResourceManaged(TestCase):
+ status_xml = etree.fromstring("""
+ <resources>
+ <resource id="R01" managed="true" />
+ <resource id="R02" managed="false" />
+
+ <group id="G1">
+ <resource id="R03" managed="true" />
+ <resource id="R04" managed="true" />
+ </group>
+ <group id="G2">
+ <resource id="R05" managed="false" />
+ <resource id="R06" managed="true" />
+ </group>
+ <group id="G3">
+ <resource id="R07" managed="true" />
+ <resource id="R08" managed="false" />
+ </group>
+ <group id="G4">
+ <resource id="R09" managed="false" />
+ <resource id="R10" managed="false" />
+ </group>
+
+ <clone id="R11-clone" managed="true">
+ <resource id="R11" managed="true" />
+ <resource id="R11" managed="true" />
+ </clone>
+ <clone id="R12-clone" managed="true">
+ <resource id="R12" managed="false" />
+ <resource id="R12" managed="false" />
+ </clone>
+ <clone id="R13-clone" managed="false">
+ <resource id="R13" managed="true" />
+ <resource id="R13" managed="true" />
+ </clone>
+ <clone id="R14-clone" managed="false">
+ <resource id="R14" managed="false" />
+ <resource id="R14" managed="false" />
+ </clone>
+
+ <clone id="R15-clone" managed="true">
+ <resource id="R15:0" managed="true" />
+ <resource id="R15:1" managed="true" />
+ </clone>
+ <clone id="R16-clone" managed="true">
+ <resource id="R16:0" managed="false" />
+ <resource id="R16:1" managed="false" />
+ </clone>
+ <clone id="R17-clone" managed="false">
+ <resource id="R17:0" managed="true" />
+ <resource id="R17:1" managed="true" />
+ </clone>
+ <clone id="R18-clone" managed="false">
+ <resource id="R18:0" managed="false" />
+ <resource id="R18:1" managed="false" />
+ </clone>
+
+ <clone id="G5-clone" managed="true">
+ <group id="G5:0">
+ <resource id="R19" managed="true" />
+ <resource id="R20" managed="true" />
+ </group>
+ <group id="G5:1">
+ <resource id="R19" managed="true" />
+ <resource id="R20" managed="true" />
+ </group>
+ </clone>
+ <clone id="G6-clone" managed="false">
+ <group id="G6:0">
+ <resource id="R21" managed="true" />
+ <resource id="R22" managed="true" />
+ </group>
+ <group id="G6:1">
+ <resource id="R21" managed="true" />
+ <resource id="R22" managed="true" />
+ </group>
+ </clone>
+ <clone id="G7-clone" managed="true">
+ <group id="G7:0">
+ <resource id="R23" managed="false" />
+ <resource id="R24" managed="true" />
+ </group>
+ <group id="G7:1">
+ <resource id="R23" managed="false" />
+ <resource id="R24" managed="true" />
+ </group>
+ </clone>
+ <clone id="G8-clone" managed="true">
+ <group id="G8:0">
+ <resource id="R25" managed="true" />
+ <resource id="R26" managed="false" />
+ </group>
+ <group id="G8:1">
+ <resource id="R25" managed="true" />
+ <resource id="R26" managed="false" />
+ </group>
+ </clone>
+ <clone id="G9-clone" managed="false">
+ <group id="G9:0">
+ <resource id="R27" managed="false" />
+ <resource id="R28" managed="false" />
+ </group>
+ <group id="G9:1">
+ <resource id="R27" managed="false" />
+ <resource id="R28" managed="false" />
+ </group>
+ </clone>
+
+ <clone id="G10-clone" managed="true">
+ <group id="G10:0">
+ <resource id="R29:0" managed="true" />
+ <resource id="R30:0" managed="true" />
+ </group>
+ <group id="G10:1">
+ <resource id="R29:1" managed="true" />
+ <resource id="R30:1" managed="true" />
+ </group>
+ </clone>
+ <clone id="G11-clone" managed="false">
+ <group id="G11:0">
+ <resource id="R31:0" managed="true" />
+ <resource id="R32:0" managed="true" />
+ </group>
+ <group id="G11:1">
+ <resource id="R31:1" managed="true" />
+ <resource id="R32:1" managed="true" />
+ </group>
+ </clone>
+ <clone id="G12-clone" managed="true">
+ <group id="G12:0">
+ <resource id="R33:0" managed="false" />
+ <resource id="R34:0" managed="true" />
+ </group>
+ <group id="G12:1">
+ <resource id="R33:1" managed="false" />
+ <resource id="R34:1" managed="true" />
+ </group>
+ </clone>
+ <clone id="G13-clone" managed="true">
+ <group id="G13:0">
+ <resource id="R35:0" managed="true" />
+ <resource id="R36:0" managed="false" />
+ </group>
+ <group id="G13:1">
+ <resource id="R35:1" managed="true" />
+ <resource id="R36:1" managed="false" />
+ </group>
+ </clone>
+ <clone id="G14-clone" managed="false">
+ <group id="G14:0">
+ <resource id="R37:0" managed="false" />
+ <resource id="R38:0" managed="false" />
+ </group>
+ <group id="G14:1">
+ <resource id="R37:1" managed="false" />
+ <resource id="R38:1" managed="false" />
+ </group>
+ </clone>
+ </resources>
+ """)
+
+ def setUp(self):
+ self.status = etree.parse(rc("crm_mon.minimal.xml")).getroot()
+ self.status.append(self.status_xml)
+ for resource in self.status.xpath(".//resource"):
+ resource.attrib.update({
+ "resource_agent": "ocf::pacemaker:Stateful",
+ "role": "Started",
+ "active": "true",
+ "orphaned": "false",
+ "blocked": "false",
+ "failed": "false",
+ "failure_ignored": "false",
+ "nodes_running_on": "1",
+ })
+
+ def assert_managed(self, resource, managed):
+ self.assertEqual(
+ managed,
+ state.is_resource_managed(self.status, resource)
+ )
+
+ def test_missing(self):
+ self.assertRaises(
+ state.ResourceNotFound,
+ self.assert_managed, "Rxx", True
+ )
+
+ def test_primitive(self):
+ self.assert_managed("R01", True)
+ self.assert_managed("R02", False)
+
+ def test_group(self):
+ self.assert_managed("G1", True)
+ self.assert_managed("G2", False)
+ self.assert_managed("G3", False)
+ self.assert_managed("G4", False)
+
+ def test_primitive_in_group(self):
+ self.assert_managed("R03", True)
+ self.assert_managed("R04", True)
+ self.assert_managed("R05", False)
+ self.assert_managed("R06", True)
+ self.assert_managed("R07", True)
+ self.assert_managed("R08", False)
+ self.assert_managed("R09", False)
+ self.assert_managed("R10", False)
+
+ def test_clone(self):
+ self.assert_managed("R11-clone", True)
+ self.assert_managed("R12-clone", False)
+ self.assert_managed("R13-clone", False)
+ self.assert_managed("R14-clone", False)
+
+ self.assert_managed("R15-clone", True)
+ self.assert_managed("R16-clone", False)
+ self.assert_managed("R17-clone", False)
+ self.assert_managed("R18-clone", False)
+
+ def test_primitive_in_clone(self):
+ self.assert_managed("R11", True)
+ self.assert_managed("R12", False)
+ self.assert_managed("R13", False)
+ self.assert_managed("R14", False)
+
+ def test_primitive_in_unique_clone(self):
+ self.assert_managed("R15", True)
+ self.assert_managed("R16", False)
+ self.assert_managed("R17", False)
+ self.assert_managed("R18", False)
+
+ def test_clone_containing_group(self):
+ self.assert_managed("G5-clone", True)
+ self.assert_managed("G6-clone", False)
+ self.assert_managed("G7-clone", False)
+ self.assert_managed("G8-clone", False)
+ self.assert_managed("G9-clone", False)
+
+ self.assert_managed("G10-clone", True)
+ self.assert_managed("G11-clone", False)
+ self.assert_managed("G12-clone", False)
+ self.assert_managed("G13-clone", False)
+ self.assert_managed("G14-clone", False)
+
+ def test_group_in_clone(self):
+ self.assert_managed("G5", True)
+ self.assert_managed("G6", False)
+ self.assert_managed("G7", False)
+ self.assert_managed("G8", False)
+ self.assert_managed("G9", False)
+
+ def test_group_in_unique_clone(self):
+ self.assert_managed("G10", True)
+ self.assert_managed("G11", False)
+ self.assert_managed("G12", False)
+ self.assert_managed("G13", False)
+ self.assert_managed("G14", False)
+
+ def test_primitive_in_group_in_clone(self):
+ self.assert_managed("R19", True)
+ self.assert_managed("R20", True)
+ self.assert_managed("R21", False)
+ self.assert_managed("R22", False)
+ self.assert_managed("R23", False)
+ self.assert_managed("R24", True)
+ self.assert_managed("R25", True)
+ self.assert_managed("R26", False)
+ self.assert_managed("R27", False)
+ self.assert_managed("R28", False)
+
+ def test_primitive_in_group_in_unique_clone(self):
+ self.assert_managed("R29", True)
+ self.assert_managed("R30", True)
+ self.assert_managed("R31", False)
+ self.assert_managed("R32", False)
+ self.assert_managed("R33", False)
+ self.assert_managed("R34", True)
+ self.assert_managed("R35", True)
+ self.assert_managed("R36", False)
+ self.assert_managed("R37", False)
+ self.assert_managed("R38", False)
diff --git a/pcs/lib/test/test_pacemaker_values.py b/pcs/lib/pacemaker/test/test_values.py
similarity index 75%
rename from pcs/lib/test/test_pacemaker_values.py
rename to pcs/lib/pacemaker/test/test_values.py
index e192971..ce3522a 100644
--- a/pcs/lib/test/test_pacemaker_values.py
+++ b/pcs/lib/pacemaker/test/test_values.py
@@ -12,7 +12,7 @@ from pcs.test.tools.assertions import assert_raise_library_error
from pcs.common import report_codes
from pcs.lib.errors import ReportItemSeverity as severity
-import pcs.lib.pacemaker_values as lib
+import pcs.lib.pacemaker.values as lib
class BooleanTest(TestCase):
@@ -46,6 +46,25 @@ class BooleanTest(TestCase):
self.assertTrue(lib.is_boolean("Y"))
self.assertTrue(lib.is_boolean("1"))
+ def test_false_is_false(self):
+ self.assertTrue(lib.is_false("false"))
+ self.assertTrue(lib.is_false("faLse"))
+ self.assertTrue(lib.is_false("off"))
+ self.assertTrue(lib.is_false("OFF"))
+ self.assertTrue(lib.is_false("no"))
+ self.assertTrue(lib.is_false("nO"))
+ self.assertTrue(lib.is_false("n"))
+ self.assertTrue(lib.is_false("N"))
+ self.assertTrue(lib.is_false("0"))
+
+ def test_nonfalse_is_not_false(self):
+ self.assertFalse(lib.is_false(""))
+ self.assertFalse(lib.is_false(" 0 "))
+ self.assertFalse(lib.is_false("x"))
+ self.assertFalse(lib.is_false("-1"))
+ self.assertFalse(lib.is_false("10"))
+ self.assertFalse(lib.is_false("heck no"))
+
def test_false_is_boolean(self):
self.assertTrue(lib.is_boolean("false"))
self.assertTrue(lib.is_boolean("fAlse"))
@@ -70,6 +89,7 @@ class BooleanTest(TestCase):
class TimeoutTest(TestCase):
def test_valid(self):
+ self.assertEqual(10, lib.timeout_to_seconds(10))
self.assertEqual(10, lib.timeout_to_seconds("10"))
self.assertEqual(10, lib.timeout_to_seconds("10s"))
self.assertEqual(10, lib.timeout_to_seconds("10sec"))
@@ -79,12 +99,14 @@ class TimeoutTest(TestCase):
self.assertEqual(36000, lib.timeout_to_seconds("10hr"))
def test_invalid(self):
+ self.assertEqual(None, lib.timeout_to_seconds(-10))
self.assertEqual(None, lib.timeout_to_seconds("1a1s"))
self.assertEqual(None, lib.timeout_to_seconds("10mm"))
self.assertEqual(None, lib.timeout_to_seconds("10mim"))
self.assertEqual(None, lib.timeout_to_seconds("aaa"))
self.assertEqual(None, lib.timeout_to_seconds(""))
+ self.assertEqual(-10, lib.timeout_to_seconds(-10, True))
self.assertEqual("1a1s", lib.timeout_to_seconds("1a1s", True))
self.assertEqual("10mm", lib.timeout_to_seconds("10mm", True))
self.assertEqual("10mim", lib.timeout_to_seconds("10mim", True))
@@ -224,28 +246,54 @@ class ValidateIdTest(TestCase):
report
)
+class SanitizeId(TestCase):
+ def test_dont_change_valid_id(self):
+ self.assertEqual("d", lib.sanitize_id("d"))
+ self.assertEqual("dummy", lib.sanitize_id("dummy"))
+ self.assertEqual("dum0my", lib.sanitize_id("dum0my"))
+ self.assertEqual("dum-my", lib.sanitize_id("dum-my"))
+ self.assertEqual("dum.my", lib.sanitize_id("dum.my"))
+ self.assertEqual("dum_my", lib.sanitize_id("dum_my"))
+ self.assertEqual("_dummy", lib.sanitize_id("_dummy"))
+
+ def test_empty(self):
+ self.assertEqual("", lib.sanitize_id(""))
+
+ def test_invalid_id(self):
+ self.assertEqual("", lib.sanitize_id("0"))
+ self.assertEqual("", lib.sanitize_id("-"))
+ self.assertEqual("", lib.sanitize_id("."))
+ self.assertEqual("", lib.sanitize_id(":", "_"))
+
+ self.assertEqual("dummy", lib.sanitize_id("0dummy"))
+ self.assertEqual("dummy", lib.sanitize_id("-dummy"))
+ self.assertEqual("dummy", lib.sanitize_id(".dummy"))
+ self.assertEqual("dummy", lib.sanitize_id(":dummy", "_"))
+
+ self.assertEqual("dummy", lib.sanitize_id("dum:my"))
+ self.assertEqual("dum_my", lib.sanitize_id("dum:my", "_"))
class IsScoreValueTest(TestCase):
def test_returns_true_for_number(self):
- self.assertTrue(lib.is_score_value("1"))
+ self.assertTrue(lib.is_score("1"))
def test_returns_true_for_minus_number(self):
- self.assertTrue(lib.is_score_value("-1"))
+ self.assertTrue(lib.is_score("-1"))
def test_returns_true_for_plus_number(self):
- self.assertTrue(lib.is_score_value("+1"))
+ self.assertTrue(lib.is_score("+1"))
def test_returns_true_for_infinity(self):
- self.assertTrue(lib.is_score_value("INFINITY"))
+ self.assertTrue(lib.is_score("INFINITY"))
def test_returns_true_for_minus_infinity(self):
- self.assertTrue(lib.is_score_value("-INFINITY"))
+ self.assertTrue(lib.is_score("-INFINITY"))
def test_returns_true_for_plus_infinity(self):
- self.assertTrue(lib.is_score_value("+INFINITY"))
+ self.assertTrue(lib.is_score("+INFINITY"))
def test_returns_false_for_nonumber_noinfinity(self):
- self.assertFalse(lib.is_score_value("something else"))
+ self.assertFalse(lib.is_score("something else"))
def test_returns_false_for_multiple_operators(self):
- self.assertFalse(lib.is_score_value("++INFINITY"))
+ self.assertFalse(lib.is_score("++INFINITY"))
diff --git a/pcs/lib/pacemaker_values.py b/pcs/lib/pacemaker/values.py
similarity index 54%
rename from pcs/lib/pacemaker_values.py
rename to pcs/lib/pacemaker/values.py
index 9ab6929..180d7f8 100644
--- a/pcs/lib/pacemaker_values.py
+++ b/pcs/lib/pacemaker/values.py
@@ -11,26 +11,43 @@ from pcs.lib import reports
from pcs.lib.errors import LibraryError
-__BOOLEAN_TRUE = ["true", "on", "yes", "y", "1"]
-__BOOLEAN_FALSE = ["false", "off", "no", "n", "0"]
+_BOOLEAN_TRUE = frozenset(["true", "on", "yes", "y", "1"])
+_BOOLEAN_FALSE = frozenset(["false", "off", "no", "n", "0"])
+_BOOLEAN = _BOOLEAN_TRUE | _BOOLEAN_FALSE
+_ID_FIRST_CHAR_NOT_RE = re.compile("[^a-zA-Z_]")
+_ID_REST_CHARS_NOT_RE = re.compile("[^a-zA-Z0-9_.-]")
SCORE_INFINITY = "INFINITY"
+def is_boolean(val):
+ """
+ Does pacemaker consider a value to be a boolean?
+ See crm_is_true in pacemaker/lib/common/utils.c
+ val checked value
+ """
+ return val.lower() in _BOOLEAN
+
def is_true(val):
"""
Does pacemaker consider a value to be true?
See crm_is_true in pacemaker/lib/common/utils.c
 val checked value
"""
- return val.lower() in __BOOLEAN_TRUE
+ return val.lower() in _BOOLEAN_TRUE
-def is_boolean(val):
+def is_false(val):
"""
- Does pacemaker consider a value to be a boolean?
+ Does pacemaker consider a value to be false?
See crm_is_true in pacemaker/lib/common/utils.c
- val checked value
+ val checked value
"""
- return val.lower() in __BOOLEAN_TRUE + __BOOLEAN_FALSE
+ return val.lower() in _BOOLEAN_FALSE
+
+def is_score(value):
+ if not value:
+ return False
+ unsigned_value = value[1:] if value[0] in ("+", "-") else value
+ return unsigned_value == SCORE_INFINITY or unsigned_value.isdigit()
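A minimal usage sketch of is_score, mirroring the IsScoreValueTest cases above and assuming pcs is importable as a package:

    from pcs.lib.pacemaker.values import is_score

    assert is_score("100")             # plain number
    assert is_score("-INFINITY")       # signed infinity
    assert not is_score("++INFINITY")  # only one leading sign is allowed
    assert not is_score("")            # an empty string is not a score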
def timeout_to_seconds(timeout, return_unknown=False):
"""
@@ -39,8 +56,14 @@ def timeout_to_seconds(timeout, return_unknown=False):
return_unknown if timeout is not valid then return None on False or timeout
on True (default False)
"""
- if timeout.isdigit():
- return int(timeout)
+ try:
+ candidate = int(timeout)
+ if candidate >= 0:
+ return candidate
+ return timeout if return_unknown else None
+ except ValueError:
+ pass
+ # now we know the timeout is neither an integer nor an integer string
suffix_multiplier = {
"s": 1,
"sec": 1,
@@ -67,7 +90,7 @@ def get_valid_timeout_seconds(timeout_candidate):
raise LibraryError(reports.invalid_timeout(timeout_candidate))
return wait_timeout
-def validate_id(id_candidate, description="id"):
+def validate_id(id_candidate, description="id", reporter=None):
"""
Validate a pacemaker id, raise LibraryError on invalid id.
@@ -78,23 +101,35 @@ def validate_id(id_candidate, description="id"):
# http://www.w3.org/TR/REC-xml-names/#NT-NCName
# http://www.w3.org/TR/REC-xml/#NT-Name
if len(id_candidate) < 1:
- raise LibraryError(reports.invalid_id_is_empty(
- id_candidate, description
- ))
- first_char_re = re.compile("[a-zA-Z_]")
- if not first_char_re.match(id_candidate[0]):
- raise LibraryError(reports.invalid_id_bad_char(
+ report = reports.invalid_id_is_empty(id_candidate, description)
+ if reporter is not None:
+ # we check for None so it works with an empty list as well
+ reporter.append(report)
+ return
+ else:
+ raise LibraryError(report)
+ if _ID_FIRST_CHAR_NOT_RE.match(id_candidate[0]):
+ report = reports.invalid_id_bad_char(
id_candidate, description, id_candidate[0], True
- ))
- char_re = re.compile("[a-zA-Z0-9_.-]")
+ )
+ if reporter is not None:
+ reporter.append(report)
+ else:
+ raise LibraryError(report)
for char in id_candidate[1:]:
- if not char_re.match(char):
- raise LibraryError(reports.invalid_id_bad_char(
+ if _ID_REST_CHARS_NOT_RE.match(char):
+ report = reports.invalid_id_bad_char(
id_candidate, description, char, False
- ))
+ )
+ if reporter is not None:
+ reporter.append(report)
+ else:
+ raise LibraryError(report)
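The new reporter parameter changes how problems are delivered; a small sketch of both modes, assuming the behaviour above:

    from pcs.lib.errors import LibraryError
    from pcs.lib.pacemaker.values import validate_id

    report_list = []
    validate_id("0bad", description="resource id", reporter=report_list)
    # report_list now holds one "invalid id" ReportItem, nothing was raised

    try:
        validate_id("0bad", description="resource id")  # no reporter given
    except LibraryError:
        pass  # without a reporter the same problem raises LibraryError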
-def is_score_value(value):
- if not value:
- return False
- unsigned_value = value[1:] if value[0] in ("+", "-") else value
- return unsigned_value == SCORE_INFINITY or unsigned_value.isdigit()
+def sanitize_id(id_candidate, replacement=""):
+ if not id_candidate:
+ return id_candidate
+ return "".join([
+ "" if _ID_FIRST_CHAR_NOT_RE.match(id_candidate[0]) else id_candidate[0],
+ _ID_REST_CHARS_NOT_RE.sub(replacement, id_candidate[1:])
+ ])
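A minimal usage sketch of sanitize_id, mirroring the SanitizeId test cases above: an invalid first character is dropped and later invalid characters are replaced with the given replacement string:

    from pcs.lib.pacemaker.values import sanitize_id

    assert sanitize_id("0dummy") == "dummy"        # invalid first char dropped
    assert sanitize_id("dum:my") == "dummy"        # invalid char removed
    assert sanitize_id("dum:my", "_") == "dum_my"  # ... or replaced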
diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
index b8e53b4..64d7143 100644
--- a/pcs/lib/reports.py
+++ b/pcs/lib/reports.py
@@ -5,9 +5,86 @@ from __future__ import (
unicode_literals,
)
+from functools import partial
+
from pcs.common import report_codes
from pcs.lib.errors import ReportItem, ReportItemSeverity
+def forceable_error(force_code, report_creator, *args, **kwargs):
+ """
+ Return ReportItem created by report_creator.
+
+ This is an experimental shortcut for a common pattern. It is intended to
+ cooperate with the functions "error" and "warning".
+
+ string force_code is the code for forcing the error
+ callable report_creator is a function that produces a ReportItem. It must
+ take the parameters forceable (None or force code) and severity
+ (from ReportItemSeverity)
+ the remaining args are passed to the report_creator
+ """
+ return report_creator(
+ *args,
+ forceable=force_code,
+ severity=ReportItemSeverity.ERROR,
+ **kwargs
+ )
+
+def warning(report_creator, *args, **kwargs):
+ """
+ Return ReportItem created by report_creator.
+
+ This is an experimental shortcut for a common pattern. It is intended to
+ cooperate with the functions "error" and "forceable_error".
+
+ callable report_creator is a function that produces a ReportItem. It must
+ take the parameters forceable (None or force code) and severity
+ (from ReportItemSeverity)
+ the remaining args are passed to the report_creator
+ """
+ return report_creator(
+ *args,
+ forceable=None,
+ severity=ReportItemSeverity.WARNING,
+ **kwargs
+ )
+
+def error(report_creator, *args, **kwargs):
+ """
+ Return ReportItem created by report_creator.
+
+ This is an experimental shortcut for a common pattern. It is intended to
+ cooperate with the functions "warning" and "forceable_error".
+
+ callable report_creator is a function that produces a ReportItem. It must
+ take the parameters forceable (None or force code) and severity
+ (from ReportItemSeverity)
+ the remaining args are passed to the report_creator
+ """
+ return report_creator(
+ *args,
+ forceable=None,
+ severity=ReportItemSeverity.ERROR,
+ **kwargs
+ )
+
+def get_problem_creator(force_code=None, is_forced=False):
+ """
+ Returns a report creator wrapper (forceable_error or warning).
+
+ This is an experimental shortcut for deciding whether a ReportItem will be
+ a forceable_error or a warning.
+
+ string force_code is the code for forcing the error. It can be useful to
+ prepare it for a whole module by using functools.partial.
+ bool is_forced is a flag for selecting the wrapper
+ """
+ if not force_code:
+ return error
+ if is_forced:
+ return warning
+ return partial(forceable_error, force_code)
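An illustrative sketch of how the helpers above are meant to be combined; "FORCE_OPTIONS" is a hypothetical force code used only for demonstration:

    from pcs.lib import reports

    # not forced: invalid_option becomes a forceable error
    creator = reports.get_problem_creator("FORCE_OPTIONS", is_forced=False)
    item = creator(reports.invalid_option, ["bad"], ["allowed"], "resource")

    # forced: the same call is downgraded to a warning
    creator = reports.get_problem_creator("FORCE_OPTIONS", is_forced=True)
    item = creator(reports.invalid_option, ["bad"], ["allowed"], "resource")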
def common_error(text):
# TODO replace by more specific reports
@@ -81,40 +158,105 @@ def empty_resource_set_list():
report_codes.EMPTY_RESOURCE_SET_LIST,
)
-def required_option_is_missing(name):
+def required_option_is_missing(
+ option_names, option_type=None,
+ severity=ReportItemSeverity.ERROR, forceable=None
+):
"""
required option has not been specified, command cannot continue
+ list option_names -- required option(s) that were not entered
+ option_type describes the option
+ severity report item severity
+ forceable is this report item forceable? by what category?
"""
- return ReportItem.error(
+ return ReportItem(
report_codes.REQUIRED_OPTION_IS_MISSING,
+ severity,
+ forceable=forceable,
info={
- "option_name": name
+ "option_names": option_names,
+ "option_type": option_type,
+ }
+ )
+
+def prerequisite_option_is_missing(
+ option_name, prerequisite_name, option_type="", prerequisite_type=""
+):
+ """
+ if the option_name is specified, the prerequisite_option must be specified
+ string option_name -- an option which depends on the prerequisite_option
+ string prerequisite_name -- the prerequisite option
+ string option_type -- describes the option
+ string prerequisite_type -- describes the prerequisite_option
+ """
+ return ReportItem.error(
+ report_codes.PREREQUISITE_OPTION_IS_MISSING,
+ info={
+ "option_name": option_name,
+ "option_type": option_type,
+ "prerequisite_name": prerequisite_name,
+ "prerequisite_type": prerequisite_type,
+ }
+ )
+
+def required_option_of_alternatives_is_missing(
+ option_names, option_type=None
+):
+ """
+ at least one option has to be specified
+ iterable option_names -- options from which at least one has to be specified
+ string option_type -- describes the option
+ """
+ severity = ReportItemSeverity.ERROR
+ forceable = None
+ return ReportItem(
+ report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
+ severity,
+ forceable=forceable,
+ info={
+ "option_names": option_names,
+ "option_type": option_type,
}
)
def invalid_option(
- option_name, allowed_options, option_type,
+ option_names, allowed_options, option_type,
severity=ReportItemSeverity.ERROR, forceable=None
):
"""
 specified option name is not valid, usually an error or a warning
- option_name specified invalid option name
+ list option_names specified invalid option names
allowed_options iterable of possible allowed option names
 option_type describes the option
severity report item severity
 forceable is this report item forceable? by what category?
"""
+
return ReportItem(
report_codes.INVALID_OPTION,
severity,
forceable,
info={
- "option_name": option_name,
+ "option_names": option_names,
"option_type": option_type,
"allowed": sorted(allowed_options),
}
)
+def invalid_option_type(option_name, allowed_types):
+ """
+ specified value is not of a valid type for the option
+ string option_name -- option name whose value is not of a valid type
+ list|string allowed_types -- list of allowed types or string description
+ """
+ return ReportItem.error(
+ report_codes.INVALID_OPTION_TYPE,
+ info={
+ "option_name": option_name,
+ "allowed_types": allowed_types,
+ },
+ )
+
def invalid_option_value(
option_name, option_value, allowed_values,
severity=ReportItemSeverity.ERROR, forceable=None
@@ -138,6 +280,21 @@ def invalid_option_value(
forceable=forceable
)
+def mutually_exclusive_options(option_names, option_type):
+ """
+ entered options cannot coexist
+ set option_names contains the entered mutually exclusive options
+ string option_type describes the option
+ """
+ return ReportItem.error(
+ report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
+ info={
+ "option_names": option_names,
+ "option_type": option_type,
+ },
+ )
+
+
def invalid_id_is_empty(id, id_description):
"""
empty string was specified as an id, which is not valid
@@ -201,7 +358,7 @@ def multiple_score_options():
report_codes.MULTIPLE_SCORE_OPTIONS,
)
-def run_external_process_started(command, stdin):
+def run_external_process_started(command, stdin, environment):
"""
information about running an external process
command string the external process command
@@ -212,6 +369,7 @@ def run_external_process_started(command, stdin):
info={
"command": command,
"stdin": stdin,
+ "environment": environment,
}
)
@@ -277,6 +435,20 @@ def node_communication_finished(target, retval, data):
}
)
+
+def node_communication_debug_info(target, data):
+ """
+ Node communication debug info from pycurl
+ """
+ return ReportItem.debug(
+ report_codes.NODE_COMMUNICATION_DEBUG_INFO,
+ info={
+ "target": target,
+ "data": data,
+ }
+ )
+
+
def node_communication_not_connected(node, reason):
"""
an error occured when connecting to a remote node, debug info
@@ -351,21 +523,26 @@ def node_communication_error_unsupported_command(
forceable=forceable
)
-def node_communication_command_unsuccessful(node, command, reason):
+def node_communication_command_unsuccessful(
+ node, command, reason, severity=ReportItemSeverity.ERROR, forceable=None
+):
"""
node rejected a request for another reason with a plain text explanation
node string node address / name
 reason string description of the error
"""
- return ReportItem.error(
+ return ReportItem(
report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+ severity,
info={
"node": node,
"command": command,
"reason": reason,
- }
+ },
+ forceable=forceable
)
+
def node_communication_error_other_error(
node, command, reason,
severity=ReportItemSeverity.ERROR, forceable=None
@@ -406,6 +583,55 @@ def node_communication_error_unable_to_connect(
forceable=forceable
)
+
+def node_communication_error_timed_out(
+ node, command, reason,
+ severity=ReportItemSeverity.ERROR, forceable=None
+):
+ """
+ Communication with node timed out.
+ """
+ return ReportItem(
+ report_codes.NODE_COMMUNICATION_ERROR_TIMED_OUT,
+ severity,
+ info={
+ "node": node,
+ "command": command,
+ "reason": reason,
+ },
+ forceable=forceable
+ )
+
+def node_communication_proxy_is_set():
+ """
+ Warning when connection failed and there is proxy set in environment
+ variables
+ """
+ return ReportItem.warning(report_codes.NODE_COMMUNICATION_PROXY_IS_SET)
+
+def cannot_add_node_is_in_cluster(node):
+ """
+ Node is in cluster. It is not possible to add it as a new cluster node.
+ """
+ return ReportItem.error(
+ report_codes.CANNOT_ADD_NODE_IS_IN_CLUSTER,
+ info={"node": node}
+ )
+
+def cannot_add_node_is_running_service(node, service):
+ """
+ Node is running service. It is not possible to add it as a new cluster node.
+ string node address of desired node
+ string service name of service (pacemaker, pacemaker_remote)
+ """
+ return ReportItem.error(
+ report_codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE,
+ info={
+ "node": node,
+ "service": service,
+ }
+ )
+
def corosync_config_distribution_started():
"""
corosync configuration is about to be sent to nodes
@@ -801,28 +1027,167 @@ def id_already_exists(id):
info={"id": id}
)
-def id_not_found(id, id_description):
+def id_belongs_to_unexpected_type(id, expected_types, current_type):
+ """
+ Specified id exists but belongs to a different element than expected.
+ For example, the user wants to create a resource in a group specified by id,
+ but the id does not belong to a group.
+ """
+ return ReportItem.error(
+ report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
+ info={
+ "id": id,
+ "expected_types": expected_types,
+ "current_type": current_type,
+ }
+ )
+
+def object_with_id_in_unexpected_context(
+ object_type, object_id, expected_context_type, expected_context_id
+):
+ """
+ Object specified by object_type (tag) and object_id exists but not inside
+ given context (expected_context_type, expected_context_id).
+ """
+ return ReportItem.error(
+ report_codes.OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT,
+ info={
+ "type": object_type,
+ "id": object_id,
+ "expected_context_type": expected_context_type,
+ "expected_context_id": expected_context_id,
+ }
+ )
+
+
+def id_not_found(id, id_description, context_type="", context_id=""):
"""
specified id does not exist in CIB, user referenced a nonexisting id
- use "resource_does_not_exist" if id is a resource id
- id string specified id
- id_description string decribe id's role
+ string id specified id
+ string id_description describes the id's role
+ string context_id specifies the search area
"""
return ReportItem.error(
report_codes.ID_NOT_FOUND,
info={
"id": id,
"id_description": id_description,
+ "context_type": context_type,
+ "context_id": context_id,
+ }
+ )
+
+def resource_bundle_already_contains_a_resource(bundle_id, resource_id):
+ """
+ The bundle already contains a resource, another one cannot be added
+
+ string bundle_id -- id of the bundle
+ string resource_id -- id of the resource already contained in the bundle
+ """
+ return ReportItem.error(
+ report_codes.RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE,
+ info={
+ "bundle_id": bundle_id,
+ "resource_id": resource_id,
+ }
+ )
+
+def resource_cannot_be_next_to_itself_in_group(resource_id, group_id):
+ """
+ Cannot put resource(id=resource_id) into group(id=group_id) next to itself:
+ resource(id=resource_id).
+ """
+ return ReportItem.error(
+ report_codes.RESOURCE_CANNOT_BE_NEXT_TO_ITSELF_IN_GROUP,
+ info={
+ "resource_id": resource_id,
+ "group_id": group_id,
+ }
+ )
+
+def stonith_resources_do_not_exist(
+ stonith_ids, severity=ReportItemSeverity.ERROR, forceable=None
+):
+ """
+ specified stonith resource doesn't exist (e.g. when creating in constraints)
+ iterable stonith_ids -- list of specified stonith ids
+ """
+ return ReportItem(
+ report_codes.STONITH_RESOURCES_DO_NOT_EXIST,
+ severity,
+ info={
+ "stonith_ids": stonith_ids,
+ },
+ forceable=forceable
+ )
+
+def resource_running_on_nodes(
+ resource_id, roles_with_nodes, severity=ReportItemSeverity.INFO
+):
+ """
+ Resource is running on some nodes. Taken from cluster state.
+
+ string resource_id represents the resource
+ list of tuple roles_with_nodes contains pairs (role, node)
+ """
+ return ReportItem(
+ report_codes.RESOURCE_RUNNING_ON_NODES,
+ severity,
+ info={
+ "resource_id": resource_id,
+ "roles_with_nodes": roles_with_nodes,
+ }
+ )
+
+def resource_does_not_run(resource_id, severity=ReportItemSeverity.INFO):
+ """
+ Resource is not running on any node. Taken from cluster state.
+
+ string resource_id represents the resource
+ """
+ return ReportItem(
+ report_codes.RESOURCE_DOES_NOT_RUN,
+ severity,
+ info={
+ "resource_id": resource_id,
}
)
-def resource_does_not_exist(resource_id):
+def resource_is_guest_node_already(resource_id):
"""
- specified resource does not exist (e.g. when creating in constraints)
- resource_id string specified resource id
+ The resource is already used as guest node (i.e. has meta attribute
+ remote-node).
+
+ string resource_id -- id of the resource that is guest node
"""
return ReportItem.error(
- report_codes.RESOURCE_DOES_NOT_EXIST,
+ report_codes.RESOURCE_IS_GUEST_NODE_ALREADY,
+ info={
+ "resource_id": resource_id,
+ }
+ )
+
+def resource_is_unmanaged(resource_id):
+ """
+ The resource the user works with is unmanaged (e.g. in enable/disable)
+
+ string resource_id -- id of the unmanaged resource
+ """
+ return ReportItem.warning(
+ report_codes.RESOURCE_IS_UNMANAGED,
+ info={
+ "resource_id": resource_id,
+ }
+ )
+
+def resource_managed_no_monitor_enabled(resource_id):
+ """
+ The resource which was set to managed mode has no monitor operations enabled
+
+ string resource_id -- id of the resource
+ """
+ return ReportItem.warning(
+ report_codes.RESOURCE_MANAGED_NO_MONITOR_ENABLED,
info={
"resource_id": resource_id,
}
@@ -888,6 +1253,18 @@ def cib_push_error(reason, pushed_cib):
}
)
+def cib_save_tmp_error(reason):
+ """
+ cannot save CIB into a temporary file
+ string reason error description
+ """
+ return ReportItem.error(
+ report_codes.CIB_SAVE_TMP_ERROR,
+ info={
+ "reason": reason,
+ }
+ )
+
def cluster_state_cannot_load(reason):
"""
cannot load cluster status from crm_mon, crm_mon exited with non-zero code
@@ -908,38 +1285,46 @@ def cluster_state_invalid_format():
report_codes.BAD_CLUSTER_STATE_FORMAT,
)
-def resource_wait_not_supported():
+def wait_for_idle_not_supported():
"""
crm_resource does not support --wait
"""
return ReportItem.error(
- report_codes.RESOURCE_WAIT_NOT_SUPPORTED,
+ report_codes.WAIT_FOR_IDLE_NOT_SUPPORTED,
)
-def resource_wait_timed_out(reason):
+def wait_for_idle_timed_out(reason):
"""
waiting for resources (crm_resource --wait) failed, timeout expired
string reason error description
"""
return ReportItem.error(
- report_codes.RESOURCE_WAIT_TIMED_OUT,
+ report_codes.WAIT_FOR_IDLE_TIMED_OUT,
info={
"reason": reason,
}
)
-def resource_wait_error(reason):
+def wait_for_idle_error(reason):
"""
waiting for resources (crm_resource --wait) failed
string reason error description
"""
return ReportItem.error(
- report_codes.RESOURCE_WAIT_ERROR,
+ report_codes.WAIT_FOR_IDLE_ERROR,
info={
"reason": reason,
}
)
+def wait_for_idle_not_live_cluster():
+ """
+ cannot wait for the cluster if not running with a live cluster
+ """
+ return ReportItem.error(
+ report_codes.WAIT_FOR_IDLE_NOT_LIVE_CLUSTER,
+ )
+
def resource_cleanup_error(reason, resource=None, node=None):
"""
an error occured when deleting resource history in pacemaker
@@ -967,39 +1352,137 @@ def resource_cleanup_too_time_consuming(threshold):
forceable=report_codes.FORCE_LOAD_THRESHOLD
)
-def node_not_found(node):
+def resource_operation_interval_duplication(duplications):
"""
- specified node does not exist
- node string specified node
+ Multiple operations with the same name and the same interval appeared.
+ Each operation with the same name (e.g. monitoring) needs to have a unique
+ interval.
+ dict duplications see resource operation interval duplication
+ in pcs/lib/exchange_formats.md
"""
return ReportItem.error(
- report_codes.NODE_NOT_FOUND,
- info={"node": node}
+ report_codes.RESOURCE_OPERATION_INTERVAL_DUPLICATION,
+ info={
+ "duplications": duplications,
+ }
)
-def pacemaker_local_node_name_not_found(reason):
+def resource_operation_interval_adapted(
+ operation_name, original_interval, adapted_interval
+):
"""
- we are unable to figure out pacemaker's local node's name
- reason string error message
+ The interval of a resource operation was adapted so that the intervals of
+ operations with the same name are unique.
+ Each operation with the same name (e.g. monitoring) needs to have a unique
+ interval.
+
"""
- return ReportItem.error(
- report_codes.PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND,
- info={"reason": reason}
+ return ReportItem.warning(
+ report_codes.RESOURCE_OPERATION_INTERVAL_ADAPTED,
+ info={
+ "operation_name": operation_name,
+ "original_interval": original_interval,
+ "adapted_interval": adapted_interval,
+ }
)
-def rrp_active_not_supported(warning=False):
+def node_not_found(
+ node, searched_types=None, severity=ReportItemSeverity.ERROR, forceable=None
+):
"""
- active RRP mode is not supported, require user confirmation
- warning set to True if user confirmed he/she wants to proceed
+ specified node does not exist
+ node string specified node
+ searched_types list|string
"""
return ReportItem(
- report_codes.RRP_ACTIVE_NOT_SUPPORTED,
- ReportItemSeverity.WARNING if warning else ReportItemSeverity.ERROR,
- forceable=(None if warning else report_codes.FORCE_ACTIVE_RRP)
- )
-
-def cman_ignored_option(option):
- """
+ report_codes.NODE_NOT_FOUND,
+ severity,
+ info={
+ "node": node,
+ "searched_types": searched_types if searched_types else []
+ },
+ forceable=forceable
+ )
+
+def node_to_clear_is_still_in_cluster(
+ node, severity=ReportItemSeverity.ERROR, forceable=None
+):
+ """
+ specified node is still in the cluster and `crm_node --remove` should not
+ be used
+
+ node string specified node
+ """
+ return ReportItem(
+ report_codes.NODE_TO_CLEAR_IS_STILL_IN_CLUSTER,
+ severity,
+ info={
+ "node": node,
+ },
+ forceable=forceable
+ )
+
+def node_remove_in_pacemaker_failed(node_name, reason):
+ """
+ call of `crm_node --remove` failed
+ string reason -- the caught error message
+ """
+ return ReportItem.error(
+ report_codes.NODE_REMOVE_IN_PACEMAKER_FAILED,
+ info={
+ "node_name": node_name,
+ "reason": reason,
+ }
+ )
+
+def multiple_result_found(
+ result_type, result_identifier_list, search_description="",
+ severity=ReportItemSeverity.ERROR, forceable=None
+):
+ """
+ Multiple results were found when something was looked for, e.g. a resource
+ for a remote node.
+
+ string result_type specifies what was looked for, e.g. "resource"
+ list result_identifier_list contains identifiers of results
+ e.g. resource ids
+ string search_description e.g. name of remote_node
+ """
+ return ReportItem(
+ report_codes.MULTIPLE_RESULTS_FOUND,
+ severity,
+ info={
+ "result_type": result_type,
+ "result_identifier_list": result_identifier_list,
+ "search_description": search_description,
+ },
+ forceable=forceable
+ )
+
+
+def pacemaker_local_node_name_not_found(reason):
+ """
+ we are unable to figure out pacemaker's local node's name
+ reason string error message
+ """
+ return ReportItem.error(
+ report_codes.PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND,
+ info={"reason": reason}
+ )
+
+def rrp_active_not_supported(warning=False):
+ """
+ active RRP mode is not supported, require user confirmation
+ warning set to True if user confirmed he/she wants to proceed
+ """
+ return ReportItem(
+ report_codes.RRP_ACTIVE_NOT_SUPPORTED,
+ ReportItemSeverity.WARNING if warning else ReportItemSeverity.ERROR,
+ forceable=(None if warning else report_codes.FORCE_ACTIVE_RRP)
+ )
+
+def cman_ignored_option(option):
+ """
specified option is ignored as CMAN clusters do not support it
options string option name
"""
@@ -1321,6 +1804,18 @@ def invalid_resource_agent_name(name):
}
)
+def invalid_stonith_agent_name(name):
+ """
+ The entered stonith agent name is not valid.
+ string name -- entered stonith agent name
+ """
+ return ReportItem.error(
+ report_codes.INVALID_STONITH_AGENT_NAME,
+ info={
+ "name": name,
+ }
+ )
+
def agent_name_guessed(entered_name, guessed_name):
"""
Resource agent name was deduced from the entered name.
@@ -1455,6 +1950,242 @@ def sbd_disabling_started():
)
+def sbd_device_initialization_started(device_list):
+ """
+ initialization of SBD device(s) started
+ """
+ return ReportItem.info(
+ report_codes.SBD_DEVICE_INITIALIZATION_STARTED,
+ info={
+ "device_list": device_list,
+ }
+ )
+
+
+def sbd_device_initialization_success(device_list):
+ """
+ initialization of SBD device(s) succeeded
+ """
+ return ReportItem.info(
+ report_codes.SBD_DEVICE_INITIALIZATION_SUCCESS,
+ info={
+ "device_list": device_list,
+ }
+ )
+
+
+def sbd_device_initialization_error(device_list, reason):
+ """
+ initialization of SBD device failed
+ """
+ return ReportItem.error(
+ report_codes.SBD_DEVICE_INITIALIZATION_ERROR,
+ info={
+ "device_list": device_list,
+ "reason": reason,
+ }
+ )
+
+
+def sbd_device_list_error(device, reason):
+ """
+ command 'sbd list' failed
+ """
+ return ReportItem.error(
+ report_codes.SBD_DEVICE_LIST_ERROR,
+ info={
+ "device": device,
+ "reason": reason,
+ }
+ )
+
+
+def sbd_device_message_error(device, node, message, reason):
+ """
+ unable to set message 'message' on shared block device 'device'
+ for node 'node'.
+ """
+ return ReportItem.error(
+ report_codes.SBD_DEVICE_MESSAGE_ERROR,
+ info={
+ "device": device,
+ "node": node,
+ "message": message,
+ "reason": reason,
+ }
+ )
+
+
+def sbd_device_dump_error(device, reason):
+ """
+ command 'sbd dump' failed
+ """
+ return ReportItem.error(
+ report_codes.SBD_DEVICE_DUMP_ERROR,
+ info={
+ "device": device,
+ "reason": reason,
+ }
+ )
+
+def files_distribution_started(file_list, node_list=None, description=None):
+ """
+ files are about to be sent to nodes
+ """
+ file_list = file_list if file_list else []
+ return ReportItem.info(
+ report_codes.FILES_DISTRIBUTION_STARTED,
+ info={
+ "file_list": file_list,
+ "node_list": node_list,
+ "description": description,
+ }
+ )
+
+def file_distribution_success(node=None, file_description=None):
+ """
+ files were successfully distributed to nodes
+
+ string node -- name of destination node
+ string file_description -- name (code) of successfully put files
+ """
+ return ReportItem.info(
+ report_codes.FILE_DISTRIBUTION_SUCCESS,
+ info={
+ "node": node,
+ "file_description": file_description,
+ },
+ )
+
+def file_distribution_error(
+ node=None, file_description=None, reason=None,
+ severity=ReportItemSeverity.ERROR, forceable=None
+):
+ """
+ cannot put files to specific nodes
+
+ string node -- name of destination node
+ string file_description -- file code
+ string reason -- error message
+ """
+ return ReportItem(
+ report_codes.FILE_DISTRIBUTION_ERROR,
+ severity,
+ info={
+ "node": node,
+ "file_description": file_description,
+ "reason": reason,
+ },
+ forceable=forceable
+ )
+
+def files_remove_from_node_started(file_list, node_list=None, description=None):
+ """
+ files are about to be removed from nodes
+ """
+ file_list = file_list if file_list else []
+ return ReportItem.info(
+ report_codes.FILES_REMOVE_FROM_NODE_STARTED,
+ info={
+ "file_list": file_list,
+ "node_list": node_list,
+ "description": description,
+ }
+ )
+
+def file_remove_from_node_success(node=None, file_description=None):
+ """
+ files were successfully removed from nodes
+
+ string node -- name of destination node
+ string file_description -- name (code) of successfully removed files
+ """
+ return ReportItem.info(
+ report_codes.FILE_REMOVE_FROM_NODE_SUCCESS,
+ info={
+ "node": node,
+ "file_description": file_description,
+ },
+ )
+
+def file_remove_from_node_error(
+ node=None, file_description=None, reason=None,
+ severity=ReportItemSeverity.ERROR, forceable=None
+):
+ """
+ cannot remove files from specific nodes
+
+ string node -- name of destination node
+ string file_description -- file code
+ string reason -- error message
+ """
+ return ReportItem(
+ report_codes.FILE_REMOVE_FROM_NODE_ERROR,
+ severity,
+ info={
+ "node": node,
+ "file_description": file_description,
+ "reason": reason,
+ },
+ forceable=forceable
+ )
+
+def service_commands_on_nodes_started(
+ action_list, node_list=None, description=None
+):
+ """
+ service commands are about to be run on nodes
+ """
+ action_list = action_list if action_list else []
+ return ReportItem.info(
+ report_codes.SERVICE_COMMANDS_ON_NODES_STARTED,
+ info={
+ "action_list": action_list,
+ "node_list": node_list,
+ "description": description,
+ }
+ )
+
+def service_command_on_node_success(
+ node=None, service_command_description=None
+):
+ """
+ a service command was successfully run on a node
+
+ string service_command_description -- name (code) of the successfully run
+ service command
+ """
+ return ReportItem.info(
+ report_codes.SERVICE_COMMAND_ON_NODE_SUCCESS,
+ info={
+ "node": node,
+ "service_command_description": service_command_description,
+ },
+ )
+
+def service_command_on_node_error(
+ node=None, service_command_description=None, reason=None,
+ severity=ReportItemSeverity.ERROR, forceable=None
+):
+ """
+ running a service command on a node failed
+
+ string service_command_description -- name (code) of the service command
+ string reason -- error message
+ """
+ return ReportItem(
+ report_codes.SERVICE_COMMAND_ON_NODE_ERROR,
+ severity,
+ info={
+ "node": node,
+ "service_command_description": service_command_description,
+ "reason": reason,
+ },
+ forceable=forceable
+ )
+
+
def invalid_response_format(node):
"""
error message that response in invalid format has been received from
@@ -1468,6 +2199,69 @@ def invalid_response_format(node):
)
+def sbd_no_device_for_node(node):
+ """
+ there is no device defined for node when enabling sbd with device
+ """
+ return ReportItem.error(
+ report_codes.SBD_NO_DEVICE_FOR_NODE,
+ info={"node": node}
+ )
+
+
+def sbd_too_many_devices_for_node(node, device_list, max_devices):
+ """
+ more devices than the allowed maximum (max_devices) defined for a node
+ """
+ return ReportItem.error(
+ report_codes.SBD_TOO_MANY_DEVICES_FOR_NODE,
+ info={
+ "node": node,
+ "device_list": device_list,
+ "max_devices": max_devices,
+ }
+ )
+
+
+def sbd_device_path_not_absolute(device, node=None):
+ """
+ path of SBD device is not absolute
+ """
+ return ReportItem.error(
+ report_codes.SBD_DEVICE_PATH_NOT_ABSOLUTE,
+ info={
+ "device": device,
+ "node": node,
+ }
+ )
+
+
+def sbd_device_does_not_exist(device, node):
+ """
+ specified device on node doesn't exist
+ """
+ return ReportItem.error(
+ report_codes.SBD_DEVICE_DOES_NOT_EXIST,
+ info={
+ "device": device,
+ "node": node,
+ }
+ )
+
+
+def sbd_device_is_not_block_device(device, node):
+ """
+ specified device on node is not block device
+ """
+ return ReportItem.error(
+ report_codes.SBD_DEVICE_IS_NOT_BLOCK_DEVICE,
+ info={
+ "device": device,
+ "node": node,
+ }
+ )
+
+
def sbd_not_installed(node):
"""
sbd is not installed on specified node
@@ -1564,18 +2358,6 @@ def cib_alert_recipient_invalid_value(recipient_value):
info={"recipient": recipient_value}
)
-def cib_alert_not_found(alert_id):
- """
- Alert with specified id doesn't exist.
-
- alert_id -- id of alert
- """
- return ReportItem.error(
- report_codes.CIB_ALERT_NOT_FOUND,
- info={"alert": alert_id}
- )
-
-
def cib_upgrade_successful():
"""
Upgrade of CIB schema was successful.
@@ -1682,6 +2464,58 @@ def live_environment_required(forbidden_options):
}
)
+def live_environment_required_for_local_node():
+ """
+ The operation cannot be performed on a CIB in a file (not a live cluster)
+ if no node name is specified, i.e. when working with the local node
+ """
+ return ReportItem.error(
+ report_codes.LIVE_ENVIRONMENT_REQUIRED_FOR_LOCAL_NODE,
+ )
+
+def nolive_skip_files_distribution(files_description, nodes):
+ """
+ When running an action with e.g. -f, the files were not distributed to nodes.
+ list files_description -- contains descriptions of the files
+ list nodes -- destinations where the files should have been distributed
+ """
+ return ReportItem.info(
+ report_codes.NOLIVE_SKIP_FILES_DISTRIBUTION,
+ info={
+ "files_description": files_description,
+ "nodes": nodes,
+ }
+ )
+
+def nolive_skip_files_remove(files_description, nodes):
+ """
+ When running an action with e.g. -f, the files were not removed from nodes.
+ list files_description -- contains descriptions of the files
+ list nodes -- destinations from where the files should have been removed
+ """
+ return ReportItem.info(
+ report_codes.NOLIVE_SKIP_FILES_REMOVE,
+ info={
+ "files_description": files_description,
+ "nodes": nodes,
+ }
+ )
+
+def nolive_skip_service_command_on_nodes(service, command, nodes):
+ """
+ When running an action with e.g. -f, the service command is not run on nodes.
+ string service -- e.g. pacemaker, pacemaker_remote, corosync
+ string command -- e.g. start, enable, stop, disable
+ list nodes -- destinations where the command should have been run
+ """
+ return ReportItem.info(
+ report_codes.NOLIVE_SKIP_SERVICE_COMMAND_ON_NODES,
+ info={
+ "service": service,
+ "command": command,
+ "nodes": nodes,
+ }
+ )
def quorum_cannot_disable_atb_due_to_sbd(
severity=ReportItemSeverity.ERROR, forceable=None
@@ -1766,3 +2600,70 @@ def cluster_conf_read_error(path, reason):
"reason": reason,
}
)
+
+def fencing_level_already_exists(level, target_type, target_value, devices):
+ """
+ Fencing level already exists, it cannot be created
+ """
+ return ReportItem.error(
+ report_codes.CIB_FENCING_LEVEL_ALREADY_EXISTS,
+ info={
+ "level": level,
+ "target_type": target_type,
+ "target_value": target_value,
+ "devices": devices,
+ }
+ )
+
+def fencing_level_does_not_exist(level, target_type, target_value, devices):
+ """
+ Fencing level does not exist, it cannot be updated or deleted
+ """
+ return ReportItem.error(
+ report_codes.CIB_FENCING_LEVEL_DOES_NOT_EXIST,
+ info={
+ "level": level,
+ "target_type": target_type,
+ "target_value": target_value,
+ "devices": devices,
+ }
+ )
+
+def use_command_node_add_remote(
+ severity=ReportItemSeverity.ERROR, forceable=None
+):
+ """
+ Advise the user to use a more appropriate command.
+ """
+ return ReportItem(
+ report_codes.USE_COMMAND_NODE_ADD_REMOTE,
+ severity,
+ info={},
+ forceable=forceable
+ )
+
+def use_command_node_add_guest(
+ severity=ReportItemSeverity.ERROR, forceable=None
+):
+ """
+ Advise the user to use a more appropriate command.
+ """
+ return ReportItem(
+ report_codes.USE_COMMAND_NODE_ADD_GUEST,
+ severity,
+ info={},
+ forceable=forceable
+ )
+
+def use_command_node_remove_guest(
+ severity=ReportItemSeverity.ERROR, forceable=None
+):
+ """
+ Advise the user to use a more appropriate command.
+ """
+ return ReportItem(
+ report_codes.USE_COMMAND_NODE_REMOVE_GUEST,
+ severity,
+ info={},
+ forceable=forceable
+ )
diff --git a/pcs/lib/resource_agent.py b/pcs/lib/resource_agent.py
index 150f2b4..fb3cf0b 100644
--- a/pcs/lib/resource_agent.py
+++ b/pcs/lib/resource_agent.py
@@ -7,17 +7,73 @@ from __future__ import (
import os
import re
+from collections import namedtuple
+
from lxml import etree
from pcs import settings
+from pcs.common import report_codes
+from pcs.common.tools import xml_fromstring
from pcs.lib import reports
from pcs.lib.errors import LibraryError, ReportItemSeverity
-from pcs.lib.pacemaker_values import is_true
-from pcs.common import report_codes
+from pcs.lib.pacemaker.values import is_true
_crm_resource = os.path.join(settings.pacemaker_binaries, "crm_resource")
+DEFAULT_RESOURCE_CIB_ACTION_NAMES = [
+ "monitor",
+ "start",
+ "stop",
+ "promote",
+ "demote",
+]
+DEFAULT_STONITH_CIB_ACTION_NAMES = ["monitor"]
+
+# The monitor operation is always required, no matter whether --no-default-ops
+# was entered or whether the agent does not specify it. See
+# http://clusterlabs.org/doc/en-US/Pacemaker/1.1-pcs/html-single/Pacemaker_Explained/index.html#_resource_operations
+NECESSARY_CIB_ACTION_NAMES = ["monitor"]
+
+# These are all standards valid in the CIB. To get a list of standards
+# supported by pacemaker in the local environment, use the result of
+# "pcs resource standards".
+STANDARD_LIST = [
+ "ocf",
+ "lsb",
+ "heartbeat",
+ "stonith",
+ "upstart",
+ "service",
+ "systemd",
+ "nagios",
+]
+
+DEFAULT_INTERVALS = {
+ "monitor": "60s"
+}
+
+
+def get_default_interval(operation_name):
+ """
+ Return default interval for given operation_name.
+ string operation_name
+ """
+ return DEFAULT_INTERVALS.get(operation_name, "0s")
+
+def complete_all_intervals(raw_operation_list):
+ """
+ Return operation_list based on raw_operation_list where each item has key
+ "interval".
+
+ list of dict raw_operation_list can include items without the key "interval".
+ """
+ operation_list = []
+ for raw_operation in raw_operation_list:
+ operation = raw_operation.copy()
+ if "interval" not in operation:
+ operation["interval"] = get_default_interval(operation["name"])
+ operation_list.append(operation)
+ return operation_list
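A short sketch of the interval completion above, assuming the default intervals defined in this module ("60s" for monitor, "0s" otherwise):

    from pcs.lib.resource_agent import complete_all_intervals

    operations = complete_all_intervals([
        {"name": "monitor"},                  # gets interval "60s"
        {"name": "start", "interval": "5s"},  # keeps its interval
        {"name": "stop"},                     # gets interval "0s"
    ])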
class ResourceAgentError(Exception):
# pylint: disable=super-init-not-called
@@ -32,6 +88,56 @@ class UnableToGetAgentMetadata(ResourceAgentError):
class InvalidResourceAgentName(ResourceAgentError):
pass
+class InvalidStonithAgentName(ResourceAgentError):
+ pass
+
+class ResourceAgentName(
+ namedtuple("ResourceAgentName", "standard provider type")
+):
+ @property
+ def full_name(self):
+ return ":".join(
+ filter(
+ None,
+ [self.standard, self.provider, self.type]
+ )
+ )
+
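A minimal sketch of full_name, assuming the namedtuple above: empty parts are skipped when joining, so the provider-less form of non-ocf agents comes out cleanly:

    from pcs.lib.resource_agent import ResourceAgentName

    assert ResourceAgentName("ocf", "heartbeat", "Dummy").full_name \
        == "ocf:heartbeat:Dummy"
    assert ResourceAgentName("systemd", None, "crond").full_name \
        == "systemd:crond"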
+def get_resource_agent_name_from_string(full_agent_name):
+ # full_agent_name could be for example systemd:lvm2-pvscan@252:2
+ # note that the second colon is not a separator of provider and type
+ match = re.match(
+ "^(?P<standard>systemd|service):(?P<agent_type>[^:@]+ at .*)$",
+ full_agent_name
+ )
+ if match:
+ return ResourceAgentName(
+ match.group("standard"),
+ None,
+ match.group("agent_type")
+ )
+
+ match = re.match(
+ "^(?P<standard>[^:]+)(:(?P<provider>[^:]+))?:(?P<type>[^:]+)$",
+ full_agent_name
+ )
+ if not match:
+ raise InvalidResourceAgentName(full_agent_name)
+
+ standard = match.group("standard")
+ provider = match.group("provider") if match.group("provider") else None
+ agent_type = match.group("type")
+
+ if standard not in STANDARD_LIST:
+ raise InvalidResourceAgentName(full_agent_name)
+
+ if standard == "ocf" and not provider:
+ raise InvalidResourceAgentName(full_agent_name)
+
+ if standard != "ocf" and provider:
+ raise InvalidResourceAgentName(full_agent_name)
+
+ return ResourceAgentName(standard, provider, agent_type)
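
A few illustrative calls showing the parsing rules above (agent names are examples only):

    get_resource_agent_name_from_string("ocf:heartbeat:Dummy")
    # -> ResourceAgentName(standard="ocf", provider="heartbeat", type="Dummy")
    get_resource_agent_name_from_string("systemd:lvm2-pvscan@252:2")
    # -> ResourceAgentName(standard="systemd", provider=None,
    #                      type="lvm2-pvscan@252:2")
    get_resource_agent_name_from_string("lsb:heartbeat:Dummy")
    # raises InvalidResourceAgentName -- only the ocf standard takes a provider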
def list_resource_agents_standards(runner):
"""
@@ -186,8 +292,18 @@ def guess_exactly_one_resource_agent_full_name(runner, search_agent_name):
return agents[0]
def find_valid_resource_agent_by_name(
- report_processor, runner, name, allowed_absent=False
+ report_processor, runner, name,
+ allowed_absent=False, absent_agent_supported=True
):
+ """
+ Return instance of ResourceAgent corresponding to name
+
+ report_processor is a tool for warning/info/error reporting
+ runner is a tool for launching external commands
+ string name specifies the searched agent
+ bool absent_agent_supported decides whether an absent agent may be returned;
+ it controls whether a forceable or a non-forceable error is produced
+ """
if ":" not in name:
agent = guess_exactly_one_resource_agent_full_name(runner, name)
report_processor.process(
@@ -195,26 +311,59 @@ def find_valid_resource_agent_by_name(
)
return agent
+ return _find_valid_agent_by_name(
+ report_processor,
+ runner,
+ name,
+ ResourceAgent,
+ AbsentResourceAgent if allowed_absent else None,
+ absent_agent_supported=absent_agent_supported,
+ )
+
+def find_valid_stonith_agent_by_name(
+ report_processor, runner, name,
+ allowed_absent=False, absent_agent_supported=True
+):
+ return _find_valid_agent_by_name(
+ report_processor,
+ runner,
+ name,
+ StonithAgent,
+ AbsentStonithAgent if allowed_absent else None,
+ absent_agent_supported=absent_agent_supported,
+ )
+
+def _find_valid_agent_by_name(
+ report_processor, runner, name, PresentAgentClass, AbsentAgentClass,
+ absent_agent_supported=True
+):
try:
- return ResourceAgent(runner, name).validate_metadata()
- except InvalidResourceAgentName as e:
+ return PresentAgentClass(runner, name).validate_metadata()
+ except (InvalidResourceAgentName, InvalidStonithAgentName) as e:
raise LibraryError(resource_agent_error_to_report_item(e))
except UnableToGetAgentMetadata as e:
- if not allowed_absent:
+ if not absent_agent_supported:
raise LibraryError(resource_agent_error_to_report_item(e))
+ if not AbsentAgentClass:
+ raise LibraryError(resource_agent_error_to_report_item(
+ e,
+ forceable=True
+ ))
+
report_processor.process(resource_agent_error_to_report_item(
e,
severity=ReportItemSeverity.WARNING,
- forceable=True
))
- return AbsentResourceAgent(runner, name)
+ return AbsentAgentClass(runner, name)
class Agent(object):
"""
Base class for providing convinient access to an agent's metadata
"""
+ DEFAULT_CIB_ACTION_NAMES = []
+
def __init__(self, runner):
"""
create an instance which reads metadata by itself on demand
@@ -258,6 +407,7 @@ class Agent(object):
agent_info = self.get_description_info()
agent_info["parameters"] = self.get_parameters()
agent_info["actions"] = self.get_actions()
+ agent_info["default_actions"] = self.get_cib_default_actions()
return agent_info
@@ -298,10 +448,12 @@ class Agent(object):
params_element = self._get_metadata().find("parameters")
if params_element is None:
return []
- return [
- self._get_parameter(parameter)
- for parameter in params_element.iter("parameter")
- ]
+ param_list = []
+ for param_el in params_element.iter("parameter"):
+ param = self._get_parameter(param_el)
+ if not param["obsoletes"]:
+ param_list.append(param)
+ return param_list
def _get_parameter(self, parameter_element):
@@ -324,8 +476,44 @@ class Agent(object):
"default": default_value,
"required": is_true(parameter_element.get("required", "0")),
"advanced": False,
+ "deprecated": is_true(parameter_element.get("deprecated", "0")),
+ "obsoletes": parameter_element.get("obsoletes", None),
}
+ def validate_parameters(
+ self, parameters,
+ parameters_type="resource agent parameter",
+ allow_invalid=False
+ ):
+ forceable = report_codes.FORCE_OPTIONS if not allow_invalid else None
+ severity = (
+ ReportItemSeverity.ERROR if not allow_invalid
+ else ReportItemSeverity.WARNING
+ )
+
+ report_list = []
+ bad_opts, missing_req_opts = self.validate_parameters_values(
+ parameters
+ )
+
+ if bad_opts:
+ report_list.append(reports.invalid_option(
+ bad_opts,
+ sorted([attr["name"] for attr in self.get_parameters()]),
+ parameters_type,
+ severity=severity,
+ forceable=forceable,
+ ))
+
+ if missing_req_opts:
+ report_list.append(reports.required_option_is_missing(
+ missing_req_opts,
+ parameters_type,
+ severity=severity,
+ forceable=forceable,
+ ))
+
+ return report_list
def validate_parameters_values(self, parameters_values):
"""
@@ -350,11 +538,7 @@ class Agent(object):
required_missing
)
-
- def get_actions(self):
- """
- Get list of agent's actions (operations)
- """
+ def _get_raw_actions(self):
actions_element = self._get_metadata().find("actions")
if actions_element is None:
return []
@@ -366,6 +550,41 @@ class Agent(object):
for action in actions_element.iter("action")
]
+ def get_actions(self):
+ """
+ Get a list of the agent's actions (operations). Each action is represented
+ as a dict. Example: [{"name": "monitor", "timeout": 20, "interval": 10}]
+ """
+ action_list = []
+ for raw_action in self._get_raw_actions():
+ action = {}
+ for key, value in raw_action.items():
+ if key != "depth":
+ action[key] = value
+ elif value != "0":
+ action["OCF_CHECK_LEVEL"] = value
+ action_list.append(action)
+ return action_list
+
+ def get_cib_default_actions(self, necessary_only=False):
+ """
+ List actions that should be put into the resource when it is created.
+ Note that every action has at least the attribute "name".
+ """
+
+ action_list = [
+ action for action in self.get_actions()
+ if action.get("name", "") in (
+ NECESSARY_CIB_ACTION_NAMES if necessary_only
+ else self.DEFAULT_CIB_ACTION_NAMES
+ )
+ ]
+
+ for action_name in NECESSARY_CIB_ACTION_NAMES:
+ if action_name not in [action["name"] for action in action_list]:
+ action_list.append({"name": action_name})
+
+ return complete_all_intervals(action_list)
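
A hedged sketch of the expected output, assuming a hypothetical agent whose metadata advertises monitor, start and stop actions (any extra attributes from the metadata are carried over as well):

    agent.get_cib_default_actions()
    # -> [{"name": "monitor", "interval": "60s"},
    #     {"name": "start", "interval": "0s"},
    #     {"name": "stop", "interval": "0s"}]
    agent.get_cib_default_actions(necessary_only=True)
    # -> [{"name": "monitor", "interval": "60s"}]
    # monitor would be appended as {"name": "monitor"} even if the agent
    # did not advertise it at all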
def _get_metadata(self):
"""
@@ -384,7 +603,7 @@ class Agent(object):
def _parse_metadata(self, metadata):
try:
- dom = etree.fromstring(metadata)
+ dom = xml_fromstring(metadata)
# TODO Majority of agents don't provide valid metadata, so we skip
# the validation for now. We want to enable it once the schema
# and/or agents are fixed.
@@ -403,12 +622,8 @@ class Agent(object):
class FakeAgentMetadata(Agent):
- def get_name(self):
- raise NotImplementedError()
-
-
- def _load_metadata(self):
- raise NotImplementedError()
+ #pylint:disable=abstract-method
+ pass
class StonithdMetadata(FakeAgentMetadata):
@@ -443,19 +658,29 @@ class StonithdMetadata(FakeAgentMetadata):
class CrmAgent(Agent):
- def __init__(self, runner, full_agent_name):
+ #pylint:disable=abstract-method
+ def __init__(self, runner, name):
"""
init
CommandRunner runner
- string full_agent_name standard:provider:type or standard:type
"""
super(CrmAgent, self).__init__(runner)
- self._full_agent_name = full_agent_name
+ self._name_parts = self._prepare_name_parts(name)
+ def _prepare_name_parts(self, name):
+ raise NotImplementedError()
- def get_name(self):
- return self._full_agent_name
+ def _get_full_name(self):
+ return self._name_parts.full_name
+
+ def get_standard(self):
+ return self._name_parts.standard
+
+ def get_provider(self):
+ return self._name_parts.provider
+ def get_type(self):
+ return self._name_parts.type
def is_valid_metadata(self):
"""
@@ -486,7 +711,7 @@ class CrmAgent(Agent):
"/usr/bin/",
])
stdout, stderr, retval = self._runner.run(
- [_crm_resource, "--show-metadata", self._full_agent_name],
+ [_crm_resource, "--show-metadata", self._get_full_name()],
env_extend={
"PATH": env_path,
}
@@ -497,40 +722,101 @@ class CrmAgent(Agent):
class ResourceAgent(CrmAgent):
+ DEFAULT_CIB_ACTION_NAMES = DEFAULT_RESOURCE_CIB_ACTION_NAMES
"""
Provides convinient access to a resource agent's metadata
"""
- def __init__(self, runner, full_agent_name):
- if not re.match("^[^:]+(:[^:]+){1,2}$", full_agent_name):
- raise InvalidResourceAgentName(full_agent_name)
- super(ResourceAgent, self).__init__(runner, full_agent_name)
+ def _prepare_name_parts(self, name):
+ return get_resource_agent_name_from_string(name)
-class AbsentResourceAgent(ResourceAgent):
+ def get_name(self):
+ return self._get_full_name()
+
+ def get_parameters(self):
+ parameters = super(ResourceAgent, self).get_parameters()
+ if (
+ self.get_standard() == "ocf"
+ and
+ (self.get_provider() in ("heartbeat", "pacemaker"))
+ ):
+ trace_ra_found = False
+ trace_file_found = False
+ for param in parameters:
+ param_name = param["name"].lower()
+ if param_name == "trace_ra":
+ trace_ra_found = True
+ if param_name == "trace_file":
+ trace_file_found = True
+ if trace_file_found and trace_ra_found:
+ break
+
+ if not trace_ra_found:
+ shortdesc = (
+ "Set to 1 to turn on resource agent tracing"
+ " (expect large output)"
+ )
+ parameters.append({
+ "name": "trace_ra",
+ "longdesc": (
+ shortdesc
+ +
+ " The trace output will be saved to trace_file, if set,"
+ " or by default to"
+ " $HA_VARRUN/ra_trace/<type>/<id>.<action>.<timestamp>"
+ " e.g. $HA_VARRUN/ra_trace/oracle/"
+ "db.start.2012-11-27.08:37:08"
+ ),
+ "shortdesc": shortdesc,
+ "type": "integer",
+ "default": 0,
+ "required": False,
+ "advanced": True,
+ })
+ if not trace_file_found:
+ shortdesc = (
+ "Path to a file to store resource agent tracing log"
+ )
+ parameters.append({
+ "name": "trace_file",
+ "longdesc": shortdesc,
+ "shortdesc": shortdesc,
+ "type": "string",
+ "default": "",
+ "required": False,
+ "advanced": True,
+ })
+
+ return parameters
+
+
+class AbsentAgentMixin(object):
def _load_metadata(self):
return "<resource-agent/>"
def validate_parameters_values(self, parameters_values):
return ([], [])
+
+class AbsentResourceAgent(AbsentAgentMixin, ResourceAgent):
+ pass
+
+
class StonithAgent(CrmAgent):
"""
Provides convinient access to a stonith agent's metadata
"""
+ DEFAULT_CIB_ACTION_NAMES = DEFAULT_STONITH_CIB_ACTION_NAMES
_stonithd_metadata = None
-
- def __init__(self, runner, agent_name):
- super(StonithAgent, self).__init__(
- runner,
- "stonith:{0}".format(agent_name)
- )
- self._agent_name = agent_name
-
+ def _prepare_name_parts(self, name):
+ # pacemaker does not support stonith (or resource) agents with ':' in the type
+ if ":" in name:
+ raise InvalidStonithAgentName(name)
+ return ResourceAgentName("stonith", None, name)
def get_name(self):
- return self._agent_name
-
+ return self.get_type()
def get_parameters(self):
return (
@@ -541,7 +827,6 @@ class StonithAgent(CrmAgent):
self._get_stonithd_metadata().get_parameters()
)
-
def _filter_parameters(self, parameters):
"""
Remove parameters that should not be available to the user.
@@ -579,35 +864,14 @@ class StonithAgent(CrmAgent):
# Pacemaker marks the 'port' parameter as not required for us.
return filtered
-
def _get_stonithd_metadata(self):
if not self.__class__._stonithd_metadata:
self.__class__._stonithd_metadata = StonithdMetadata(self._runner)
return self.__class__._stonithd_metadata
-
- def get_actions(self):
- # In previous versions of pcs there was no way to read actions from
- # stonith agents, the functions always returned an empty list. It
- # wasn't clear if that is a mistake or an intention. We keep it that
- # way for two reasons:
- # 1) Fence agents themselfs specify the actions without any attributes
- # (interval, timeout)
- # 2) Pacemaker explained shows an example stonith agent configuration
- # in CIB with only monitor operation specified (and that pcs creates
- # automatically in "pcs stonith create" regardless of provided actions
- # from here).
- # It may be better to return real actions from this class and deal ommit
- # them in higher layers, which can decide if the actions are desired or
- # not. For now there is not enough information to do that. Code which
- # uses this is not clean enough. Once everything is cleaned we should
- # decide if it is better to move this to higher level.
- return []
-
-
def get_provides_unfencing(self):
# self.get_actions returns an empty list
- for action in super(StonithAgent, self).get_actions():
+ for action in self._get_raw_actions():
if (
action.get("name", "") == "on"
and
@@ -619,6 +883,11 @@ class StonithAgent(CrmAgent):
return False
+class AbsentStonithAgent(AbsentAgentMixin, StonithAgent):
+ def get_parameters(self):
+ return []
+
+
def resource_agent_error_to_report_item(
e, severity=ReportItemSeverity.ERROR, forceable=False
):
@@ -634,4 +903,6 @@ def resource_agent_error_to_report_item(
)
if e.__class__ == InvalidResourceAgentName:
return reports.invalid_resource_agent_name(e.agent)
+ if e.__class__ == InvalidStonithAgentName:
+ return reports.invalid_stonith_agent_name(e.agent)
raise e
diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py
index f6b305d..ff8c71f 100644
--- a/pcs/lib/sbd.py
+++ b/pcs/lib/sbd.py
@@ -6,6 +6,7 @@ from __future__ import (
)
import json
+from os import path
from pcs import settings
from pcs.common import tools
@@ -13,7 +14,7 @@ from pcs.lib import (
external,
reports,
)
-from pcs.lib.tools import dict_to_environment_file
+from pcs.lib.tools import dict_to_environment_file, environment_file_to_dict
from pcs.lib.external import (
NodeCommunicator,
node_communicator_exception_to_report_item,
@@ -22,6 +23,14 @@ from pcs.lib.external import (
from pcs.lib.errors import LibraryError
+DEVICE_INITIALIZATION_OPTIONS_MAPPING = {
+ "watchdog-timeout": "-1",
+ "allocate-timeout": "-2",
+ "loop-timeout": "-3",
+ "msgwait-timeout": "-4",
+}
+
+
def _run_parallel_and_raise_lib_error_on_failure(func, param_list):
"""
Run function func in parallel for all specified parameters in arg_list.
@@ -86,6 +95,8 @@ def is_auto_tie_breaker_needed(
is_sbd_installed(runner)
and
is_sbd_enabled(runner)
+ and
+ not is_device_set_local()
)
@@ -123,22 +134,29 @@ def atb_has_to_be_enabled(runner, corosync_conf_facade, node_number_modifier=0):
)
-def check_sbd(communicator, node, watchdog):
+def check_sbd(communicator, node, watchdog, device_list):
"""
- Check SBD on specified 'node' and existence of specified watchdog.
+ Check SBD on specified 'node' and existence of specified watchdog and
+ devices.
communicator -- NodeCommunicator
node -- NodeAddresses
watchdog -- watchdog path
+ device_list -- list of strings
"""
return communicator.call_node(
node,
"remote/check_sbd",
- NodeCommunicator.format_data_dict([("watchdog", watchdog)])
+ NodeCommunicator.format_data_dict([
+ ("watchdog", watchdog),
+ ("device_list", NodeCommunicator.format_data_json(device_list)),
+ ])
)
-def check_sbd_on_node(report_processor, node_communicator, node, watchdog):
+def check_sbd_on_node(
+ report_processor, node_communicator, node, watchdog, device_list
+):
"""
Check if SBD can be enabled on specified 'node'.
Raises LibraryError if check fails.
@@ -148,15 +166,29 @@ def check_sbd_on_node(report_processor, node_communicator, node, watchdog):
node_communicator -- NodeCommunicator
node -- NodeAddresses
watchdog -- watchdog path
+ device_list -- list of strings
"""
report_list = []
try:
- data = json.loads(check_sbd(node_communicator, node, watchdog))
+ data = json.loads(
+ check_sbd(node_communicator, node, watchdog, device_list)
+ )
if not data["sbd"]["installed"]:
report_list.append(reports.sbd_not_installed(node.label))
if not data["watchdog"]["exist"]:
report_list.append(reports.watchdog_not_found(node.label, watchdog))
- except (ValueError, KeyError):
+ for device in data.get("device_list", []):
+ if not device["exist"]:
+ report_list.append(reports.sbd_device_does_not_exist(
+ device["path"], node.label
+ ))
+ elif not device["block_device"]:
+ report_list.append(reports.sbd_device_is_not_block_device(
+ device["path"], node.label
+ ))
+ # TODO maybe we can check whether the device is initialized by sbd (by
+ # running 'sbd -d <dev> dump;')
+ except (ValueError, KeyError, TypeError):
raise LibraryError(reports.invalid_response_format(node.label))
if report_list:
@@ -164,25 +196,29 @@ def check_sbd_on_node(report_processor, node_communicator, node, watchdog):
report_processor.process(reports.sbd_check_success(node.label))
-def check_sbd_on_all_nodes(report_processor, node_communicator, nodes_watchdog):
+def check_sbd_on_all_nodes(report_processor, node_communicator, nodes_data):
"""
Checks SBD (if SBD is installed and watchdog exists) on all NodeAddresses
- defined as keys in data.
+ defined as keys in nodes_data.
Raises LibraryError with all ReportItems in case of any failure.
report_processor --
node_communicator -- NodeCommunicator
- nodes_watchdog -- dictionary with NodeAddresses as keys and watchdog path
- as value
+ nodes_data -- dictionary with NodeAddresses as keys and dict (with keys
+ 'watchdog' and 'device_list') as value
"""
report_processor.process(reports.sbd_check_started())
- _run_parallel_and_raise_lib_error_on_failure(
- check_sbd_on_node,
- [
- ([report_processor, node_communicator, node, watchdog], {})
- for node, watchdog in sorted(nodes_watchdog.items())
- ]
- )
+ data_list = []
+ for node, data in sorted(nodes_data.items()):
+ data_list.append((
+ [
+ report_processor, node_communicator, node, data["watchdog"],
+ data["device_list"]
+ ],
+ {}
+ ))
+
+ _run_parallel_and_raise_lib_error_on_failure(check_sbd_on_node, data_list)
def set_sbd_config(communicator, node, config):
@@ -201,7 +237,8 @@ def set_sbd_config(communicator, node, config):
def set_sbd_config_on_node(
- report_processor, node_communicator, node, config, watchdog
+ report_processor, node_communicator, node, config, watchdog,
+ device_list=None
):
"""
Send SBD configuration to 'node' with specified watchdog set. Also puts
@@ -212,11 +249,14 @@ def set_sbd_config_on_node(
node -- NodeAddresses
config -- dictionary in format: <SBD config option>: <value>
watchdog -- path to watchdog device
+ device_list -- list of strings
"""
config = dict(config)
config["SBD_OPTS"] = '"-n {node_name}"'.format(node_name=node.label)
if watchdog:
config["SBD_WATCHDOG_DEV"] = watchdog
+ if device_list:
+ config["SBD_DEVICE"] = '"{0}"'.format(";".join(device_list))
set_sbd_config(node_communicator, node, dict_to_environment_file(config))
report_processor.process(
reports.sbd_config_accepted_by_node(node.label)
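
For illustration (device paths are made up), a device list is rendered into the distributed config as one semicolon-separated, quoted entry:

    # device_list = ["/dev/sdb1", "/dev/sdc1"] produces the config line
    #   SBD_DEVICE="/dev/sdb1;/dev/sdc1"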
@@ -224,7 +264,8 @@ def set_sbd_config_on_node(
def set_sbd_config_on_all_nodes(
- report_processor, node_communicator, node_list, config, watchdog_dict
+ report_processor, node_communicator, node_list, config, watchdog_dict,
+ device_dict
):
"""
Send SBD configuration 'config' to all nodes in 'node_list'. Option
@@ -237,6 +278,8 @@ def set_sbd_config_on_all_nodes(
config -- dictionary in format: <SBD config option>: <value>
watchdog_dict -- dictionary of watchdogs where key is NodeAdresses object
and value is path to watchdog
+ device_dict -- dictionary with NodeAddresses as keys and lists of devices
+ as values
"""
report_processor.process(reports.sbd_config_distribution_started())
_run_parallel_and_raise_lib_error_on_failure(
@@ -245,7 +288,7 @@ def set_sbd_config_on_all_nodes(
(
[
report_processor, node_communicator, node, config,
- watchdog_dict.get(node)
+ watchdog_dict.get(node), device_dict.get(node)
],
{}
)
@@ -412,7 +455,7 @@ def get_default_sbd_config():
return {
"SBD_DELAY_START": "no",
"SBD_PACEMAKER": "yes",
- "SBD_STARTMODE": "clean",
+ "SBD_STARTMODE": "always",
"SBD_WATCHDOG_DEV": settings.sbd_watchdog_default,
"SBD_WATCHDOG_TIMEOUT": "5"
}
@@ -468,3 +511,114 @@ def is_sbd_installed(runner):
"""
return external.is_service_installed(runner, get_sbd_service_name())
+
+def initialize_block_devices(
+ report_processor, cmd_runner, device_list, option_dict
+):
+ """
+ Initialize devices with specified options in option_dict.
+ Raise LibraryError on failure.
+
+ report_processor -- report processor
+ cmd_runner -- CommandRunner
+ device_list -- list of strings
+ option_dict -- dictionary of options and their values
+ """
+ report_processor.process(
+ reports.sbd_device_initialization_started(device_list)
+ )
+
+ cmd = [settings.sbd_binary]
+ for device in device_list:
+ cmd += ["-d", device]
+
+ for option, value in sorted(option_dict.items()):
+ cmd += [DEVICE_INITIALIZATION_OPTIONS_MAPPING[option], str(value)]
+
+ cmd.append("create")
+ _, std_err, ret_val = cmd_runner.run(cmd)
+ if ret_val != 0:
+ raise LibraryError(
+ reports.sbd_device_initialization_error(device_list, std_err)
+ )
+ report_processor.process(
+ reports.sbd_device_initialization_success(device_list)
+ )
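
An illustrative invocation (device path and timeout values are made up); options are sorted by name and mapped to sbd switches via DEVICE_INITIALIZATION_OPTIONS_MAPPING:

    # device_list = ["/dev/sdb1"]
    # option_dict = {"watchdog-timeout": 5, "msgwait-timeout": 10}
    # runs roughly:
    #   <sbd binary> -d /dev/sdb1 -4 10 -1 5 create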
+
+
+def get_local_sbd_device_list():
+ """
+ Returns list of devices specified in local SBD config
+ """
+ if not path.exists(settings.sbd_config):
+ return []
+
+ cfg = environment_file_to_dict(get_local_sbd_config())
+ if "SBD_DEVICE" not in cfg:
+ return []
+ devices = cfg["SBD_DEVICE"]
+ if devices.startswith('"') and devices.endswith('"'):
+ devices = devices[1:-1]
+ return [
+ device.strip()
+ for device in devices.split(";") if device.strip()
+ ]
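
For example (values are illustrative), a quoted, semicolon-separated SBD_DEVICE entry in the local config is split into a plain list:

    # SBD_DEVICE="/dev/sdb1;/dev/sdc1;" in the local sbd config
    get_local_sbd_device_list()
    # -> ["/dev/sdb1", "/dev/sdc1"]  (quotes stripped, empty items dropped)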
+
+
+def is_device_set_local():
+ """
+ Returns True if there is at least one device specified in local SBD config,
+ False otherwise.
+ """
+ return len(get_local_sbd_device_list()) > 0
+
+
+def get_device_messages_info(cmd_runner, device):
+ """
+ Returns info about messages (string) stored on specified SBD device.
+
+ cmd_runner -- CommandRunner
+ device -- string
+ """
+ std_out, dummy_std_err, ret_val = cmd_runner.run(
+ [settings.sbd_binary, "-d", device, "list"]
+ )
+ if ret_val != 0:
+ # sbd writes error message into std_out
+ raise LibraryError(reports.sbd_device_list_error(device, std_out))
+ return std_out
+
+
+def get_device_sbd_header_dump(cmd_runner, device):
+ """
+ Returns header dump (string) of specified SBD device.
+
+ cmd_runner -- CommandRunner
+ device -- string
+ """
+ std_out, dummy_std_err, ret_val = cmd_runner.run(
+ [settings.sbd_binary, "-d", device, "dump"]
+ )
+ if ret_val != 0:
+ # sbd writes error message into std_out
+ raise LibraryError(reports.sbd_device_dump_error(device, std_out))
+ return std_out
+
+
+def set_message(cmd_runner, device, node_name, message):
+ """
+ Set message of specified type 'message' on SBD device for node.
+
+ cmd_runner -- CommandRunner
+ device -- string, device path
+ node_name -- string, name of the node for which the message should be set
+ message -- string, message type
+ """
+ dummy_std_out, std_err, ret_val = cmd_runner.run(
+ [settings.sbd_binary, "-d", device, "message", node_name, message]
+ )
+ if ret_val != 0:
+ raise LibraryError(reports.sbd_device_message_error(
+ device, node_name, message, std_err
+ ))
+
diff --git a/pcs/test/test_lib_env.py b/pcs/lib/test/test_env.py
similarity index 77%
rename from pcs/test/test_lib_env.py
rename to pcs/lib/test/test_env.py
index 05c70d4..027fe48 100644
--- a/pcs/test/test_lib_env.py
+++ b/pcs/lib/test/test_env.py
@@ -7,6 +7,7 @@ from __future__ import (
from pcs.test.tools.pcs_unittest import TestCase
import logging
+from functools import partial
from lxml import etree
from pcs.test.tools.assertions import (
@@ -15,7 +16,7 @@ from pcs.test.tools.assertions import (
assert_report_item_list_equal,
)
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.misc import get_test_resource as rc, create_patcher
from pcs.test.tools.pcs_unittest import mock
from pcs.lib.env import LibraryEnvironment
@@ -28,6 +29,10 @@ from pcs.lib.errors import (
ReportItemSeverity as severity,
)
+
+patch_env = create_patcher("pcs.lib.env")
+patch_env_object = partial(mock.patch.object, LibraryEnvironment)
+
class LibraryEnvironmentTest(TestCase):
def setUp(self):
self.mock_logger = mock.MagicMock(logging.Logger)
@@ -67,7 +72,7 @@ class LibraryEnvironmentTest(TestCase):
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assertEqual([], env.user_groups)
- @mock.patch("pcs.lib.env.is_cman_cluster")
+ @patch_env("is_cman_cluster")
def test_is_cman_cluster(self, mock_is_cman):
mock_is_cman.return_value = True
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
@@ -75,8 +80,8 @@ class LibraryEnvironmentTest(TestCase):
self.assertTrue(env.is_cman_cluster)
self.assertEqual(1, mock_is_cman.call_count)
- @mock.patch("pcs.lib.env.replace_cib_configuration_xml")
- @mock.patch("pcs.lib.env.get_cib_xml")
+ @patch_env("replace_cib_configuration_xml")
+ @patch_env("get_cib_xml")
def test_cib_set(self, mock_get_cib, mock_push_cib):
cib_data = "test cib data"
new_cib_data = "new test cib data"
@@ -97,8 +102,8 @@ class LibraryEnvironmentTest(TestCase):
self.assertEqual(new_cib_data, env._get_cib_xml())
self.assertEqual(0, mock_get_cib.call_count)
- @mock.patch("pcs.lib.env.replace_cib_configuration_xml")
- @mock.patch("pcs.lib.env.get_cib_xml")
+ @patch_env("replace_cib_configuration_xml")
+ @patch_env("get_cib_xml")
def test_cib_not_set(self, mock_get_cib, mock_push_cib):
cib_data = "test cib data"
new_cib_data = "new test cib data"
@@ -113,8 +118,8 @@ class LibraryEnvironmentTest(TestCase):
env._push_cib_xml(new_cib_data)
self.assertEqual(1, mock_push_cib.call_count)
- @mock.patch("pcs.lib.env.ensure_cib_version")
- @mock.patch("pcs.lib.env.get_cib_xml")
+ @patch_env("ensure_cib_version")
+ @patch_env("get_cib_xml")
def test_get_cib_no_version_live(
self, mock_get_cib_xml, mock_ensure_cib_version
):
@@ -125,8 +130,8 @@ class LibraryEnvironmentTest(TestCase):
self.assertEqual(0, mock_ensure_cib_version.call_count)
self.assertFalse(env.cib_upgraded)
- @mock.patch("pcs.lib.env.ensure_cib_version")
- @mock.patch("pcs.lib.env.get_cib_xml")
+ @patch_env("ensure_cib_version")
+ @patch_env("get_cib_xml")
def test_get_cib_upgrade_live(
self, mock_get_cib_xml, mock_ensure_cib_version
):
@@ -138,10 +143,18 @@ class LibraryEnvironmentTest(TestCase):
)
self.assertEqual(1, mock_get_cib_xml.call_count)
self.assertEqual(1, mock_ensure_cib_version.call_count)
+ assert_report_item_list_equal(
+ env.report_processor.report_item_list,
+ [(
+ severity.INFO,
+ report_codes.CIB_UPGRADE_SUCCESSFUL,
+ {}
+ )]
+ )
self.assertTrue(env.cib_upgraded)
- @mock.patch("pcs.lib.env.ensure_cib_version")
- @mock.patch("pcs.lib.env.get_cib_xml")
+ @patch_env("ensure_cib_version")
+ @patch_env("get_cib_xml")
def test_get_cib_no_upgrade_live(
self, mock_get_cib_xml, mock_ensure_cib_version
):
@@ -155,8 +168,8 @@ class LibraryEnvironmentTest(TestCase):
self.assertEqual(1, mock_ensure_cib_version.call_count)
self.assertFalse(env.cib_upgraded)
- @mock.patch("pcs.lib.env.ensure_cib_version")
- @mock.patch("pcs.lib.env.get_cib_xml")
+ @patch_env("ensure_cib_version")
+ @patch_env("get_cib_xml")
def test_get_cib_no_version_file(
self, mock_get_cib_xml, mock_ensure_cib_version
):
@@ -168,8 +181,8 @@ class LibraryEnvironmentTest(TestCase):
self.assertEqual(0, mock_ensure_cib_version.call_count)
self.assertFalse(env.cib_upgraded)
- @mock.patch("pcs.lib.env.ensure_cib_version")
- @mock.patch("pcs.lib.env.get_cib_xml")
+ @patch_env("ensure_cib_version")
+ @patch_env("get_cib_xml")
def test_get_cib_upgrade_file(
self, mock_get_cib_xml, mock_ensure_cib_version
):
@@ -184,8 +197,8 @@ class LibraryEnvironmentTest(TestCase):
self.assertEqual(1, mock_ensure_cib_version.call_count)
self.assertTrue(env.cib_upgraded)
- @mock.patch("pcs.lib.env.ensure_cib_version")
- @mock.patch("pcs.lib.env.get_cib_xml")
+ @patch_env("ensure_cib_version")
+ @patch_env("get_cib_xml")
def test_get_cib_no_upgrade_file(
self, mock_get_cib_xml, mock_ensure_cib_version
):
@@ -200,7 +213,7 @@ class LibraryEnvironmentTest(TestCase):
self.assertEqual(1, mock_ensure_cib_version.call_count)
self.assertFalse(env.cib_upgraded)
- @mock.patch("pcs.lib.env.replace_cib_configuration_xml")
+ @patch_env("replace_cib_configuration_xml")
@mock.patch.object(
LibraryEnvironment,
"cmd_runner",
@@ -210,11 +223,12 @@ class LibraryEnvironmentTest(TestCase):
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
env.push_cib(etree.XML('<cib/>'))
mock_replace_cib.assert_called_once_with(
- "mock cmd runner", '<cib/>', False
+ "mock cmd runner",
+ '<cib/>'
)
self.assertEqual([], env.report_processor.report_item_list)
- @mock.patch("pcs.lib.env.replace_cib_configuration_xml")
+ @patch_env("replace_cib_configuration_xml")
@mock.patch.object(
LibraryEnvironment,
"cmd_runner",
@@ -225,22 +239,16 @@ class LibraryEnvironmentTest(TestCase):
env._cib_upgraded = True
env.push_cib(etree.XML('<cib/>'))
mock_replace_cib.assert_called_once_with(
- "mock cmd runner", '<cib/>', True
- )
- assert_report_item_list_equal(
- env.report_processor.report_item_list,
- [(
- severity.INFO,
- report_codes.CIB_UPGRADE_SUCCESSFUL,
- {}
- )]
+ "mock cmd runner",
+ '<cib/>'
)
+ self.assertFalse(env.cib_upgraded)
- @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
- @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
- @mock.patch("pcs.lib.env.reload_corosync_config")
- @mock.patch("pcs.lib.env.distribute_corosync_conf")
- @mock.patch("pcs.lib.env.get_local_corosync_conf")
+ @patch_env("qdevice_reload_on_nodes")
+ @patch_env("check_corosync_offline_on_nodes")
+ @patch_env("reload_corosync_config")
+ @patch_env("distribute_corosync_conf")
+ @patch_env("get_local_corosync_conf")
@mock.patch.object(
LibraryEnvironment,
"node_communicator",
@@ -280,11 +288,11 @@ class LibraryEnvironmentTest(TestCase):
mock_reload.assert_not_called()
mock_qdevice_reload.assert_not_called()
- @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
- @mock.patch("pcs.lib.env.reload_corosync_config")
- @mock.patch("pcs.lib.env.is_service_running")
- @mock.patch("pcs.lib.env.distribute_corosync_conf")
- @mock.patch("pcs.lib.env.get_local_corosync_conf")
+ @patch_env("qdevice_reload_on_nodes")
+ @patch_env("reload_corosync_config")
+ @patch_env("is_service_running")
+ @patch_env("distribute_corosync_conf")
+ @patch_env("get_local_corosync_conf")
@mock.patch.object(
CorosyncConfigFacade,
"get_nodes",
@@ -330,11 +338,11 @@ class LibraryEnvironmentTest(TestCase):
mock_reload.assert_called_once_with("mock cmd runner")
mock_qdevice_reload.assert_not_called()
- @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
- @mock.patch("pcs.lib.env.reload_corosync_config")
- @mock.patch("pcs.lib.env.is_service_running")
- @mock.patch("pcs.lib.env.distribute_corosync_conf")
- @mock.patch("pcs.lib.env.get_local_corosync_conf")
+ @patch_env("qdevice_reload_on_nodes")
+ @patch_env("reload_corosync_config")
+ @patch_env("is_service_running")
+ @patch_env("distribute_corosync_conf")
+ @patch_env("get_local_corosync_conf")
@mock.patch.object(
CorosyncConfigFacade,
"get_nodes",
@@ -380,12 +388,12 @@ class LibraryEnvironmentTest(TestCase):
mock_reload.assert_not_called()
mock_qdevice_reload.assert_not_called()
- @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
- @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
- @mock.patch("pcs.lib.env.reload_corosync_config")
- @mock.patch("pcs.lib.env.is_service_running")
- @mock.patch("pcs.lib.env.distribute_corosync_conf")
- @mock.patch("pcs.lib.env.get_local_corosync_conf")
+ @patch_env("qdevice_reload_on_nodes")
+ @patch_env("check_corosync_offline_on_nodes")
+ @patch_env("reload_corosync_config")
+ @patch_env("is_service_running")
+ @patch_env("distribute_corosync_conf")
+ @patch_env("get_local_corosync_conf")
@mock.patch.object(
CorosyncConfigFacade,
"get_nodes",
@@ -436,12 +444,12 @@ class LibraryEnvironmentTest(TestCase):
False
)
- @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
- @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
- @mock.patch("pcs.lib.env.reload_corosync_config")
- @mock.patch("pcs.lib.env.is_service_running")
- @mock.patch("pcs.lib.env.distribute_corosync_conf")
- @mock.patch("pcs.lib.env.get_local_corosync_conf")
+ @patch_env("qdevice_reload_on_nodes")
+ @patch_env("check_corosync_offline_on_nodes")
+ @patch_env("reload_corosync_config")
+ @patch_env("is_service_running")
+ @patch_env("distribute_corosync_conf")
+ @patch_env("get_local_corosync_conf")
@mock.patch.object(
CorosyncConfigFacade,
"get_nodes",
@@ -487,11 +495,11 @@ class LibraryEnvironmentTest(TestCase):
mock_reload.assert_not_called()
mock_qdevice_reload.assert_not_called()
- @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
- @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
- @mock.patch("pcs.lib.env.reload_corosync_config")
- @mock.patch("pcs.lib.env.distribute_corosync_conf")
- @mock.patch("pcs.lib.env.get_local_corosync_conf")
+ @patch_env("qdevice_reload_on_nodes")
+ @patch_env("check_corosync_offline_on_nodes")
+ @patch_env("reload_corosync_config")
+ @patch_env("distribute_corosync_conf")
+ @patch_env("get_local_corosync_conf")
@mock.patch.object(
CorosyncConfigFacade,
"get_nodes",
@@ -542,43 +550,7 @@ class LibraryEnvironmentTest(TestCase):
mock_reload.assert_not_called()
mock_qdevice_reload.assert_not_called()
- @mock.patch("pcs.lib.env.CommandRunner")
- def test_cmd_runner_no_options(self, mock_runner):
- expected_runner = mock.MagicMock()
- mock_runner.return_value = expected_runner
- env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- runner = env.cmd_runner()
- self.assertEqual(expected_runner, runner)
- mock_runner.assert_called_once_with(
- self.mock_logger,
- self.mock_reporter,
- {
- "LC_ALL": "C",
- }
- )
-
- @mock.patch("pcs.lib.env.CommandRunner")
- def test_cmd_runner_all_options(self, mock_runner):
- expected_runner = mock.MagicMock()
- mock_runner.return_value = expected_runner
- user = "testuser"
- env = LibraryEnvironment(
- self.mock_logger,
- self.mock_reporter,
- user_login=user
- )
- runner = env.cmd_runner()
- self.assertEqual(expected_runner, runner)
- mock_runner.assert_called_once_with(
- self.mock_logger,
- self.mock_reporter,
- {
- "CIB_user": user,
- "LC_ALL": "C",
- }
- )
-
- @mock.patch("pcs.lib.env.NodeCommunicator")
+ @patch_env("NodeCommunicator")
def test_node_communicator_no_options(self, mock_comm):
expected_comm = mock.MagicMock()
mock_comm.return_value = expected_comm
@@ -590,22 +562,25 @@ class LibraryEnvironmentTest(TestCase):
self.mock_reporter,
{},
None,
- []
+ [],
+ None
)
- @mock.patch("pcs.lib.env.NodeCommunicator")
+ @patch_env("NodeCommunicator")
def test_node_communicator_all_options(self, mock_comm):
expected_comm = mock.MagicMock()
mock_comm.return_value = expected_comm
user = "testuser"
groups = ["some", "group"]
tokens = {"node": "token"}
+ timeout = 10
env = LibraryEnvironment(
self.mock_logger,
self.mock_reporter,
user_login=user,
user_groups=groups,
- auth_tokens_getter=lambda:tokens
+ auth_tokens_getter=lambda:tokens,
+ request_timeout=timeout
)
comm = env.node_communicator()
self.assertEqual(expected_comm, comm)
@@ -614,10 +589,11 @@ class LibraryEnvironmentTest(TestCase):
self.mock_reporter,
tokens,
user,
- groups
+ groups,
+ timeout
)
- @mock.patch("pcs.lib.env.get_local_cluster_conf")
+ @patch_env("get_local_cluster_conf")
def test_get_cluster_conf_live(self, mock_get_local_cluster_conf):
env = LibraryEnvironment(
self.mock_logger, self.mock_reporter, cluster_conf_data=None
@@ -626,7 +602,7 @@ class LibraryEnvironmentTest(TestCase):
self.assertEqual("cluster.conf data", env.get_cluster_conf_data())
mock_get_local_cluster_conf.assert_called_once_with()
- @mock.patch("pcs.lib.env.get_local_cluster_conf")
+ @patch_env("get_local_cluster_conf")
def test_get_cluster_conf_not_live(self, mock_get_local_cluster_conf):
env = LibraryEnvironment(
self.mock_logger, self.mock_reporter, cluster_conf_data="data"
@@ -657,3 +633,129 @@ class LibraryEnvironmentTest(TestCase):
)
self.assertFalse(env.is_cluster_conf_live)
+ at patch_env("CommandRunner")
+class CmdRunner(TestCase):
+ def setUp(self):
+ self.mock_logger = mock.MagicMock(logging.Logger)
+ self.mock_reporter = MockLibraryReportProcessor()
+
+ def test_no_options(self, mock_runner):
+ expected_runner = mock.MagicMock()
+ mock_runner.return_value = expected_runner
+ env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+ runner = env.cmd_runner()
+ self.assertEqual(expected_runner, runner)
+ mock_runner.assert_called_once_with(
+ self.mock_logger,
+ self.mock_reporter,
+ {
+ "LC_ALL": "C",
+ }
+ )
+
+ def test_user(self, mock_runner):
+ expected_runner = mock.MagicMock()
+ mock_runner.return_value = expected_runner
+ user = "testuser"
+ env = LibraryEnvironment(
+ self.mock_logger,
+ self.mock_reporter,
+ user_login=user
+ )
+ runner = env.cmd_runner()
+ self.assertEqual(expected_runner, runner)
+ mock_runner.assert_called_once_with(
+ self.mock_logger,
+ self.mock_reporter,
+ {
+ "CIB_user": user,
+ "LC_ALL": "C",
+ }
+ )
+
+ @patch_env("tempfile.NamedTemporaryFile")
+ def test_dump_cib_file(self, mock_tmpfile, mock_runner):
+ expected_runner = mock.MagicMock()
+ mock_runner.return_value = expected_runner
+ mock_instance = mock.MagicMock()
+ mock_instance.name = rc("file.tmp")
+ mock_tmpfile.return_value = mock_instance
+ env = LibraryEnvironment(
+ self.mock_logger,
+ self.mock_reporter,
+ cib_data="<cib />"
+ )
+ runner = env.cmd_runner()
+ self.assertEqual(expected_runner, runner)
+ mock_runner.assert_called_once_with(
+ self.mock_logger,
+ self.mock_reporter,
+ {
+ "LC_ALL": "C",
+ "CIB_file": rc("file.tmp"),
+ }
+ )
+ mock_instance.write.assert_called_once_with("<cib />")
+
+@patch_env_object("cmd_runner", lambda self: "runner")
+class EnsureValidWait(TestCase):
+ def setUp(self):
+ self.create_env = partial(
+ LibraryEnvironment,
+ mock.MagicMock(logging.Logger),
+ MockLibraryReportProcessor()
+ )
+
+ @property
+ def env_live(self):
+ return self.create_env()
+
+ @property
+ def env_fake(self):
+ return self.create_env(cib_data="<cib/>")
+
+
+ def test_not_raises_if_waiting_false_no_matter_if_env_is_live(self):
+ self.env_live.ensure_wait_satisfiable(False)
+ self.env_fake.ensure_wait_satisfiable(False)
+
+ def test_raises_when_is_not_live(self):
+ env = self.env_fake
+ assert_raise_library_error(
+ lambda: env.ensure_wait_satisfiable(10),
+ (
+ severity.ERROR,
+ report_codes.WAIT_FOR_IDLE_NOT_LIVE_CLUSTER,
+ {}
+ )
+ )
+
+ @patch_env("get_valid_timeout_seconds")
+ @patch_env("ensure_wait_for_idle_support")
+ def test_do_checks(self, ensure_wait_for_idle_support, get_valid_timeout):
+ env = self.env_live
+ env.ensure_wait_satisfiable(10)
+ ensure_wait_for_idle_support.assert_called_once_with(env.cmd_runner())
+ get_valid_timeout.assert_called_once_with(10)
+
+
+@patch_env_object("cmd_runner", lambda self: "runner")
+@patch_env_object("_get_wait_timeout", lambda self, wait: wait)
+@patch_env_object("_push_cib_xml")
+@patch_env("wait_for_idle")
+class PushCib(TestCase):
+ def setUp(self):
+ self.env = LibraryEnvironment(
+ mock.MagicMock(logging.Logger),
+ MockLibraryReportProcessor()
+ )
+
+ def test_run_only_push_when_without_wait(self, wait_for_idle, push_cib_xml):
+ self.env.push_cib(etree.fromstring("<cib/>"))
+ push_cib_xml.assert_called_once_with("<cib/>")
+ wait_for_idle.assert_not_called()
+
+ def test_run_wait_when_wait_specified(self, wait_for_idle, push_cib_xml):
+ self.env.push_cib(etree.fromstring("<cib/>"), 10)
+ push_cib_xml.assert_called_once_with("<cib/>")
+ wait_for_idle.assert_called_once_with(self.env.cmd_runner(), 10)
diff --git a/pcs/lib/test/test_env_file.py b/pcs/lib/test/test_env_file.py
index 754b40e..f9b7b57 100644
--- a/pcs/lib/test/test_env_file.py
+++ b/pcs/lib/test/test_env_file.py
@@ -8,8 +8,9 @@ from __future__ import (
from pcs.test.tools.pcs_unittest import TestCase
from pcs.common import report_codes
-from pcs.lib.env_file import RealFile, GhostFile
+from pcs.lib import env_file
from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.misc import create_patcher
from pcs.test.tools.assertions import(
assert_raise_library_error,
assert_report_item_list_equal
@@ -18,10 +19,29 @@ from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.test.tools.pcs_unittest import mock
+patch_env_file = create_patcher(env_file)
+
+FILE_PATH = "/path/to.file"
+MISSING_PATH = "/no/existing/file.path"
+CONF_PATH = "/etc/booth/some-name.conf"
+
+class GhostFileInit(TestCase):
+ def test_is_not_binary_default(self):
+ ghost_file = env_file.GhostFile("some role", content=None)
+ self.assertFalse(ghost_file.export()["is_binary"])
+
+ def test_accepts_is_binary_attribute(self):
+ ghost_file = env_file.GhostFile(
+ "some role",
+ content=None,
+ is_binary=True
+ )
+ self.assertTrue(ghost_file.export()["is_binary"])
+
class GhostFileReadTest(TestCase):
def test_raises_when_trying_read_nonexistent_file(self):
assert_raise_library_error(
- lambda: GhostFile("some role", content=None).read(),
+ lambda: env_file.GhostFile("some role", content=None).read(),
(
severities.ERROR,
report_codes.FILE_DOES_NOT_EXIST,
@@ -31,10 +51,31 @@ class GhostFileReadTest(TestCase):
),
)
-@mock.patch("pcs.lib.env_file.os.path.exists", return_value=True)
+class GhostFileExists(TestCase):
+ def test_return_true_if_file_exists(self):
+ self.assertTrue(env_file.GhostFile("some_role", "any content").exists)
+
+ def test_return_False_if_file_exists(self):
+ self.assertFalse(env_file.GhostFile("some_role").exists)
+
+ def test_return_True_after_write(self):
+ ghost_file = env_file.GhostFile("some_role")
+ ghost_file.write("any content")
+ self.assertTrue(ghost_file.exists)
+
+class RealFileExists(TestCase):
+ @patch_env_file("os.path.exists", return_value=True)
+ def test_return_true_if_file_exists(self, exists):
+ self.assertTrue(env_file.RealFile("some role", FILE_PATH).exists)
+
+ @patch_env_file("os.path.exists", return_value=False)
+ def test_return_false_if_file_does_not_exist(self, exists):
+ self.assertFalse(env_file.RealFile("some role", FILE_PATH).exists)
+
+@patch_env_file("os.path.exists", return_value=True)
class RealFileAssertNoConflictWithExistingTest(TestCase):
def check(self, report_processor, can_overwrite_existing=False):
- real_file = RealFile("some role", "/etc/booth/some-name.conf")
+ real_file = env_file.RealFile("some role", CONF_PATH)
real_file.assert_no_conflict_with_existing(
report_processor,
can_overwrite_existing
@@ -53,7 +94,7 @@ class RealFileAssertNoConflictWithExistingTest(TestCase):
severities.ERROR,
report_codes.FILE_ALREADY_EXISTS,
{
- "file_path": "/etc/booth/some-name.conf"
+ "file_path": CONF_PATH
},
report_codes.FORCE_FILE_OVERWRITE,
),
@@ -66,7 +107,7 @@ class RealFileAssertNoConflictWithExistingTest(TestCase):
severities.WARNING,
report_codes.FILE_ALREADY_EXISTS,
{
- "file_path": "/etc/booth/some-name.conf"
+ "file_path": CONF_PATH
},
)])
@@ -74,88 +115,93 @@ class RealFileWriteTest(TestCase):
def test_success_write_content_to_path(self):
mock_open = mock.mock_open()
mock_file_operation = mock.Mock()
- with mock.patch("pcs.lib.env_file.open", mock_open, create=True):
- RealFile("some role", "/etc/booth/some-name.conf").write(
+ with patch_env_file("open", mock_open, create=True):
+ env_file.RealFile("some role", CONF_PATH).write(
"config content",
file_operation=mock_file_operation
)
- mock_open.assert_called_once_with("/etc/booth/some-name.conf", "w")
+ mock_open.assert_called_once_with(CONF_PATH, "w")
mock_open().write.assert_called_once_with("config content")
- mock_file_operation.assert_called_once_with(
- "/etc/booth/some-name.conf"
- )
+ mock_file_operation.assert_called_once_with(CONF_PATH)
def test_success_binary(self):
mock_open = mock.mock_open()
mock_file_operation = mock.Mock()
- with mock.patch("pcs.lib.env_file.open", mock_open, create=True):
- RealFile("some role", "/etc/booth/some-name.conf").write(
+ with patch_env_file("open", mock_open, create=True):
+ env_file.RealFile("some role", CONF_PATH, is_binary=True).write(
"config content".encode("utf-8"),
file_operation=mock_file_operation,
- is_binary=True
)
- mock_open.assert_called_once_with("/etc/booth/some-name.conf", "wb")
+ mock_open.assert_called_once_with(CONF_PATH, "wb")
mock_open().write.assert_called_once_with(
"config content".encode("utf-8")
)
- mock_file_operation.assert_called_once_with(
- "/etc/booth/some-name.conf"
- )
+ mock_file_operation.assert_called_once_with(CONF_PATH)
def test_raises_when_could_not_write(self):
assert_raise_library_error(
lambda:
- RealFile("some role", "/no/existing/file.path").write(["content"]),
+ env_file.RealFile("some role", MISSING_PATH).write(["content"]),
(
severities.ERROR,
report_codes.FILE_IO_ERROR,
{
"reason":
- "No such file or directory: '/no/existing/file.path'"
+ "No such file or directory: '{0}'".format(MISSING_PATH)
,
}
)
)
class RealFileReadTest(TestCase):
- def test_success_read_content_from_file(self):
+ def assert_read_in_correct_mode(self, real_file, mode):
mock_open = mock.mock_open()
- with mock.patch("pcs.lib.env_file.open", mock_open, create=True):
+ with patch_env_file("open", mock_open, create=True):
mock_open().read.return_value = "test booth\nconfig"
- self.assertEqual(
- "test booth\nconfig",
- RealFile("some role", "/path/to.file").read()
- )
+ self.assertEqual("test booth\nconfig", real_file.read())
+ mock_open.assert_has_calls([mock.call(FILE_PATH, mode)])
+
+ def test_success_read_content_from_file(self):
+ self.assert_read_in_correct_mode(
+ env_file.RealFile("some role", FILE_PATH, is_binary=False),
+ mode="r"
+ )
+
+ def test_success_read_content_from_binary_file(self):
+ self.assert_read_in_correct_mode(
+ env_file.RealFile("some role", FILE_PATH, is_binary=True),
+ mode="rb"
+ )
def test_raises_when_could_not_read(self):
assert_raise_library_error(
- lambda: RealFile("some role", "/no/existing/file.path").read(),
+ lambda: env_file.RealFile("some role", MISSING_PATH).read(),
(
severities.ERROR,
report_codes.FILE_IO_ERROR,
{
"reason":
- "No such file or directory: '/no/existing/file.path'"
+ "No such file or directory: '{0}'".format(MISSING_PATH)
,
}
)
)
class RealFileRemoveTest(TestCase):
- @mock.patch("pcs.lib.env_file.os.remove")
- @mock.patch("pcs.lib.env_file.os.path.exists", return_value=True)
+ @patch_env_file("os.remove")
+ @patch_env_file("os.path.exists", return_value=True)
def test_success_remove_file(self, _, mock_remove):
- RealFile("some role", "/path/to.file").remove()
- mock_remove.assert_called_once_with("/path/to.file")
+ env_file.RealFile("some role", FILE_PATH).remove()
+ mock_remove.assert_called_once_with(FILE_PATH)
- @mock.patch(
- "pcs.lib.env_file.os.remove",
- side_effect=EnvironmentError(1, "mock remove failed", "/path/to.file")
+ @patch_env_file(
+ "os.remove",
+ side_effect=EnvironmentError(1, "mock remove failed", FILE_PATH)
)
- @mock.patch("pcs.lib.env_file.os.path.exists", return_value=True)
+ @patch_env_file("os.path.exists", return_value=True)
def test_raise_library_error_when_remove_failed(self, _, dummy):
assert_raise_library_error(
- lambda: RealFile("some role", "/path/to.file").remove(),
+ lambda: env_file.RealFile("some role", FILE_PATH).remove(),
(
severities.ERROR,
report_codes.FILE_IO_ERROR,
@@ -167,10 +213,10 @@ class RealFileRemoveTest(TestCase):
)
)
- @mock.patch("pcs.lib.env_file.os.path.exists", return_value=False)
+ @patch_env_file("os.path.exists", return_value=False)
def test_existence_is_required(self, _):
assert_raise_library_error(
- lambda: RealFile("some role", "/path/to.file").remove(),
+ lambda: env_file.RealFile("some role", FILE_PATH).remove(),
(
severities.ERROR,
report_codes.FILE_IO_ERROR,
@@ -182,6 +228,8 @@ class RealFileRemoveTest(TestCase):
)
)
- @mock.patch("pcs.lib.env_file.os.path.exists", return_value=False)
+ @patch_env_file("os.path.exists", return_value=False)
def test_noexistent_can_be_silenced(self, _):
- RealFile("some role", "/path/to.file").remove(silence_no_existence=True)
+ env_file.RealFile("some role", FILE_PATH).remove(
+ silence_no_existence=True
+ )
diff --git a/pcs/lib/test/test_errors.py b/pcs/lib/test/test_errors.py
index 871aa76..323e9f7 100644
--- a/pcs/lib/test/test_errors.py
+++ b/pcs/lib/test/test_errors.py
@@ -7,14 +7,74 @@ from __future__ import (
from pcs.test.tools.pcs_unittest import TestCase
-from pcs.lib.errors import LibraryEnvError
+from pcs.lib import errors
class LibraryEnvErrorTest(TestCase):
def test_can_sign_solved_reports(self):
- e = LibraryEnvError("first", "second", "third")
+ e = errors.LibraryEnvError("first", "second", "third")
for report in e.args:
if report == "second":
e.sign_processed(report)
self.assertEqual(["first", "third"], e.unprocessed)
+
+class ReportListAnalyzerSelectSeverities(TestCase):
+ def setUp(self):
+ self.severities = [
+ errors.ReportItemSeverity.WARNING,
+ errors.ReportItemSeverity.INFO,
+ errors.ReportItemSeverity.DEBUG,
+ ]
+
+ def assert_select_reports(self, all_reports, expected_errors):
+ self.assertEqual(
+ expected_errors,
+ errors.ReportListAnalyzer(all_reports)
+ .reports_with_severities(self.severities)
+ )
+
+ def test_returns_empty_on_no_reports(self):
+ self.assert_select_reports([], [])
+
+ def test_returns_empty_on_reports_with_other_severities(self):
+ self.assert_select_reports([errors.ReportItem.error("ERR")], [])
+
+ def test_returns_selection_of_desired_severities(self):
+ err = errors.ReportItem.error("ERR")
+ warn = errors.ReportItem.warning("WARN")
+ info = errors.ReportItem.info("INFO")
+ debug = errors.ReportItem.debug("DEBUG")
+ self.assert_select_reports(
+ [
+ err,
+ warn,
+ info,
+ debug,
+ ],
+ [
+ warn,
+ info,
+ debug,
+ ]
+ )
+
+class ReportListAnalyzerErrorList(TestCase):
+ def assert_select_reports(self, all_reports, expected_errors):
+ self.assertEqual(
+ expected_errors,
+ errors.ReportListAnalyzer(all_reports).error_list
+ )
+
+ def test_returns_empty_on_no_reports(self):
+ self.assert_select_reports([], [])
+
+ def test_returns_empty_on_no_errors(self):
+ self.assert_select_reports([errors.ReportItem.warning("WARN")], [])
+
+ def test_returns_only_errors_on_mixed_content(self):
+ err = errors.ReportItem.error("ERR")
+ self.assert_select_reports(
+ [errors.ReportItem.warning("WARN"), err],
+ [err]
+ )
diff --git a/pcs/lib/test/test_node_communication_format.py b/pcs/lib/test/test_node_communication_format.py
new file mode 100644
index 0000000..0cad76f
--- /dev/null
+++ b/pcs/lib/test/test_node_communication_format.py
@@ -0,0 +1,119 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.misc import create_setup_patch_mixin
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs.common import report_codes
+from pcs.lib import node_communication_format
+from pcs.lib.errors import ReportItemSeverity as severity
+
+SetupPatchMixin = create_setup_patch_mixin(node_communication_format)
+
+class PcmkAuthkeyFormat(TestCase, SetupPatchMixin):
+ def test_create_expected_dict(self):
+ b64encode = self.setup_patch("base64.b64encode")
+ b64encode.return_value = "encoded_content".encode()
+ self.assertEqual(
+ node_communication_format.pcmk_authkey_format("content"),
+ {
+ "data": b64encode.return_value.decode("utf-8"),
+ "type": "pcmk_remote_authkey",
+ "rewrite_existing": True,
+ }
+ )
+
+
+class ServiceCommandFormat(TestCase):
+ def test_create_expected_dict(self):
+ self.assertEqual(
+ node_communication_format.service_cmd_format("pcsd", "start"),
+ {
+ "type": "service_command",
+ "service": "pcsd",
+ "command": "start",
+ }
+ )
+
+def fixture_invalid_response_format(node_label):
+ return (
+ severity.ERROR,
+ report_codes.INVALID_RESPONSE_FORMAT,
+ {
+ "node": node_label
+ },
+ None
+ )
+
+
+class ResponseToNodeActionResults(TestCase):
+ def setUp(self):
+ self.expected_keys = ["file"]
+ self.main_key = "files"
+ self.node_label = "node1"
+
+ def assert_result_causes_invalid_format(self, result):
+ assert_raise_library_error(
+ lambda: node_communication_format.response_to_result(
+ result,
+ self.main_key,
+ self.expected_keys,
+ self.node_label,
+ ),
+ fixture_invalid_response_format(self.node_label)
+ )
+
+ def test_report_response_is_not_dict(self):
+ self.assert_result_causes_invalid_format("bad answer")
+
+ def test_report_dict_without_mandatory_key(self):
+ self.assert_result_causes_invalid_format({})
+
+ def test_report_when_on_files_is_not_dict(self):
+ self.assert_result_causes_invalid_format({"files": True})
+
+ def test_report_when_on_some_result_is_not_dict(self):
+ self.assert_result_causes_invalid_format({
+ "files": {
+ "file": True
+ }
+ })
+
+ def test_report_when_on_some_result_is_without_code(self):
+ self.assert_result_causes_invalid_format({
+ "files": {
+ "file": {"message": "some_message"}
+ }
+ })
+
+ def test_report_when_on_some_result_is_without_message(self):
+ self.assert_result_causes_invalid_format({
+ "files": {
+ "file": {"code": "some_code"}
+ }
+ })
+
+ def test_report_when_some_result_key_is_missing(self):
+ self.assert_result_causes_invalid_format({
+ "files": {
+ }
+ })
+
+ def test_report_when_some_result_key_is_extra(self):
+ self.assert_result_causes_invalid_format({
+ "files": {
+ "file": {
+ "code": "some_code",
+ "message": "some_message",
+ },
+ "extra": {
+ "code": "some_extra_code",
+ "message": "some_extra_message",
+ }
+ }
+ })
diff --git a/pcs/test/test_lib_nodes_task.py b/pcs/lib/test/test_nodes_task.py
similarity index 68%
rename from pcs/test/test_lib_nodes_task.py
rename to pcs/lib/test/test_nodes_task.py
index 6f05b15..61ba132 100644
--- a/pcs/test/test_lib_nodes_task.py
+++ b/pcs/lib/test/test_nodes_task.py
@@ -5,6 +5,8 @@ from __future__ import (
unicode_literals,
)
+import json
+
from pcs.test.tools.pcs_unittest import TestCase
from pcs.test.tools.assertions import (
@@ -13,6 +15,7 @@ from pcs.test.tools.assertions import (
)
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.misc import create_patcher
from pcs.common import report_codes
from pcs.lib.external import NodeCommunicator, NodeAuthenticationException
@@ -21,13 +24,14 @@ from pcs.lib.errors import ReportItemSeverity as severity
import pcs.lib.nodes_task as lib
+patch_nodes_task = create_patcher(lib)
class DistributeCorosyncConfTest(TestCase):
def setUp(self):
self.mock_reporter = MockLibraryReportProcessor()
self.mock_communicator = "mock node communicator"
- @mock.patch("pcs.lib.nodes_task.corosync_live")
+ @patch_nodes_task("corosync_live")
def test_success(self, mock_corosync_live):
conf_text = "test conf text"
nodes = ["node1", "node2"]
@@ -81,7 +85,7 @@ class DistributeCorosyncConfTest(TestCase):
]
)
- @mock.patch("pcs.lib.nodes_task.corosync_live")
+ @patch_nodes_task("corosync_live")
def test_one_node_down(self, mock_corosync_live):
conf_text = "test conf text"
nodes = ["node1", "node2"]
@@ -174,7 +178,7 @@ class DistributeCorosyncConfTest(TestCase):
]
)
- @mock.patch("pcs.lib.nodes_task.corosync_live")
+ @patch_nodes_task("corosync_live")
def test_one_node_down_forced(self, mock_corosync_live):
conf_text = "test conf text"
nodes = ["node1", "node2"]
@@ -286,14 +290,18 @@ class CheckCorosyncOfflineTest(TestCase):
)
def test_one_node_running(self):
- nodes = ["node1", "node2"]
+ node_responses = {
+ "node1": '{"corosync": false}',
+ "node2": '{"corosync": true}',
+ }
node_addrs_list = NodeAddressesList(
- [NodeAddresses(addr) for addr in nodes]
+ [NodeAddresses(addr) for addr in node_responses.keys()]
+ )
+
+ self.mock_communicator.call_node.side_effect = (
+ lambda node, request, data: node_responses[node.label]
)
- self.mock_communicator.call_node.side_effect = [
- '{"corosync": false}',
- '{"corosync": true}',
- ]
+
assert_raise_library_error(
lambda: lib.check_corosync_offline_on_nodes(
@@ -305,7 +313,7 @@ class CheckCorosyncOfflineTest(TestCase):
severity.ERROR,
report_codes.COROSYNC_RUNNING_ON_NODE,
{
- "node": nodes[1],
+ "node": "node2",
}
)
)
@@ -438,8 +446,8 @@ class CheckCorosyncOfflineTest(TestCase):
)
-@mock.patch("pcs.lib.nodes_task.qdevice_client.remote_client_stop")
-@mock.patch("pcs.lib.nodes_task.qdevice_client.remote_client_start")
+@patch_nodes_task("qdevice_client.remote_client_stop")
+@patch_nodes_task("qdevice_client.remote_client_start")
class QdeviceReloadOnNodesTest(TestCase):
def setUp(self):
self.mock_reporter = MockLibraryReportProcessor()
@@ -493,7 +501,7 @@ class QdeviceReloadOnNodesTest(TestCase):
raise NodeAuthenticationException(
node.label, "command", "HTTP error: 401"
)
- mock_remote_stop.side_effect = raiser
+ mock_remote_start.side_effect = raiser
assert_raise_library_error(
lambda: lib.qdevice_reload_on_nodes(
@@ -501,6 +509,24 @@ class QdeviceReloadOnNodesTest(TestCase):
self.mock_reporter,
node_addrs_list
),
+ # why the same error twice?
+ # 1. The tested piece of code calls a function which puts an error
+ # into the reporter. The reporter raises an exception. The
+ # exception is caught in the tested piece of code, stored, and
+ # later put into the reporter again.
+ # 2. The mock reporter remembers everything that goes through it,
+ # and by the mechanism described in 1 the error goes through it
+ # twice.
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+ {
+ "node": nodes[1],
+ "command": "command",
+ "reason" : "HTTP error: 401",
+ },
+ report_codes.SKIP_OFFLINE_NODES
+ ),
(
severity.ERROR,
report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
@@ -574,3 +600,231 @@ class NodeCheckAuthTest(TestCase):
mock_communicator.call_node.assert_called_once_with(
node, "remote/check_auth", "check_auth_only=1"
)
+
+
+def fixture_invalid_response_format(node_label):
+ return (
+ severity.ERROR,
+ report_codes.INVALID_RESPONSE_FORMAT,
+ {
+ "node": node_label
+ },
+ None
+ )
+
+def assert_call_cause_reports(call, expected_report_items):
+ report_items = []
+ call(report_items)
+ assert_report_item_list_equal(report_items, expected_report_items)
+
+class CallForJson(TestCase):
+ def setUp(self):
+ self.node = NodeAddresses("node1")
+ self.node_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+
+ def make_call(self, report_items):
+ lib._call_for_json(
+ self.node_communicator,
+ self.node,
+ "some/path",
+ report_items
+ )
+
+ def test_report_no_json_response(self):
+ #leads to ValueError
+ self.node_communicator.call_node = mock.Mock(return_value="bad answer")
+ assert_call_cause_reports(self.make_call, [
+ fixture_invalid_response_format(self.node.label)
+ ])
+
+ def test_process_communication_exception(self):
+ self.node_communicator.call_node = mock.Mock(
+ side_effect=NodeAuthenticationException("node", "request", "reason")
+ )
+ assert_call_cause_reports(self.make_call, [
+ (
+ severity.ERROR,
+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+ {
+ 'node': 'node',
+ 'reason': 'reason',
+ 'command': 'request'
+ },
+ report_codes.SKIP_OFFLINE_NODES,
+ )
+ ])
+
+class AvailabilityCheckerNode(TestCase):
+ def setUp(self):
+ self.node = "node1"
+
+ def assert_result_causes_reports(
+ self, availability_info, expected_report_items
+ ):
+ report_items = []
+ lib.availability_checker_node(
+ availability_info,
+ report_items,
+ self.node
+ )
+ assert_report_item_list_equal(report_items, expected_report_items)
+
+ def test_no_reports_when_available(self):
+ self.assert_result_causes_reports({"node_available": True}, [])
+
+ def test_report_node_is_in_cluster(self):
+ self.assert_result_causes_reports({"node_available": False}, [
+ (
+ severity.ERROR,
+ report_codes.CANNOT_ADD_NODE_IS_IN_CLUSTER,
+ {
+ "node": self.node
+ }
+ ),
+ ])
+
+ def test_report_node_is_running_pacemaker_remote(self):
+ self.assert_result_causes_reports(
+ {"node_available": False, "pacemaker_remote": True},
+ [
+ (
+ severity.ERROR,
+ report_codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE,
+ {
+ "node": self.node,
+ "service": "pacemaker_remote",
+ }
+ ),
+ ]
+ )
+
+ def test_report_node_is_running_pacemaker(self):
+ self.assert_result_causes_reports(
+ {"node_available": False, "pacemaker_running": True},
+ [
+ (
+ severity.ERROR,
+ report_codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE,
+ {
+ "node": self.node,
+ "service": "pacemaker",
+ }
+ ),
+ ]
+ )
+
+class AvailabilityCheckerRemoteNode(TestCase):
+ def setUp(self):
+ self.node = "node1"
+
+ def assert_result_causes_reports(
+ self, availability_info, expected_report_items
+ ):
+ report_items = []
+ lib.availability_checker_remote_node(
+ availability_info,
+ report_items,
+ self.node
+ )
+ assert_report_item_list_equal(report_items, expected_report_items)
+
+ def test_no_reports_when_available(self):
+ self.assert_result_causes_reports({"node_available": True}, [])
+
+ def test_report_node_is_running_pacemaker(self):
+ self.assert_result_causes_reports(
+ {"node_available": False, "pacemaker_running": True},
+ [
+ (
+ severity.ERROR,
+ report_codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE,
+ {
+ "node": self.node,
+ "service": "pacemaker",
+ }
+ ),
+ ]
+ )
+
+ def test_report_node_is_in_cluster(self):
+ self.assert_result_causes_reports({"node_available": False}, [
+ (
+ severity.ERROR,
+ report_codes.CANNOT_ADD_NODE_IS_IN_CLUSTER,
+ {
+ "node": self.node
+ }
+ ),
+ ])
+
+ def test_no_reports_when_pacemaker_remote_there(self):
+ self.assert_result_causes_reports(
+ {"node_available": False, "pacemaker_remote": True},
+ []
+ )
+
+class CheckCanAddNodeToCluster(TestCase):
+ def setUp(self):
+ self.node = NodeAddresses("node1")
+ self.node_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+
+ def assert_result_causes_invalid_format(self, result):
+ self.node_communicator.call_node = mock.Mock(
+ return_value=json.dumps(result)
+ )
+ assert_call_cause_reports(
+ self.make_call,
+ [fixture_invalid_response_format(self.node.label)],
+ )
+
+ def make_call(self, report_items):
+ lib.check_can_add_node_to_cluster(
+ self.node_communicator,
+ self.node,
+ report_items,
+ check_response=(
+ lambda availability_info, report_items, node_label: None
+ )
+ )
+
+ def test_report_no_dict_in_json_response(self):
+ self.assert_result_causes_invalid_format("bad answer")
+
+ def test_report_dict_without_mandatory_key(self):
+ self.assert_result_causes_invalid_format({})
+
+
+class OnNodeTest(TestCase):
+ def setUp(self):
+ self.reporter = MockLibraryReportProcessor()
+ self.node = NodeAddresses("node1")
+ self.node_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+
+ def set_call_result(self, result):
+ self.node_communicator.call_node = mock.Mock(
+ return_value=json.dumps(result)
+ )
+
+class RunActionOnNode(OnNodeTest):
+ def make_call(self):
+ return lib.run_actions_on_node(
+ self.node_communicator,
+ "remote/run_action",
+ "actions",
+ self.reporter,
+ self.node,
+ {"action": {"type": "any_mock_type"}}
+ )
+
+ def test_return_node_action_result(self):
+ self.set_call_result({
+ "actions": {
+ "action": {
+ "code": "some_code",
+ "message": "some_message",
+ }
+ }
+ })
+ result = self.make_call()["action"]
+ self.assertEqual(result.code, "some_code")
+ self.assertEqual(result.message, "some_message")
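The hunk above replaces repeated @mock.patch("pcs.lib.nodes_task....") decorators with a module-scoped helper, patch_nodes_task = create_patcher(lib), imported from pcs.test.tools.misc. For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of such a factory; it is illustrative only, and the real helper in pcs.test.tools.misc may differ in detail.

    from unittest import mock  # the pcs tests fall back to the external "mock" package on Python 2

    def create_patcher(target):
        # Return a mock.patch factory bound to a single module. "target" may be
        # the imported module itself or its dotted path; tests then write
        # patch_foo("name") instead of repeating the full path in every decorator.
        module_path = target if isinstance(target, str) else target.__name__

        def patch(name, *args, **kwargs):
            return mock.patch(
                "{0}.{1}".format(module_path, name), *args, **kwargs
            )

        return patch

    # Usage mirroring the diff (module path shown for illustration only):
    #   patch_nodes_task = create_patcher("pcs.lib.nodes_task")
    #
    #   @patch_nodes_task("corosync_live")
    #   def test_success(self, mock_corosync_live):
    #       ...

The same helper is reused in the next file as patch_agent for pcs.lib.resource_agent.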
diff --git a/pcs/lib/test/test_resource_agent.py b/pcs/lib/test/test_resource_agent.py
index 5298415..d821f4d 100644
--- a/pcs/lib/test/test_resource_agent.py
+++ b/pcs/lib/test/test_resource_agent.py
@@ -11,7 +11,9 @@ from functools import partial
from pcs.test.tools.assertions import (
ExtendedAssertionsMixin,
assert_raise_library_error,
+ assert_report_item_list_equal,
assert_xml_equal,
+ start_tag_error_text,
)
from pcs.test.tools.misc import create_patcher
from pcs.test.tools.pcs_unittest import TestCase, mock
@@ -23,6 +25,94 @@ from pcs.lib.errors import ReportItemSeverity as severity, LibraryError
from pcs.lib.external import CommandRunner
patch_agent = create_patcher("pcs.lib.resource_agent")
+patch_agent_object = partial(mock.patch.object, lib_ra.Agent)
+
+
+class GetDefaultInterval(TestCase):
+ def test_return_0s_on_name_different_from_monitor(self):
+ self.assertEqual("0s", lib_ra.get_default_interval("start"))
+ def test_return_60s_on_monitor(self):
+ self.assertEqual("60s", lib_ra.get_default_interval("monitor"))
+
+
+@patch_agent("get_default_interval", mock.Mock(return_value="10s"))
+class CompleteAllIntervals(TestCase):
+ def test_add_intervals_everywhere_is_missing(self):
+ self.assertEqual(
+ [
+ {"name": "monitor", "interval": "20s"},
+ {"name": "start", "interval": "10s"},
+ ],
+ lib_ra.complete_all_intervals([
+ {"name": "monitor", "interval": "20s"},
+ {"name": "start"},
+ ])
+ )
+
+class GetResourceAgentNameFromString(TestCase):
+ def test_returns_resource_agent_name_when_is_valid(self):
+ self.assertEqual(
+ lib_ra.ResourceAgentName("ocf", "heartbeat", "Dummy"),
+ lib_ra.get_resource_agent_name_from_string("ocf:heartbeat:Dummy")
+ )
+
+ def test_refuses_string_if_is_not_valid(self):
+ self.assertRaises(
+ lib_ra.InvalidResourceAgentName,
+ lambda: lib_ra.get_resource_agent_name_from_string(
+ "invalid:resource:agent:string"
+ )
+ )
+
+ def test_refuses_with_unknown_standard(self):
+ self.assertRaises(
+ lib_ra.InvalidResourceAgentName,
+ lambda: lib_ra.get_resource_agent_name_from_string("unknown:Dummy")
+ )
+
+ def test_refuses_ocf_agent_name_without_provider(self):
+ self.assertRaises(
+ lib_ra.InvalidResourceAgentName,
+ lambda: lib_ra.get_resource_agent_name_from_string("ocf:Dummy")
+ )
+
+ def test_refuses_non_ocf_agent_name_with_provider(self):
+ self.assertRaises(
+ lib_ra.InvalidResourceAgentName,
+ lambda:
+ lib_ra.get_resource_agent_name_from_string("lsb:provider:Dummy")
+ )
+
+ def test_returns_resource_agent_containing_sytemd_instance(self):
+ self.assertEqual(
+ lib_ra.ResourceAgentName("systemd", None, "lvm2-pvscan at 252:2"),
+ lib_ra.get_resource_agent_name_from_string(
+ "systemd:lvm2-pvscan at 252:2"
+ )
+ )
+
+ def test_returns_resource_agent_containing_service_instance(self):
+ self.assertEqual(
+ lib_ra.ResourceAgentName("service", None, "lvm2-pvscan at 252:2"),
+ lib_ra.get_resource_agent_name_from_string(
+ "service:lvm2-pvscan at 252:2"
+ )
+ )
+
+ def test_returns_resource_agent_containing_systemd_instance_short(self):
+ self.assertEqual(
+ lib_ra.ResourceAgentName("service", None, "getty at tty1"),
+ lib_ra.get_resource_agent_name_from_string("service:getty at tty1")
+ )
+
+ def test_refuses_systemd_agent_name_with_provider(self):
+ self.assertRaises(
+ lib_ra.InvalidResourceAgentName,
+ lambda: lib_ra.get_resource_agent_name_from_string(
+ "sytemd:lvm2-pvscan252:@2"
+ )
+ )
+
class ListResourceAgentsStandardsTest(TestCase):
def test_success_and_filter_stonith_out(self):
@@ -57,7 +147,6 @@ class ListResourceAgentsStandardsTest(TestCase):
"/usr/sbin/crm_resource", "--list-standards"
])
-
def test_success_filter_whitespace(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
agents = [
@@ -93,7 +182,6 @@ class ListResourceAgentsStandardsTest(TestCase):
"/usr/sbin/crm_resource", "--list-standards"
])
-
def test_empty(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.return_value = ("", "", 0)
@@ -107,7 +195,6 @@ class ListResourceAgentsStandardsTest(TestCase):
"/usr/sbin/crm_resource", "--list-standards"
])
-
def test_error(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.return_value = ("lsb", "error", 1)
@@ -152,7 +239,6 @@ class ListResourceAgentsOcfProvidersTest(TestCase):
"/usr/sbin/crm_resource", "--list-ocf-providers"
])
-
def test_success_filter_whitespace(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
providers = [
@@ -183,7 +269,6 @@ class ListResourceAgentsOcfProvidersTest(TestCase):
"/usr/sbin/crm_resource", "--list-ocf-providers"
])
-
def test_empty(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.return_value = ("", "", 0)
@@ -197,7 +282,6 @@ class ListResourceAgentsOcfProvidersTest(TestCase):
"/usr/sbin/crm_resource", "--list-ocf-providers"
])
-
def test_error(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.return_value = ("booth", "error", 1)
@@ -294,7 +378,6 @@ class ListResourceAgentsTest(TestCase):
"/usr/sbin/crm_resource", "--list-agents", "ocf"
])
-
def test_success_standard_provider(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.return_value = (
@@ -325,7 +408,6 @@ class ListResourceAgentsTest(TestCase):
"/usr/sbin/crm_resource", "--list-agents", "ocf:pacemaker"
])
-
def test_bad_standard(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.return_value = (
@@ -375,7 +457,6 @@ class ListStonithAgentsTest(TestCase):
"/usr/sbin/crm_resource", "--list-agents", "stonith"
])
-
def test_no_agents(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.return_value = (
@@ -393,7 +474,6 @@ class ListStonithAgentsTest(TestCase):
"/usr/sbin/crm_resource", "--list-agents", "stonith"
])
-
def test_filter_hidden_agents(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.return_value = (
@@ -449,7 +529,6 @@ class GuessResourceAgentFullNameTest(TestCase):
("Dummy\nStateful\n", "", 0),
]
-
def test_one_agent_list(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.side_effect = (
@@ -468,7 +547,6 @@ class GuessResourceAgentFullNameTest(TestCase):
["ocf:heartbeat:Delay"]
)
-
def test_one_agent_exception(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.side_effect = (
@@ -487,7 +565,6 @@ class GuessResourceAgentFullNameTest(TestCase):
"ocf:heartbeat:Delay"
)
-
def test_two_agents_list(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.side_effect = (
@@ -507,7 +584,6 @@ class GuessResourceAgentFullNameTest(TestCase):
["ocf:heartbeat:Dummy", "ocf:pacemaker:Dummy"]
)
-
def test_two_agents_one_valid_list(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.side_effect = (
@@ -527,7 +603,6 @@ class GuessResourceAgentFullNameTest(TestCase):
["ocf:heartbeat:Dummy"]
)
-
def test_two_agents_exception(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.side_effect = (
@@ -557,7 +632,6 @@ class GuessResourceAgentFullNameTest(TestCase):
),
)
-
def test_no_agents_list(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.side_effect = self.mock_runner_side_effect
@@ -567,7 +641,6 @@ class GuessResourceAgentFullNameTest(TestCase):
[]
)
-
def test_no_agents_exception(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.side_effect = self.mock_runner_side_effect
@@ -586,7 +659,6 @@ class GuessResourceAgentFullNameTest(TestCase):
),
)
-
def test_no_valids_agent_list(self):
mock_runner = mock.MagicMock(spec_set=CommandRunner)
mock_runner.run.side_effect = (
@@ -603,14 +675,13 @@ class GuessResourceAgentFullNameTest(TestCase):
)
-@mock.patch.object(lib_ra.Agent, "_get_metadata")
+@patch_agent_object("_get_metadata")
class AgentMetadataGetShortdescTest(TestCase):
def setUp(self):
self.agent = lib_ra.Agent(
mock.MagicMock(spec_set=CommandRunner)
)
-
def test_no_desc(self, mock_metadata):
xml = '<resource-agent />'
mock_metadata.return_value = etree.XML(xml)
@@ -619,7 +690,6 @@ class AgentMetadataGetShortdescTest(TestCase):
""
)
-
def test_shortdesc_attribute(self, mock_metadata):
xml = '<resource-agent shortdesc="short description" />'
mock_metadata.return_value = etree.XML(xml)
@@ -628,7 +698,6 @@ class AgentMetadataGetShortdescTest(TestCase):
"short description"
)
-
def test_shortdesc_element(self, mock_metadata):
xml = """
<resource-agent>
@@ -642,14 +711,13 @@ class AgentMetadataGetShortdescTest(TestCase):
)
-@mock.patch.object(lib_ra.Agent, "_get_metadata")
+@patch_agent_object("_get_metadata")
class AgentMetadataGetLongdescTest(TestCase):
def setUp(self):
self.agent = lib_ra.Agent(
mock.MagicMock(spec_set=CommandRunner)
)
-
def test_no_desc(self, mock_metadata):
xml = '<resource-agent />'
mock_metadata.return_value = etree.XML(xml)
@@ -658,7 +726,6 @@ class AgentMetadataGetLongdescTest(TestCase):
""
)
-
def test_longesc_element(self, mock_metadata):
xml = """
<resource-agent>
@@ -672,14 +739,13 @@ class AgentMetadataGetLongdescTest(TestCase):
)
-@mock.patch.object(lib_ra.Agent, "_get_metadata")
+@patch_agent_object("_get_metadata")
class AgentMetadataGetParametersTest(TestCase):
def setUp(self):
self.agent = lib_ra.Agent(
mock.MagicMock(spec_set=CommandRunner)
)
-
def test_no_parameters(self, mock_metadata):
xml = """
<resource-agent>
@@ -691,7 +757,6 @@ class AgentMetadataGetParametersTest(TestCase):
[]
)
-
def test_empty_parameters(self, mock_metadata):
xml = """
<resource-agent>
@@ -704,7 +769,6 @@ class AgentMetadataGetParametersTest(TestCase):
[]
)
-
def test_empty_parameter(self, mock_metadata):
xml = """
<resource-agent>
@@ -725,6 +789,8 @@ class AgentMetadataGetParametersTest(TestCase):
"required": False,
"default": None,
"advanced": False,
+ "deprecated": False,
+ "obsoletes": None,
}
]
)
@@ -756,6 +822,8 @@ class AgentMetadataGetParametersTest(TestCase):
"required": True,
"default": "default_value",
"advanced": False,
+ "deprecated": False,
+ "obsoletes": None,
},
{
"name": "another parameter",
@@ -765,19 +833,46 @@ class AgentMetadataGetParametersTest(TestCase):
"required": False,
"default": None,
"advanced": False,
+ "deprecated": False,
+ "obsoletes": None,
}
]
)
+ def test_remove_obsoletes_keep_deprecated(self, mock_metadata):
+ xml = """
+ <resource-agent>
+ <parameters>
+ <parameter name="obsoletes" obsoletes="deprecated"/>
+ <parameter name="deprecated" deprecated="1"/>
+ </parameters>
+ </resource-agent>
+ """
+ mock_metadata.return_value = etree.XML(xml)
+ self.assertEqual(
+ self.agent.get_parameters(),
+ [
+ {
+ "name": "deprecated",
+ "longdesc": "",
+ "shortdesc": "",
+ "type": "string",
+ "required": False,
+ "default": None,
+ "advanced": False,
+ "deprecated": True,
+ "obsoletes": None,
+ },
+ ]
+ )
-@mock.patch.object(lib_ra.Agent, "_get_metadata")
+@patch_agent_object("_get_metadata")
class AgentMetadataGetActionsTest(TestCase):
def setUp(self):
self.agent = lib_ra.Agent(
mock.MagicMock(spec_set=CommandRunner)
)
-
def test_no_actions(self, mock_metadata):
xml = """
<resource-agent>
@@ -789,7 +884,6 @@ class AgentMetadataGetActionsTest(TestCase):
[]
)
-
def test_empty_actions(self, mock_metadata):
xml = """
<resource-agent>
@@ -802,7 +896,6 @@ class AgentMetadataGetActionsTest(TestCase):
[]
)
-
def test_empty_action(self, mock_metadata):
xml = """
<resource-agent>
@@ -817,7 +910,6 @@ class AgentMetadataGetActionsTest(TestCase):
[{}]
)
-
def test_more_actions(self, mock_metadata):
xml = """
<resource-agent>
@@ -843,9 +935,101 @@ class AgentMetadataGetActionsTest(TestCase):
]
)
+ def test_remove_depth_with_0(self, mock_metadata):
+ xml = """
+ <resource-agent>
+ <actions>
+ <action name="monitor" timeout="20" depth="0"/>
+ </actions>
+ </resource-agent>
+ """
+ mock_metadata.return_value = etree.XML(xml)
+ self.assertEqual(
+ self.agent.get_actions(),
+ [
+ {
+ "name": "monitor",
+ "timeout": "20"
+ },
+ ]
+ )
+
+ def test_transfor_depth_to_OCF_CHECK_LEVEL(self, mock_metadata):
+ xml = """
+ <resource-agent>
+ <actions>
+ <action name="monitor" timeout="20" depth="1"/>
+ </actions>
+ </resource-agent>
+ """
+ mock_metadata.return_value = etree.XML(xml)
+ self.assertEqual(
+ self.agent.get_actions(),
+ [
+ {
+ "name": "monitor",
+ "timeout": "20",
+ "OCF_CHECK_LEVEL": "1",
+ },
+ ]
+ )
+
+
+ at patch_agent_object("DEFAULT_CIB_ACTION_NAMES", ["monitor", "start"])
+ at patch_agent_object("get_actions")
+class AgentMetadataGetCibDefaultActions(TestCase):
+ def setUp(self):
+ self.agent = lib_ra.Agent(
+ mock.MagicMock(spec_set=CommandRunner)
+ )
+
+ def test_select_only_actions_for_cib(self, get_actions):
+ get_actions.return_value = [
+ {"name": "metadata"},
+ {"name": "start", "interval": "40s"},
+ {"name": "monitor", "interval": "10s", "timeout": "30s"},
+ ]
+ self.assertEqual(
+ [
+ {"name": "start", "interval": "40s"},
+ {"name": "monitor", "interval": "10s", "timeout": "30s"}
+ ],
+ self.agent.get_cib_default_actions()
+ )
+
+ def test_complete_monitor(self, get_actions):
+ get_actions.return_value = [{"name": "metadata"}]
+ self.assertEqual(
+ [{"name": "monitor", "interval": "60s"}],
+ self.agent.get_cib_default_actions()
+ )
+
+ def test_complete_intervals(self, get_actions):
+ get_actions.return_value = [
+ {"name": "metadata"},
+ {"name": "monitor", "timeout": "30s"},
+ ]
+ self.assertEqual(
+ [{"name": "monitor", "interval": "60s", "timeout": "30s"}],
+ self.agent.get_cib_default_actions()
+ )
+
+ def test_select_only_necessary_actions_for_cib(self, get_actions):
+ get_actions.return_value = [
+ {"name": "metadata"},
+ {"name": "start", "interval": "40s"},
+ {"name": "monitor", "interval": "10s", "timeout": "30s"},
+ ]
+ self.assertEqual(
+ [
+ {"name": "monitor", "interval": "10s", "timeout": "30s"}
+ ],
+ self.agent.get_cib_default_actions(necessary_only=True)
+ )
-@mock.patch.object(lib_ra.Agent, "_get_metadata")
-@mock.patch.object(lib_ra.Agent, "get_name", lambda self: "agent-name")
+
+@patch_agent_object("_get_metadata")
+@patch_agent_object("get_name", lambda self: "agent-name")
class AgentMetadataGetInfoTest(TestCase):
def setUp(self):
self.agent = lib_ra.Agent(
@@ -872,7 +1056,6 @@ class AgentMetadataGetInfoTest(TestCase):
</resource-agent>
""")
-
def test_name_info(self, mock_metadata):
mock_metadata.return_value = self.metadata
self.assertEqual(
@@ -886,7 +1069,6 @@ class AgentMetadataGetInfoTest(TestCase):
}
)
-
def test_description_info(self, mock_metadata):
mock_metadata.return_value = self.metadata
self.assertEqual(
@@ -900,7 +1082,6 @@ class AgentMetadataGetInfoTest(TestCase):
}
)
-
def test_full_info(self, mock_metadata):
mock_metadata.return_value = self.metadata
self.assertEqual(
@@ -918,6 +1099,8 @@ class AgentMetadataGetInfoTest(TestCase):
"required": True,
"default": "default_value",
"advanced": False,
+ "deprecated": False,
+ "obsoletes": None,
},
{
"name": "another parameter",
@@ -927,6 +1110,8 @@ class AgentMetadataGetInfoTest(TestCase):
"required": False,
"default": None,
"advanced": False,
+ "deprecated": False,
+ "obsoletes": None,
}
],
"actions": [
@@ -936,11 +1121,12 @@ class AgentMetadataGetInfoTest(TestCase):
},
{"name": "off"},
],
+ "default_actions": [{"name": "monitor", "interval": "60s"}],
}
)
-@mock.patch.object(lib_ra.Agent, "_get_metadata")
+@patch_agent_object("_get_metadata")
class AgentMetadataValidateParametersValuesTest(TestCase):
def setUp(self):
self.agent = lib_ra.Agent(
@@ -964,7 +1150,6 @@ class AgentMetadataValidateParametersValuesTest(TestCase):
</resource-agent>
""")
-
def test_all_required(self, mock_metadata):
mock_metadata.return_value = self.metadata
self.assertEqual(
@@ -975,7 +1160,6 @@ class AgentMetadataValidateParametersValuesTest(TestCase):
([], [])
)
-
def test_all_required_and_optional(self, mock_metadata):
mock_metadata.return_value = self.metadata
self.assertEqual(
@@ -987,7 +1171,6 @@ class AgentMetadataValidateParametersValuesTest(TestCase):
([], [])
)
-
def test_all_required_and_invalid(self, mock_metadata):
mock_metadata.return_value = self.metadata
self.assertEqual(
@@ -999,7 +1182,6 @@ class AgentMetadataValidateParametersValuesTest(TestCase):
(["invalid_param"], [])
)
-
def test_missing_required(self, mock_metadata):
mock_metadata.return_value = self.metadata
self.assertEqual(
@@ -1008,7 +1190,6 @@ class AgentMetadataValidateParametersValuesTest(TestCase):
([], ["required_param", "another_required_param"])
)
-
def test_missing_required_and_invalid(self, mock_metadata):
mock_metadata.return_value = self.metadata
self.assertEqual(
@@ -1019,13 +1200,226 @@ class AgentMetadataValidateParametersValuesTest(TestCase):
(["invalid_param"], ["required_param"])
)
+ def test_ignore_obsoletes_use_deprecated(self, mock_metadata):
+ xml = """
+ <resource-agent>
+ <parameters>
+ <parameter name="obsoletes" obsoletes="deprecated"
+ required="1"
+ />
+ <parameter name="deprecated" deprecated="1" required="1"/>
+ </parameters>
+ </resource-agent>
+ """
+ mock_metadata.return_value = etree.XML(xml)
+ self.assertEqual(
+ self.agent.validate_parameters_values({
+ }),
+ ([], ["deprecated"])
+ )
+
+ def test_dont_allow_obsoletes_use_deprecated(self, mock_metadata):
+ xml = """
+ <resource-agent>
+ <parameters>
+ <parameter name="obsoletes" obsoletes="deprecated"
+ required="1"
+ />
+ <parameter name="deprecated" deprecated="1" required="1"/>
+ </parameters>
+ </resource-agent>
+ """
+ mock_metadata.return_value = etree.XML(xml)
+ self.assertEqual(
+ self.agent.validate_parameters_values({
+ "obsoletes": "value",
+ }),
+ (["obsoletes"], ["deprecated"])
+ )
+
+
+class AgentMetadataValidateParameters(TestCase):
+ def setUp(self):
+ self.agent = lib_ra.Agent(mock.MagicMock(spec_set=CommandRunner))
+ self.metadata = etree.XML("""
+ <resource-agent>
+ <parameters>
+ <parameter name="test_param" required="0">
+ <longdesc>Long description</longdesc>
+ <shortdesc>short description</shortdesc>
+ <content type="string" default="default_value" />
+ </parameter>
+ <parameter name="required_param" required="1">
+ <content type="boolean" />
+ </parameter>
+ <parameter name="another_required_param" required="1">
+ <content type="string" />
+ </parameter>
+ </parameters>
+ </resource-agent>
+ """)
+ patcher = patch_agent_object("_get_metadata")
+ self.addCleanup(patcher.stop)
+ self.get_metadata = patcher.start()
+ self.get_metadata.return_value = self.metadata
+
+ def test_returns_empty_report_when_all_required_there(self):
+ self.assertEqual(
+ [],
+ self.agent.validate_parameters({
+ "another_required_param": "value1",
+ "required_param": "value2",
+ }),
+ )
+
+ def test_returns_empty_report_when_all_required_and_optional_there(self):
+ self.assertEqual(
+ [],
+ self.agent.validate_parameters({
+ "another_required_param": "value1",
+ "required_param": "value2",
+ "test_param": "value3",
+ })
+ )
+
+ def test_report_invalid_option(self):
+ assert_report_item_list_equal(
+ self.agent.validate_parameters({
+ "another_required_param": "value1",
+ "required_param": "value2",
+ "invalid_param": "value3",
+ }),
+ [
+ (
+ severity.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["invalid_param"],
+ "option_type": "resource agent parameter",
+ "allowed": [
+ "another_required_param",
+ "required_param",
+ "test_param",
+ ]
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ ],
+ )
+
+ def test_report_missing_option(self):
+ assert_report_item_list_equal(
+ self.agent.validate_parameters({}),
+ [
+ (
+ severity.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_names": [
+ "required_param",
+ "another_required_param",
+ ],
+ "option_type": "resource agent parameter",
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ ],
+ )
+
+ def test_warn_missing_required(self):
+ assert_report_item_list_equal(
+ self.agent.validate_parameters({}, allow_invalid=True),
+ [
+ (
+ severity.WARNING,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_names": [
+ "required_param",
+ "another_required_param",
+ ],
+ "option_type": "resource agent parameter",
+ },
+ ),
+ ]
+ )
+
+ def test_ignore_obsoletes_use_deprecated(self):
+ xml = """
+ <resource-agent>
+ <parameters>
+ <parameter name="obsoletes" obsoletes="deprecated"
+ required="1"
+ />
+ <parameter name="deprecated" deprecated="1" required="1"/>
+ </parameters>
+ </resource-agent>
+ """
+ self.get_metadata.return_value = etree.XML(xml)
+ assert_report_item_list_equal(
+ self.agent.validate_parameters({}),
+ [
+ (
+ severity.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_names": [
+ "deprecated",
+ ],
+ "option_type": "resource agent parameter",
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ ]
+ )
+
+ def test_dont_allow_obsoletes_use_deprecated(self):
+ xml = """
+ <resource-agent>
+ <parameters>
+ <parameter name="obsoletes" obsoletes="deprecated"
+ required="1"
+ />
+ <parameter name="deprecated" deprecated="1" required="1"/>
+ </parameters>
+ </resource-agent>
+ """
+ self.get_metadata.return_value = etree.XML(xml)
+ assert_report_item_list_equal(
+ self.agent.validate_parameters({"obsoletes": "value"}),
+ [
+ (
+ severity.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_names": [
+ "deprecated",
+ ],
+ "option_type": "resource agent parameter",
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ (
+ severity.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["obsoletes"],
+ "option_type": "resource agent parameter",
+ "allowed": [
+ "deprecated",
+ ]
+ },
+ report_codes.FORCE_OPTIONS
+ ),
+ ]
+ )
+
class StonithdMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
def setUp(self):
self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
self.agent = lib_ra.StonithdMetadata(self.mock_runner)
-
def test_success(self):
metadata = """
<resource-agent>
@@ -1043,7 +1437,6 @@ class StonithdMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
["/usr/libexec/pacemaker/stonithd", "metadata"]
)
-
def test_failed_to_get_xml(self):
self.mock_runner.run.return_value = ("", "some error", 1)
@@ -1060,7 +1453,6 @@ class StonithdMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
["/usr/libexec/pacemaker/stonithd", "metadata"]
)
-
def test_invalid_xml(self):
self.mock_runner.run.return_value = ("some garbage", "", 0)
@@ -1069,7 +1461,7 @@ class StonithdMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
self.agent._get_metadata,
{
"agent": "stonithd",
- "message": "Start tag expected, '<' not found, line 1, column 1",
+ "message": start_tag_error_text(),
}
)
@@ -1078,14 +1470,13 @@ class StonithdMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
)
-@mock.patch.object(lib_ra.Agent, "_get_metadata")
+@patch_agent_object("_get_metadata")
class StonithdMetadataGetParametersTest(TestCase):
def setUp(self):
self.agent = lib_ra.StonithdMetadata(
mock.MagicMock(spec_set=CommandRunner)
)
-
def test_success(self, mock_metadata):
xml = """
<resource-agent>
@@ -1116,7 +1507,9 @@ class StonithdMetadataGetParametersTest(TestCase):
"type": "test_type",
"required": False,
"default": "default_value",
- "advanced": True
+ "advanced": True,
+ "deprecated": False,
+ "obsoletes": None,
},
{
"name": "another parameter",
@@ -1125,27 +1518,26 @@ class StonithdMetadataGetParametersTest(TestCase):
"type": "string",
"required": False,
"default": None,
- "advanced": False
+ "advanced": False,
+ "deprecated": False,
+ "obsoletes": None,
}
]
)
-class CrmAgentMetadataGetNameTest(TestCase, ExtendedAssertionsMixin):
- def test_success(self):
- mock_runner = mock.MagicMock(spec_set=CommandRunner)
- agent_name = "ocf:pacemaker:Dummy"
- agent = lib_ra.CrmAgent(mock_runner, agent_name)
+class CrmAgentDescendant(lib_ra.CrmAgent):
+ def _prepare_name_parts(self, name):
+ return lib_ra.ResourceAgentName("STANDARD", None, name)
- self.assertEqual(agent.get_name(), agent_name)
+ def get_name(self):
+ return self.get_type()
class CrmAgentMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
def setUp(self):
self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
- self.agent_name = "ocf:pacemaker:Dummy"
- self.agent = lib_ra.CrmAgent(self.mock_runner, self.agent_name)
-
+ self.agent = CrmAgentDescendant(self.mock_runner, "TYPE")
def test_success(self):
metadata = """
@@ -1161,13 +1553,16 @@ class CrmAgentMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
)
self.mock_runner.run.assert_called_once_with(
- ["/usr/sbin/crm_resource", "--show-metadata", self.agent_name],
+ [
+ "/usr/sbin/crm_resource",
+ "--show-metadata",
+ self.agent._get_full_name()
+ ],
env_extend={
"PATH": "/usr/sbin/:/bin/:/usr/bin/",
}
)
-
def test_failed_to_get_xml(self):
self.mock_runner.run.return_value = ("", "some error", 1)
@@ -1175,19 +1570,22 @@ class CrmAgentMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
lib_ra.UnableToGetAgentMetadata,
self.agent._get_metadata,
{
- "agent": self.agent_name,
+ "agent": self.agent.get_name(),
"message": "some error",
}
)
self.mock_runner.run.assert_called_once_with(
- ["/usr/sbin/crm_resource", "--show-metadata", self.agent_name],
+ [
+ "/usr/sbin/crm_resource",
+ "--show-metadata",
+ self.agent._get_full_name()
+ ],
env_extend={
"PATH": "/usr/sbin/:/bin/:/usr/bin/",
}
)
-
def test_invalid_xml(self):
self.mock_runner.run.return_value = ("some garbage", "", 0)
@@ -1195,13 +1593,17 @@ class CrmAgentMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
lib_ra.UnableToGetAgentMetadata,
self.agent._get_metadata,
{
- "agent": self.agent_name,
- "message": "Start tag expected, '<' not found, line 1, column 1",
+ "agent": self.agent.get_name(),
+ "message": start_tag_error_text(),
}
)
self.mock_runner.run.assert_called_once_with(
- ["/usr/sbin/crm_resource", "--show-metadata", self.agent_name],
+ [
+ "/usr/sbin/crm_resource",
+ "--show-metadata",
+ self.agent._get_full_name()
+ ],
env_extend={
"PATH": "/usr/sbin/:/bin/:/usr/bin/",
}
@@ -1211,9 +1613,7 @@ class CrmAgentMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
class CrmAgentMetadataIsValidAgentTest(TestCase):
def setUp(self):
self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
- self.agent_name = "ocf:pacemaker:Dummy"
- self.agent = lib_ra.CrmAgent(self.mock_runner, self.agent_name)
-
+ self.agent = CrmAgentDescendant(self.mock_runner, "TYPE")
def test_success(self):
metadata = """
@@ -1225,7 +1625,6 @@ class CrmAgentMetadataIsValidAgentTest(TestCase):
self.assertTrue(self.agent.is_valid_metadata())
-
def test_fail(self):
self.mock_runner.run.return_value = ("", "", 1)
@@ -1252,11 +1651,9 @@ class StonithAgentMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
self.agent_name
)
-
def tearDown(self):
lib_ra.StonithAgent._stonithd_metadata = None
-
def test_success(self):
metadata = """
<resource-agent>
@@ -1282,37 +1679,6 @@ class StonithAgentMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
)
-@mock.patch.object(lib_ra.Agent, "_get_metadata")
-class StonithAgentMetadataGetActionsTest(TestCase):
- def setUp(self):
- self.agent = lib_ra.StonithAgent(
- mock.MagicMock(spec_set=CommandRunner),
- "fence_dummy"
- )
-
-
- def tearDown(self):
- lib_ra.StonithAgent._stonithd_metadata = None
-
-
- def test_more_actions(self, mock_metadata):
- xml = """
- <resource-agent>
- <actions>
- <action name="on" automatic="0"/>
- <action name="off" />
- <action name="reboot" />
- <action name="status" />
- </actions>
- </resource-agent>
- """
- mock_metadata.return_value = etree.XML(xml)
- self.assertEqual(
- self.agent.get_actions(),
- []
- )
-
-
class StonithAgentMetadataGetParametersTest(TestCase):
def setUp(self):
self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
@@ -1322,11 +1688,9 @@ class StonithAgentMetadataGetParametersTest(TestCase):
self.agent_name
)
-
def tearDown(self):
lib_ra.StonithAgent._stonithd_metadata = None
-
def test_success(self):
metadata = """
<resource-agent>
@@ -1366,7 +1730,9 @@ class StonithAgentMetadataGetParametersTest(TestCase):
"type": "string",
"required": False,
"default": None,
- "advanced": False
+ "advanced": False,
+ "deprecated": False,
+ "obsoletes": None,
},
{
"name": "action",
@@ -1379,7 +1745,9 @@ class StonithAgentMetadataGetParametersTest(TestCase):
"type": "string",
"required": False,
"default": None,
- "advanced": False
+ "advanced": False,
+ "deprecated": False,
+ "obsoletes": None,
},
{
"name": "another_param",
@@ -1388,7 +1756,9 @@ class StonithAgentMetadataGetParametersTest(TestCase):
"type": "string",
"required": False,
"default": None,
- "advanced": False
+ "advanced": False,
+ "deprecated": False,
+ "obsoletes": None,
},
{
"name": "stonithd_param",
@@ -1397,7 +1767,9 @@ class StonithAgentMetadataGetParametersTest(TestCase):
"type": "string",
"required": False,
"default": None,
- "advanced": False
+ "advanced": False,
+ "deprecated": False,
+ "obsoletes": None,
},
]
)
@@ -1420,7 +1792,7 @@ class StonithAgentMetadataGetParametersTest(TestCase):
])
-@mock.patch.object(lib_ra.Agent, "_get_metadata")
+@patch_agent_object("_get_metadata")
class StonithAgentMetadataGetProvidesUnfencingTest(TestCase):
def setUp(self):
self.agent = lib_ra.StonithAgent(
@@ -1428,11 +1800,9 @@ class StonithAgentMetadataGetProvidesUnfencingTest(TestCase):
"fence_dummy"
)
-
def tearDown(self):
lib_ra.StonithAgent._stonithd_metadata = None
-
def test_true(self, mock_metadata):
xml = """
<resource-agent>
@@ -1447,7 +1817,6 @@ class StonithAgentMetadataGetProvidesUnfencingTest(TestCase):
mock_metadata.return_value = etree.XML(xml)
self.assertTrue(self.agent.get_provides_unfencing())
-
def test_no_action_on(self, mock_metadata):
xml = """
<resource-agent>
@@ -1461,7 +1830,6 @@ class StonithAgentMetadataGetProvidesUnfencingTest(TestCase):
mock_metadata.return_value = etree.XML(xml)
self.assertFalse(self.agent.get_provides_unfencing())
-
def test_no_tagret(self, mock_metadata):
xml = """
<resource-agent>
@@ -1476,7 +1844,6 @@ class StonithAgentMetadataGetProvidesUnfencingTest(TestCase):
mock_metadata.return_value = etree.XML(xml)
self.assertFalse(self.agent.get_provides_unfencing())
-
def test_no_automatic(self, mock_metadata):
xml = """
<resource-agent>
@@ -1491,6 +1858,7 @@ class StonithAgentMetadataGetProvidesUnfencingTest(TestCase):
mock_metadata.return_value = etree.XML(xml)
self.assertFalse(self.agent.get_provides_unfencing())
+
class ResourceAgentTest(TestCase):
def test_raises_on_invalid_name(self):
self.assertRaises(
@@ -1499,7 +1867,60 @@ class ResourceAgentTest(TestCase):
)
def test_does_not_raise_on_valid_name(self):
- lib_ra.ResourceAgent(mock.MagicMock(), "formal:valid:name")
+ lib_ra.ResourceAgent(mock.MagicMock(), "ocf:heardbeat:name")
+
+
+@patch_agent_object("_get_metadata")
+class ResourceAgentGetParameters(TestCase):
+ def fixture_metadata(self, params):
+ return etree.XML("""
+ <resource-agent>
+ <parameters>{0}</parameters>
+ </resource-agent>
+ """.format(['<parameter name="{0}" />'.format(name) for name in params])
+ )
+
+ def assert_param_names(self, expected_names, actual_params):
+ self.assertEqual(
+ expected_names,
+ [param["name"] for param in actual_params]
+ )
+
+ def test_add_trace_parameters_to_ocf(self, mock_metadata):
+ mock_metadata.return_value = self.fixture_metadata(["test_param"])
+ agent = lib_ra.ResourceAgent(
+ mock.MagicMock(spec_set=CommandRunner),
+ "ocf:pacemaker:test"
+ )
+ self.assert_param_names(
+ ["test_param", "trace_ra", "trace_file"],
+ agent.get_parameters()
+ )
+
+ def test_do_not_add_trace_parameters_if_present(self, mock_metadata):
+ mock_metadata.return_value = self.fixture_metadata([
+ "trace_ra", "test_param", "trace_file"
+ ])
+ agent = lib_ra.ResourceAgent(
+ mock.MagicMock(spec_set=CommandRunner),
+ "ocf:pacemaker:test"
+ )
+ self.assert_param_names(
+ ["trace_ra", "test_param", "trace_file"],
+ agent.get_parameters()
+ )
+
+ def test_do_not_add_trace_parameters_to_others(self, mock_metadata):
+ mock_metadata.return_value = self.fixture_metadata(["test_param"])
+ agent = lib_ra.ResourceAgent(
+ mock.MagicMock(spec_set=CommandRunner),
+ "service:test"
+ )
+ self.assert_param_names(
+ ["test_param"],
+ agent.get_parameters()
+ )
+
class FindResourceAgentByNameTest(TestCase):
def setUp(self):
@@ -1563,7 +1984,7 @@ class FindResourceAgentByNameTest(TestCase):
ResourceAgent.assert_called_once_with(self.runner, name)
AbsentResourceAgent.assert_called_once_with(self.runner, name)
error_to_report_item.assert_called_once_with(
- e, severity=severity.WARNING, forceable=True
+ e, severity=severity.WARNING
)
self.report_processor.process.assert_called_once_with(report)
@@ -1584,7 +2005,7 @@ class FindResourceAgentByNameTest(TestCase):
self.assertEqual(report, context_manager.exception.args[0])
ResourceAgent.assert_called_once_with(self.runner, name)
- error_to_report_item.assert_called_once_with(e)
+ error_to_report_item.assert_called_once_with(e, forceable=True)
@patch_agent("resource_agent_error_to_report_item")
@patch_agent("ResourceAgent")
@@ -1603,6 +2024,52 @@ class FindResourceAgentByNameTest(TestCase):
ResourceAgent.assert_called_once_with(self.runner, name)
error_to_report_item.assert_called_once_with(e)
+
+class FindStonithAgentByName(TestCase):
+ # It is quite similar to find_valid_resource_agent_by_name, so only minimal
+ # tests are here:
+ # - test success
+ # - test with ":" in agent name - there was a bug
+ def setUp(self):
+ self.report_processor = mock.MagicMock()
+ self.runner = mock.MagicMock()
+ self.run = partial(
+ lib_ra.find_valid_stonith_agent_by_name,
+ self.report_processor,
+ self.runner,
+ )
+
+ @patch_agent("StonithAgent")
+ def test_returns_real_agent_when_is_there(self, StonithAgent):
+ #setup
+ name = "fence_xvm"
+
+ agent = mock.MagicMock()
+ agent.validate_metadata = mock.Mock(return_value=agent)
+ StonithAgent.return_value = agent
+
+ #test
+ self.assertEqual(agent, self.run(name))
+ StonithAgent.assert_called_once_with(self.runner, name)
+
+ @patch_agent("resource_agent_error_to_report_item")
+ @patch_agent("StonithAgent")
+ def test_raises_on_invalid_name(self, StonithAgent, error_to_report_item):
+ name = "fence_xvm:invalid"
+ report = "INVALID_STONITH_AGENT_NAME"
+ e = lib_ra.InvalidStonithAgentName(name, "invalid agent name")
+
+ StonithAgent.side_effect = e
+ error_to_report_item.return_value = report
+
+ with self.assertRaises(LibraryError) as context_manager:
+ self.run(name)
+
+ self.assertEqual(report, context_manager.exception.args[0])
+ StonithAgent.assert_called_once_with(self.runner, name)
+ error_to_report_item.assert_called_once_with(e)
+
+
class AbsentResourceAgentTest(TestCase):
@mock.patch.object(lib_ra.CrmAgent, "_load_metadata")
def test_behaves_like_a_proper_agent(self, load_metadata):
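In the same spirit, test_resource_agent.py binds the patched class once with patch_agent_object = partial(mock.patch.object, lib_ra.Agent), so decorators shrink from @mock.patch.object(lib_ra.Agent, "_get_metadata") to @patch_agent_object("_get_metadata"). Below is a minimal sketch of the idea, using a stand-in Agent class instead of the real pcs class (the real _get_metadata returns parsed XML, not a dict):

    from functools import partial
    from unittest import mock

    class Agent(object):
        # Stand-in for pcs.lib.resource_agent.Agent, for illustration only.
        def _get_metadata(self):
            raise NotImplementedError("the real class queries the agent for metadata")

        def get_shortdesc(self):
            return self._get_metadata().get("shortdesc", "")

    # Bind the class once; every call then yields a ready-made patcher/decorator.
    patch_agent_object = partial(mock.patch.object, Agent)

    @patch_agent_object("_get_metadata", return_value={"shortdesc": "demo"})
    def check(mock_get_metadata):
        # Agent._get_metadata is replaced by a MagicMock for this call only.
        assert Agent().get_shortdesc() == "demo"
        mock_get_metadata.assert_called_once_with()

    check()

functools.partial simply pre-fills the first argument of mock.patch.object, so any keyword accepted by patch.object (new, return_value, side_effect, ...) still works unchanged.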
diff --git a/pcs/lib/test/test_validate.py b/pcs/lib/test/test_validate.py
new file mode 100644
index 0000000..d646d3f
--- /dev/null
+++ b/pcs/lib/test/test_validate.py
@@ -0,0 +1,1045 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+import re
+
+from pcs.common import report_codes
+from pcs.lib import validate
+from pcs.lib.cib.tools import IdProvider
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import assert_report_item_list_equal
+from pcs.test.tools.pcs_unittest import TestCase
+
+class ValuesToPairs(TestCase):
+ def test_create_from_plain_values(self):
+ self.assertEqual(
+ {
+ "first": validate.ValuePair("A", "a"),
+ "second": validate.ValuePair("B", "b"),
+ },
+ validate.values_to_pairs(
+ {
+ "first": "A",
+ "second": "B",
+ },
+ lambda key, value: value.lower()
+ )
+ )
+
+ def test_keep_pair_if_is_already_there(self):
+ self.assertEqual(
+ {
+ "first": validate.ValuePair("A", "aaa"),
+ "second": validate.ValuePair("B", "b"),
+ },
+ validate.values_to_pairs(
+ {
+ "first": validate.ValuePair("A", "aaa"),
+ "second": "B",
+ },
+ lambda key, value: value.lower()
+ )
+ )
+
+class PairsToValues(TestCase):
+ def test_keep_values_if_is_not_pair(self):
+ self.assertEqual(
+ {
+ "first": "A",
+ "second": "B",
+ },
+ validate.pairs_to_values(
+ {
+ "first": "A",
+ "second": "B",
+ }
+ )
+ )
+
+ def test_extract_normalized_values(self):
+ self.assertEqual(
+ {
+ "first": "aaa",
+ "second": "B",
+ },
+ validate.pairs_to_values(
+ {
+ "first": validate.ValuePair(
+ original="A",
+ normalized="aaa"
+ ),
+ "second": "B",
+ }
+ )
+ )
+
+class OptionValueNormalization(TestCase):
+ def test_return_normalized_value_if_normalization_for_key_specified(self):
+ normalize = validate.option_value_normalization({
+ "first": lambda value: value.upper()
+ })
+ self.assertEqual("ONE", normalize("first", "one"))
+
+ def test_return_value_if_normalization_for_key_unspecified(self):
+ normalize = validate.option_value_normalization({})
+ self.assertEqual("one", normalize("first", "one"))
+
+
+class DependsOn(TestCase):
+ def test_success_when_dependency_present(self):
+ assert_report_item_list_equal(
+ validate.depends_on_option("name", "prerequisite", "type")({
+ "name": "value",
+ "prerequisite": "value",
+ }),
+ []
+ )
+
+ def test_report_when_dependency_missing(self):
+ assert_report_item_list_equal(
+ validate.depends_on_option(
+ "name", "prerequisite", "type1", "type2"
+ )({
+ "name": "value",
+ }),
+ [
+ (
+ severities.ERROR,
+ report_codes.PREREQUISITE_OPTION_IS_MISSING,
+ {
+ "option_name": "name",
+ "option_type": "type1",
+ "prerequisite_name": "prerequisite",
+ "prerequisite_type": "type2",
+ },
+ None
+ ),
+ ]
+ )
+
+
+class IsRequired(TestCase):
+ def test_returns_no_report_when_required_is_present(self):
+ assert_report_item_list_equal(
+ validate.is_required("name", "some type")({"name": "monitor"}),
+ []
+ )
+
+ def test_returns_report_when_required_is_missing(self):
+ assert_report_item_list_equal(
+ validate.is_required("name", "some type")({}),
+ [
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_names": ["name"],
+ "option_type": "some type",
+ },
+ None
+ ),
+ ]
+ )
+
+
+class IsRequiredSomeOf(TestCase):
+ def test_returns_no_report_when_first_is_present(self):
+ assert_report_item_list_equal(
+ validate.is_required_some_of(["first", "second"], "type")({
+ "first": "value",
+ }),
+ []
+ )
+
+ def test_returns_no_report_when_second_is_present(self):
+ assert_report_item_list_equal(
+ validate.is_required_some_of(["first", "second"], "type")({
+ "second": "value",
+ }),
+ []
+ )
+
+ def test_returns_report_when_missing(self):
+ assert_report_item_list_equal(
+ validate.is_required_some_of(["first", "second"], "type")({
+ "third": "value",
+ }),
+ [
+ (
+ severities.ERROR,
+ report_codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING,
+ {
+ "option_names": ["first", "second"],
+ "option_type": "type",
+ },
+ None
+ ),
+ ]
+ )
+
+
+class ValueCondTest(TestCase):
+ def setUp(self):
+ self.predicate = lambda a: a == "b"
+
+ def test_returns_empty_report_on_valid_option(self):
+ self.assertEqual(
+ [],
+ validate.value_cond("a", self.predicate, "test")({"a": "b"})
+ )
+
+ def test_returns_empty_report_on_valid_normalized_option(self):
+ self.assertEqual(
+ [],
+ validate.value_cond("a", self.predicate, "test")(
+ {"a": validate.ValuePair(original="C", normalized="b")}
+ ),
+ )
+
+ def test_returns_report_about_invalid_option(self):
+ assert_report_item_list_equal(
+ validate.value_cond("a", self.predicate, "test")({"a": "c"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "a",
+ "option_value": "c",
+ "allowed_values": "test",
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_support_OptionValuePair(self):
+ assert_report_item_list_equal(
+ validate.value_cond("a", self.predicate, "test")(
+ {"a": validate.ValuePair(original="b", normalized="c")}
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "a",
+ "option_value": "b",
+ "allowed_values": "test",
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_supports_another_report_option_name(self):
+ assert_report_item_list_equal(
+ validate.value_cond(
+ "a", self.predicate, "test", option_name_for_report="option a"
+ )(
+ {"a": "c"}
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "option a",
+ "option_value": "c",
+ "allowed_values": "test",
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_supports_forceable_errors(self):
+ assert_report_item_list_equal(
+ validate.value_cond(
+ "a", self.predicate, "test", code_to_allow_extra_values="FORCE"
+ )(
+ {"a": "c"}
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "a",
+ "option_value": "c",
+ "allowed_values": "test",
+ },
+ "FORCE"
+ ),
+ ]
+ )
+
+ def test_supports_warning(self):
+ assert_report_item_list_equal(
+ validate.value_cond(
+ "a",
+ self.predicate,
+ "test",
+ code_to_allow_extra_values="FORCE",
+ allow_extra_values=True
+ )(
+ {"a": "c"}
+ ),
+ [
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "a",
+ "option_value": "c",
+ "allowed_values": "test",
+ },
+ None
+ ),
+ ]
+ )
+
+
+class ValueEmptyOrValid(TestCase):
+ def setUp(self):
+ self.validator = validate.value_cond("a", lambda a: a == "b", "test")
+
+ def test_missing(self):
+ assert_report_item_list_equal(
+ validate.value_empty_or_valid("a", self.validator)({"b": "c"}),
+ [
+ ]
+ )
+
+ def test_empty(self):
+ assert_report_item_list_equal(
+ validate.value_empty_or_valid("a", self.validator)({"a": ""}),
+ [
+ ]
+ )
+
+ def test_valid(self):
+ assert_report_item_list_equal(
+ validate.value_empty_or_valid("a", self.validator)({"a": "b"}),
+ [
+ ]
+ )
+
+ def test_not_valid(self):
+ assert_report_item_list_equal(
+ validate.value_empty_or_valid("a", self.validator)({"a": "c"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "a",
+ "option_value": "c",
+ "allowed_values": "test",
+ },
+ None
+ ),
+ ]
+ )
+
+
+class ValueId(TestCase):
+ def test_empty_id(self):
+ assert_report_item_list_equal(
+ validate.value_id("id", "test id")({"id": ""}),
+ [
+ (
+ severities.ERROR,
+ report_codes.EMPTY_ID,
+ {
+ "id": "",
+ "id_description": "test id",
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_invalid_first_char(self):
+ assert_report_item_list_equal(
+ validate.value_id("id", "test id")({"id": "0-test"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_ID,
+ {
+ "id": "0-test",
+ "id_description": "test id",
+ "invalid_character": "0",
+ "is_first_char": True,
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_invalid_char(self):
+ assert_report_item_list_equal(
+ validate.value_id("id", "test id")({"id": "te#st"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_ID,
+ {
+ "id": "te#st",
+ "id_description": "test id",
+ "invalid_character": "#",
+ "is_first_char": False,
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_used_id(self):
+ id_provider = IdProvider(etree.fromstring("<a><test id='used' /></a>"))
+ assert_report_item_list_equal(
+ validate.value_id("id", "test id", id_provider)({"id": "used"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.ID_ALREADY_EXISTS,
+ {
+ "id": "used",
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_pair_invalid(self):
+ assert_report_item_list_equal(
+ validate.value_id("id", "test id")({
+ "id": validate.ValuePair("@&#", "")
+ }),
+ [
+ (
+ severities.ERROR,
+ report_codes.EMPTY_ID,
+ {
+ # TODO: This should be "@&#". However an old validator
+ # is used and it doesn't work with pairs.
+ "id": "",
+ "id_description": "test id",
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_pair_used_id(self):
+ id_provider = IdProvider(etree.fromstring("<a><test id='used' /></a>"))
+ assert_report_item_list_equal(
+ validate.value_id("id", "test id", id_provider)({
+ "id": validate.ValuePair("not-used", "used")
+ }),
+ [
+ (
+ severities.ERROR,
+ report_codes.ID_ALREADY_EXISTS,
+ {
+ # TODO: This should be "not-used". However an old
+ # validator is used and it doesn't work with pairs.
+ "id": "used",
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_success(self):
+ id_provider = IdProvider(etree.fromstring("<a><test id='used' /></a>"))
+ assert_report_item_list_equal(
+ validate.value_id("id", "test id", id_provider)({"id": "correct"}),
+ []
+ )
+
+ def test_pair_success(self):
+ id_provider = IdProvider(etree.fromstring("<a><test id='used' /></a>"))
+ assert_report_item_list_equal(
+ validate.value_id("id", "test id", id_provider)({
+ "id": validate.ValuePair("correct", "correct")
+ }),
+ []
+ )
+
+
+class ValueIn(TestCase):
+ def test_returns_empty_report_on_valid_option(self):
+ self.assertEqual(
+ [],
+ validate.value_in("a", ["b"])({"a": "b"})
+ )
+
+ def test_returns_empty_report_on_valid_normalized_option(self):
+ self.assertEqual(
+ [],
+ validate.value_in("a", ["b"])(
+ {"a": validate.ValuePair(original="C", normalized="b")}
+ ),
+ )
+
+ def test_returns_report_about_invalid_option(self):
+ assert_report_item_list_equal(
+ validate.value_in("a", ["b"])({"a": "c"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "a",
+ "option_value": "c",
+ "allowed_values": ["b"],
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_support_OptionValuePair(self):
+ assert_report_item_list_equal(
+ validate.value_in("a", ["b"])(
+ {"a": validate.ValuePair(original="C", normalized="c")}
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "a",
+ "option_value": "C",
+ "allowed_values": ["b"],
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_supports_another_report_option_name(self):
+ assert_report_item_list_equal(
+ validate.value_in("a", ["b"], option_name_for_report="option a")(
+ {"a": "c"}
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "option a",
+ "option_value": "c",
+ "allowed_values": ["b"],
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_supports_forceable_errors(self):
+ assert_report_item_list_equal(
+ validate.value_in("a", ["b"], code_to_allow_extra_values="FORCE")(
+ {"a": "c"}
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "a",
+ "option_value": "c",
+ "allowed_values": ["b"],
+ },
+ "FORCE"
+ ),
+ ]
+ )
+
+ def test_supports_warning(self):
+ assert_report_item_list_equal(
+ validate.value_in(
+ "a",
+ ["b"],
+ code_to_allow_extra_values="FORCE",
+ allow_extra_values=True
+ )(
+ {"a": "c"}
+ ),
+ [
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "a",
+ "option_value": "c",
+ "allowed_values": ["b"],
+ },
+ None
+ ),
+ ]
+ )
+
+
+class ValueNonnegativeInteger(TestCase):
+ # The real code only calls value_cond => only basic tests here.
+ def test_empty_report_on_valid_option(self):
+ assert_report_item_list_equal(
+ validate.value_nonnegative_integer("key")({"key": "10"}),
+ []
+ )
+
+ def test_report_invalid_value(self):
+ assert_report_item_list_equal(
+ validate.value_nonnegative_integer("key")({"key": "-10"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "key",
+ "option_value": "-10",
+ "allowed_values": "a non-negative integer",
+ },
+ None
+ ),
+ ]
+ )
+
+
+class ValueNotEmpty(TestCase):
+ def test_empty_report_on_not_empty_value(self):
+ assert_report_item_list_equal(
+ validate.value_not_empty("key", "description")({"key": "abc"}),
+ []
+ )
+
+ def test_empty_report_on_zero_int_value(self):
+ assert_report_item_list_equal(
+ validate.value_not_empty("key", "description")({"key": 0}),
+ []
+ )
+
+ def test_report_on_empty_string(self):
+ assert_report_item_list_equal(
+ validate.value_not_empty("key", "description")({"key": ""}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "key",
+ "option_value": "",
+ "allowed_values": "description",
+ },
+ None
+ ),
+ ]
+ )
+
+
+class ValuePortNumber(TestCase):
+ # The real code only calls value_cond => only basic tests here.
+ def test_empty_report_on_valid_option(self):
+ assert_report_item_list_equal(
+ validate.value_port_number("key")({"key": "54321"}),
+ []
+ )
+
+ def test_report_invalid_value(self):
+ assert_report_item_list_equal(
+ validate.value_port_number("key")({"key": "65536"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "key",
+ "option_value": "65536",
+ "allowed_values": "a port number (1-65535)",
+ },
+ None
+ ),
+ ]
+ )
+
+
+class ValuePortRange(TestCase):
+ # The real code only calls value_cond => only basic tests here.
+ def test_empty_report_on_valid_option(self):
+ assert_report_item_list_equal(
+ validate.value_port_range("key")({"key": "100-200"}),
+ []
+ )
+
+ def test_report_nonsense(self):
+ assert_report_item_list_equal(
+ validate.value_port_range("key")({"key": "10-20-30"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "key",
+ "option_value": "10-20-30",
+ "allowed_values": "port-port",
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_report_bad_start(self):
+ assert_report_item_list_equal(
+ validate.value_port_range("key")({"key": "0-100"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "key",
+ "option_value": "0-100",
+ "allowed_values": "port-port",
+ },
+ None
+ ),
+ ]
+ )
+
+ def test_report_bad_end(self):
+ assert_report_item_list_equal(
+ validate.value_port_range("key")({"key": "100-65536"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "key",
+ "option_value": "100-65536",
+ "allowed_values": "port-port",
+ },
+ None
+ ),
+ ]
+ )
+
+
+class ValuePositiveInteger(TestCase):
+ # The real code only calls value_cond => only basic tests here.
+ def test_empty_report_on_valid_option(self):
+ assert_report_item_list_equal(
+ validate.value_positive_integer("key")({"key": "10"}),
+ []
+ )
+
+ def test_report_invalid_value(self):
+ assert_report_item_list_equal(
+ validate.value_positive_integer("key")({"key": "0"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "key",
+ "option_value": "0",
+ "allowed_values": "a positive integer",
+ },
+ None
+ ),
+ ]
+ )
+
+
+class MutuallyExclusive(TestCase):
+ def test_returns_empty_report_when_valid(self):
+ assert_report_item_list_equal(
+ validate.mutually_exclusive(["a", "b"])({"a": "A"}),
+ [],
+ )
+
+ def test_returns_mutually_exclusive_report_on_2_names_conflict(self):
+ assert_report_item_list_equal(
+ validate.mutually_exclusive(["a", "b", "c"])({
+ "a": "A",
+ "b": "B",
+ "d": "D",
+ }),
+ [
+ (
+ severities.ERROR,
+ report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
+ {
+ "option_type": "option",
+ "option_names": ["a", "b"],
+ },
+ None
+ ),
+ ],
+ )
+
+ def test_returns_mutually_exclusive_report_on_multiple_name_conflict(self):
+ assert_report_item_list_equal(
+ validate.mutually_exclusive(["a", "b", "c", "e"])({
+ "a": "A",
+ "b": "B",
+ "c": "C",
+ "d": "D",
+ }),
+ [
+ (
+ severities.ERROR,
+ report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
+ {
+ "option_type": "option",
+ "option_names": ["a", "b", "c"],
+ },
+ None
+ ),
+ ],
+ )
+
+class CollectOptionValidations(TestCase):
+ def test_collect_all_errors_from_specifications(self):
+ specification = [
+ lambda option_dict: ["A{0}".format(option_dict["x"])],
+ lambda option_dict: ["B"],
+ ]
+
+ self.assertEqual(
+ ["Ay", "B"],
+ validate.run_collection_of_option_validators(
+ {"x": "y"},
+ specification
+ )
+ )
+
+class NamesIn(TestCase):
+ def test_return_empty_report_on_allowed_names(self):
+ assert_report_item_list_equal(
+ validate.names_in(
+ ["a", "b", "c"],
+ ["a", "b"],
+ ),
+ [],
+ )
+
+ def test_return_error_on_not_allowed_names(self):
+ assert_report_item_list_equal(
+ validate.names_in(
+ ["a", "b", "c"],
+ ["x", "y"],
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["x", "y"],
+ "allowed": ["a", "b", "c"],
+ "option_type": "option",
+ },
+ None
+ )
+ ]
+ )
+
+ def test_return_error_on_not_allowed_names_without_force_code(self):
+ assert_report_item_list_equal(
+ validate.names_in(
+ ["a", "b", "c"],
+ ["x", "y"],
+                #does not work without code_to_allow_extra_names
+ allow_extra_names=True,
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["x", "y"],
+ "allowed": ["a", "b", "c"],
+ "option_type": "option",
+ },
+ None
+ )
+ ]
+ )
+
+ def test_return_forceable_error_on_not_allowed_names(self):
+ assert_report_item_list_equal(
+ validate.names_in(
+ ["a", "b", "c"],
+ ["x", "y"],
+ option_type="some option",
+ code_to_allow_extra_names="FORCE_CODE",
+ ),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["x", "y"],
+ "allowed": ["a", "b", "c"],
+ "option_type": "some option",
+ },
+ "FORCE_CODE"
+ )
+ ]
+ )
+
+ def test_return_warning_on_not_allowed_names(self):
+ assert_report_item_list_equal(
+ validate.names_in(
+ ["a", "b", "c"],
+ ["x", "y"],
+ option_type="some option",
+ code_to_allow_extra_names="FORCE_CODE",
+ allow_extra_names=True,
+ ),
+ [
+ (
+ severities.WARNING,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["x", "y"],
+ "allowed": ["a", "b", "c"],
+ "option_type": "some option",
+ },
+ None
+ )
+ ]
+ )
+
+
+class IsInteger(TestCase):
+ def test_no_range(self):
+ self.assertTrue(validate.is_integer(1))
+ self.assertTrue(validate.is_integer("1"))
+ self.assertTrue(validate.is_integer(-1))
+ self.assertTrue(validate.is_integer("-1"))
+ self.assertTrue(validate.is_integer(+1))
+ self.assertTrue(validate.is_integer("+1"))
+ self.assertTrue(validate.is_integer(" 1"))
+ self.assertTrue(validate.is_integer("-1 "))
+ self.assertTrue(validate.is_integer("+1 "))
+
+ self.assertFalse(validate.is_integer(""))
+ self.assertFalse(validate.is_integer("1a"))
+ self.assertFalse(validate.is_integer("a1"))
+ self.assertFalse(validate.is_integer("aaa"))
+ self.assertFalse(validate.is_integer(1.0))
+ self.assertFalse(validate.is_integer("1.0"))
+
+ def test_at_least(self):
+ self.assertTrue(validate.is_integer(5, 5))
+ self.assertTrue(validate.is_integer(5, 4))
+ self.assertTrue(validate.is_integer("5", 5))
+ self.assertTrue(validate.is_integer("5", 4))
+
+ self.assertFalse(validate.is_integer(5, 6))
+ self.assertFalse(validate.is_integer("5", 6))
+
+ def test_at_most(self):
+ self.assertTrue(validate.is_integer(5, None, 5))
+ self.assertTrue(validate.is_integer(5, None, 6))
+ self.assertTrue(validate.is_integer("5", None, 5))
+ self.assertTrue(validate.is_integer("5", None, 6))
+
+ self.assertFalse(validate.is_integer(5, None, 4))
+ self.assertFalse(validate.is_integer("5", None, 4))
+
+ def test_range(self):
+ self.assertTrue(validate.is_integer(5, 5, 5))
+ self.assertTrue(validate.is_integer(5, 4, 6))
+ self.assertTrue(validate.is_integer("5", 5, 5))
+ self.assertTrue(validate.is_integer("5", 4, 6))
+
+ self.assertFalse(validate.is_integer(3, 4, 6))
+ self.assertFalse(validate.is_integer(7, 4, 6))
+ self.assertFalse(validate.is_integer("3", 4, 6))
+ self.assertFalse(validate.is_integer("7", 4, 6))
+
+
+class IsPortNumber(TestCase):
+ def test_valid_port(self):
+ self.assertTrue(validate.is_port_number(1))
+ self.assertTrue(validate.is_port_number("1"))
+ self.assertTrue(validate.is_port_number(65535))
+ self.assertTrue(validate.is_port_number("65535"))
+ self.assertTrue(validate.is_port_number(8192))
+ self.assertTrue(validate.is_port_number(" 8192 "))
+
+ def test_bad_port(self):
+ self.assertFalse(validate.is_port_number(0))
+ self.assertFalse(validate.is_port_number("0"))
+ self.assertFalse(validate.is_port_number(65536))
+ self.assertFalse(validate.is_port_number("65536"))
+ self.assertFalse(validate.is_port_number(-128))
+ self.assertFalse(validate.is_port_number("-128"))
+ self.assertFalse(validate.is_port_number("abcd"))
+
+
+class MatchesRegexp(TestCase):
+ def test_matches_string(self):
+ self.assertTrue(validate.matches_regexp("abcdcba", "^[a-d]+$"))
+
+ def test_matches_regexp(self):
+ self.assertTrue(validate.matches_regexp(
+ "abCDCBa",
+ re.compile("^[a-d]+$", re.IGNORECASE)
+ ))
+
+ def test_not_matches_string(self):
+ self.assertFalse(validate.matches_regexp("abcDcba", "^[a-d]+$"))
+
+ def test_not_matches_regexp(self):
+ self.assertFalse(validate.matches_regexp(
+ "abCeCBa",
+ re.compile("^[a-d]+$", re.IGNORECASE)
+ ))
+
+
+class IsEmptyString(TestCase):
+ def test_empty_string(self):
+ self.assertTrue(validate.is_empty_string(""))
+
+ def test_not_empty_string(self):
+ self.assertFalse(validate.is_empty_string("a"))
+ self.assertFalse(validate.is_empty_string("0"))
+ self.assertFalse(validate.is_empty_string(0))
+
+
+class IsTimeInterval(TestCase):
+ def test_no_reports_for_valid_time_interval(self):
+ for interval in ["0", "1s", "2sec", "3m", "4min", "5h", "6hr"]:
+            self.assertEqual(
+ [],
+ validate.value_time_interval("a")({"a": interval}),
+ "interval: {0}".format(interval)
+ )
+
+ def test_reports_about_invalid_interval(self):
+ assert_report_item_list_equal(
+ validate.value_time_interval("a")({"a": "invalid_value"}),
+ [
+ (
+ severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "a",
+ "option_value": "invalid_value",
+ "allowed_values":
+ "time interval (e.g. 1, 2s, 3m, 4h, ...)"
+ ,
+ },
+ None
+ ),
+ ]
+ )
diff --git a/pcs/lib/tools.py b/pcs/lib/tools.py
index 324047b..cd2d7f9 100644
--- a/pcs/lib/tools.py
+++ b/pcs/lib/tools.py
@@ -4,8 +4,13 @@ from __future__ import (
print_function,
unicode_literals,
)
+import binascii
+import os
+def generate_key(random_bytes_count=32):
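+    # A hex-encoded random key: the default 32 random bytes yield a
+    # 64-character hex string (bytes under Python 3).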
+ return binascii.hexlify(os.urandom(random_bytes_count))
+
def environment_file_to_dict(config):
"""
Parse systemd Environment file. This parser is simplified version of
diff --git a/pcs/lib/validate.py b/pcs/lib/validate.py
new file mode 100644
index 0000000..e572ba9
--- /dev/null
+++ b/pcs/lib/validate.py
@@ -0,0 +1,532 @@
+"""
+This module contains functions useful for validation.
+Example of use (how things play together):
+ >>> option_dict = {"some_option": "A"}
+ >>> validators = [
+ ... is_required("name"),
+ ... value_in("some_option", ["B", "C"])
+ ... ]
+ >>> report_list = run_collection_of_option_validators(
+ ... option_dict,
+ ... validators
+ ... )
+ >>> for report in report_list:
+ ... print(report)
+ ...
+ ...
+ ERROR REQUIRED_OPTION_IS_MISSING: {
+ 'option_type': 'option',
+ 'option_names': ['name']
+ }
+ ERROR INVALID_OPTION_VALUE: {
+ 'option_name': 'some_option',
+ 'option_value': 'A',
+ 'allowed_values': ['B', 'C']
+ }
+
+Sometimes we need to validate the normalized value but report the original
+value. For this purpose there are ValuePair and helpers like values_to_pairs
+and pairs_to_values; a short sketch follows the normalization helpers below.
+
+TODO add parameters to provide forceable errors/warnings for functions that
+    do not support it yet
+"""
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from collections import namedtuple
+import re
+
+from pcs.common.tools import is_string
+from pcs.lib import reports
+from pcs.lib.pacemaker.values import (
+ timeout_to_seconds,
+ validate_id,
+)
+
+
+### normalization
+
+class ValuePair(namedtuple("ValuePair", "original normalized")):
+ """
+ Storage for the original value and its normalized form
+ """
+
+ @staticmethod
+ def get(val):
+ return val if isinstance(val, ValuePair) else ValuePair(val, val)
+
+def values_to_pairs(option_dict, normalize):
+ """
+    Return a dict derived from option_dict where every value is an instance of
+    ValuePair.
+
+    dict option_dict contains values that should be paired with their
+        normalized form
+    callable normalize should take a key and a value and return the normalized
+        form. The function option_value_normalization can be a good base for
+        creating such a callable.
+ """
+ option_dict_with_pairs = {}
+ for key, value in option_dict.items():
+ if not isinstance(value, ValuePair):
+ value = ValuePair(
+ original=value,
+ normalized=normalize(key, value),
+ )
+ option_dict_with_pairs[key] = value
+ return option_dict_with_pairs
+
+def pairs_to_values(option_dict):
+ """
+    Take a dict which has ValuePairs as its values and return a dict with the
+    normalized forms as its values. It is the reverse of values_to_pairs.
+
+    dict option_dict contains ValuePairs as its values
+ """
+ raw_option_dict = {}
+ for key, value in option_dict.items():
+ if isinstance(value, ValuePair):
+ value = value.normalized
+ raw_option_dict[key] = value
+ return raw_option_dict
+
+def option_value_normalization(normalization_map):
+ """
+    Return a function that takes a key and a value and returns the normalized
+    form.
+
+    dict normalization_map maps each key to a function that takes a value and
+        returns its normalized form.
+ """
+ def normalize(key, value):
+ return(
+ value if key not in normalization_map
+ else normalization_map[key](value)
+ )
+ return normalize
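+
+# A minimal sketch of how the normalization helpers above play together
+# (illustrative only; the option name "role" and the upper-casing rule are
+# hypothetical, not taken from real pcs validators):
+#   >>> normalize = option_value_normalization(
+#   ...     {"role": lambda value: value.upper()}
+#   ... )
+#   >>> pairs = values_to_pairs({"role": "master"}, normalize)
+#   >>> pairs["role"].normalized
+#   'MASTER'
+#   >>> pairs_to_values(pairs)
+#   {'role': 'MASTER'}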
+
+### keys validators
+
+def depends_on_option(
+ option_name, prerequisite_option, option_type="", prerequisite_type=""
+):
+ """
+    Get a validator reporting PREREQUISITE_OPTION_IS_MISSING when the
+    option_dict contains the option_name but not the prerequisite_option.
+
+    string option_name -- name of the option to check
+    string prerequisite_option -- name of the option which is a prerequisite
+    string option_type -- describes a type of the option for reporting purposes
+    string prerequisite_type -- describes a type of the prerequisite_option
+        for reporting purposes
+ """
+ def validate(option_dict):
+ if (
+ option_name in option_dict
+ and
+ prerequisite_option not in option_dict
+ ):
+ return [reports.prerequisite_option_is_missing(
+ option_name,
+ prerequisite_option,
+ option_type,
+ prerequisite_type
+ )]
+ return []
+ return validate
+
+def is_required(option_name, option_type=""):
+ """
+    Return a function that takes option_dict and returns a report list
+    (with REQUIRED_OPTION_IS_MISSING when option_dict does not contain
+    option_name).
+
+    string option_name is the name of the option of option_dict to be tested
+ string option_type describes type of option for reporting purposes
+ """
+ def validate(option_dict):
+ if option_name not in option_dict:
+ return [reports.required_option_is_missing(
+ [option_name],
+ option_type,
+ )]
+ return []
+ return validate
+
+def is_required_some_of(option_name_list, option_type=""):
+ """
+    Get a validator reporting REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING when
+    the option_dict does not contain at least one item from the
+    option_name_list.
+
+ iterable option_name_list -- names of options of the option_dict to test
+ string option_type -- describes a type of the option for reporting purposes
+ """
+ def validate(option_dict):
+ found_names = set.intersection(
+ set(option_dict.keys()),
+ set(option_name_list)
+ )
+ if len(found_names) < 1:
+ return [reports.required_option_of_alternatives_is_missing(
+ sorted(option_name_list),
+ option_type,
+ )]
+ return []
+ return validate
+
+def mutually_exclusive(mutually_exclusive_names, option_type="option"):
+ """
+    Return a validator reporting MUTUALLY_EXCLUSIVE_OPTIONS when more than one
+    of mutually_exclusive_names appears in option_dict.
+
+    list|set mutually_exclusive_names contains option names that cannot appear
+        together
+ string option_type describes type of option for reporting purposes
+ """
+ def validate(option_dict):
+ found_names = set.intersection(
+ set(option_dict.keys()),
+ set(mutually_exclusive_names)
+ )
+ if len(found_names) > 1:
+ return [reports.mutually_exclusive_options(
+ sorted(found_names),
+ option_type,
+ )]
+ return []
+ return validate
+
+def names_in(
+ allowed_name_list, name_list, option_type="option",
+ code_to_allow_extra_names=None, allow_extra_names=False
+):
+ """
+    Return a list with an INVALID_OPTION report when name_list contains a name
+    that is not in allowed_name_list.
+
+    list allowed_name_list contains names which are valid
+    list name_list contains names for validation
+    string option_type describes type of option for reporting purposes
+    string code_to_allow_extra_names is a code for forcing invalid names. If it
+        is empty, the INVALID_OPTION report is a non-forceable error. If it is
+        not empty, the report is a forceable error or a warning.
+    bool allow_extra_names is a flag that complements code_to_allow_extra_names
+        and determines whether the INVALID_OPTION report is a forceable error
+        or a warning.
+ """
+ invalid_names = set(name_list) - set(allowed_name_list)
+ if not invalid_names:
+ return []
+
+ create_report = reports.get_problem_creator(
+ code_to_allow_extra_names,
+ allow_extra_names
+ )
+ return [create_report(
+ reports.invalid_option,
+ sorted(invalid_names),
+ sorted(allowed_name_list),
+ option_type,
+ )]
+
+### values validators
+
+def value_cond(
+ option_name, predicate, value_type_or_enum, option_name_for_report=None,
+ code_to_allow_extra_values=None, allow_extra_values=False
+):
+ """
+    Return a validation function that takes option_dict and returns a report
+    list (with INVALID_OPTION_VALUE when the value of option_name does not
+    satisfy the predicate).
+
+    string option_name is the name of the option of option_dict to be tested
+    function predicate takes one parameter, the normalized value
+    list or string value_type_or_enum list of possible values or string
+        description of value type
+    string option_name_for_report is used in reports; option_name is used
+        if it is None
+    string code_to_allow_extra_values is a code for forcing invalid values. If
+        it is empty, the INVALID_OPTION_VALUE report is a non-forceable error.
+        If it is not empty, the report is a forceable error or a warning.
+    bool allow_extra_values is a flag that complements code_to_allow_extra_values
+        and determines whether the INVALID_OPTION_VALUE report is a forceable
+        error or a warning.
+ """
+ @_if_option_exists(option_name)
+ def validate(option_dict):
+ value = ValuePair.get(option_dict[option_name])
+
+ if not predicate(value.normalized):
+ create_report = reports.get_problem_creator(
+ code_to_allow_extra_values,
+ allow_extra_values
+ )
+ return [create_report(
+ reports.invalid_option_value,
+ option_name_for_report if option_name_for_report is not None
+ else option_name
+ ,
+ value.original,
+ value_type_or_enum,
+ )]
+
+ return []
+ return validate
+
+def value_empty_or_valid(option_name, validator):
+ """
+ Get a validator running the specified validator if the value is not empty
+
+ string option_name -- name of the option to check
+ function validator -- validator to run when the value is not an empty string
+ """
+ @_if_option_exists(option_name)
+ def validate(option_dict):
+ value = ValuePair.get(option_dict[option_name])
+ return (
+ [] if is_empty_string(value.normalized)
+ else validator(option_dict)
+ )
+ return validate
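+
+# A minimal composition sketch (illustrative only): accept an empty "port"
+# value, otherwise require a valid port number.
+#   >>> skip_empty = value_empty_or_valid("port", value_port_number("port"))
+#   >>> skip_empty({"port": ""})
+#   []
+#   >>> skip_empty({"port": "1234"})
+#   []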
+
+def value_id(option_name, option_name_for_report=None, id_provider=None):
+ """
+ Get a validator reporting ID errors and optionally booking IDs along the way
+
+ string option_name -- name of the option to check
+    string option_name_for_report -- substituted by the option_name if not set
+ IdProvider id_provider -- used to check id uniqueness if set
+ """
+ @_if_option_exists(option_name)
+ def validate(option_dict):
+ value = ValuePair.get(option_dict[option_name])
+ report_list = []
+ validate_id(value.normalized, option_name_for_report, report_list)
+ if id_provider is not None and not report_list:
+ report_list.extend(
+ id_provider.book_ids(value.normalized)
+ )
+ return report_list
+ return validate
+
+def value_in(
+ option_name, allowed_values, option_name_for_report=None,
+ code_to_allow_extra_values=None, allow_extra_values=False
+):
+ """
+    Special case of value_cond: the returned function checks whether the value
+    is included in allowed_values. If not, a list of ReportItems is returned.
+
+    option_name -- string, name of option to check
+    allowed_values -- list of strings, list of possible values
+    option_name_for_report -- string, used in reports; option_name is used
+        if it is None
+    code_to_allow_extra_values -- string, code for forcing invalid values. If
+        it is empty, the INVALID_OPTION_VALUE report is a non-forceable error.
+        If it is not empty, the report is a forceable error or a warning.
+    allow_extra_values -- bool, flag that complements code_to_allow_extra_values
+        and determines whether the INVALID_OPTION_VALUE report is a forceable
+        error or a warning.
+ """
+ return value_cond(
+ option_name,
+ lambda normalized_value: normalized_value in allowed_values,
+ allowed_values,
+ option_name_for_report=option_name_for_report,
+ code_to_allow_extra_values=code_to_allow_extra_values,
+ allow_extra_values=allow_extra_values,
+ )
+
+def value_nonnegative_integer(
+ option_name, option_name_for_report=None,
+ code_to_allow_extra_values=None, allow_extra_values=False
+):
+ """
+ Get a validator reporting INVALID_OPTION_VALUE when the value is not
+ an integer greater than -1
+
+ string option_name -- name of the option to check
+    string option_name_for_report -- substituted by the option_name if not set
+ string code_to_allow_extra_values -- create a report forceable by this code
+ bool allow_extra_values -- create a warning instead of an error if True
+ """
+ return value_cond(
+ option_name,
+ lambda value: is_integer(value, 0),
+ "a non-negative integer",
+ option_name_for_report=option_name_for_report,
+ code_to_allow_extra_values=code_to_allow_extra_values,
+ allow_extra_values=allow_extra_values,
+ )
+
+def value_not_empty(
+ option_name, value_type_or_enum, option_name_for_report=None,
+ code_to_allow_extra_values=None, allow_extra_values=False
+):
+ """
+ Get a validator reporting INVALID_OPTION_VALUE when the value is empty
+
+    string option_name -- name of the option to check
+    mixed value_type_or_enum -- list of possible values or a string description
+        of the value type, used in reports
+    string option_name_for_report -- substituted by the option_name if not set
+ string code_to_allow_extra_values -- create a report forceable by this code
+ bool allow_extra_values -- create a warning instead of an error if True
+ """
+ return value_cond(
+ option_name,
+ lambda value: not is_empty_string(value),
+ value_type_or_enum,
+ option_name_for_report=option_name_for_report,
+ code_to_allow_extra_values=code_to_allow_extra_values,
+ allow_extra_values=allow_extra_values,
+ )
+
+def value_port_number(
+ option_name, option_name_for_report=None,
+ code_to_allow_extra_values=None, allow_extra_values=False
+):
+ """
+ Get a validator reporting INVALID_OPTION_VALUE when the value is not a TCP
+ or UDP port number
+
+ string option_name -- name of the option to check
+    string option_name_for_report -- substituted by the option_name if not set
+ string code_to_allow_extra_values -- create a report forceable by this code
+ bool allow_extra_values -- create a warning instead of an error if True
+ """
+ return value_cond(
+ option_name,
+ is_port_number,
+ "a port number (1-65535)",
+ option_name_for_report=option_name_for_report,
+ code_to_allow_extra_values=code_to_allow_extra_values,
+ allow_extra_values=allow_extra_values,
+ )
+
+def value_port_range(
+ option_name, option_name_for_report=None,
+ code_to_allow_extra_values=None, allow_extra_values=False
+):
+ """
+ Get a validator reporting INVALID_OPTION_VALUE when the value is not a TCP
+ or UDP port range
+
+ string option_name -- name of the option to check
+    string option_name_for_report -- substituted by the option_name if not set
+ string code_to_allow_extra_values -- create a report forceable by this code
+ bool allow_extra_values -- create a warning instead of an error if True
+ """
+ return value_cond(
+ option_name,
+ lambda value: (
+ matches_regexp(value, "^[0-9]+-[0-9]+$")
+ and
+ all([is_port_number(part) for part in value.split("-", 1)])
+ ),
+ "port-port",
+ option_name_for_report=option_name_for_report,
+ code_to_allow_extra_values=code_to_allow_extra_values,
+ allow_extra_values=allow_extra_values,
+ )
+
+def value_positive_integer(
+ option_name, option_name_for_report=None,
+ code_to_allow_extra_values=None, allow_extra_values=False
+):
+ """
+ Get a validator reporting INVALID_OPTION_VALUE when the value is not
+ an integer greater than zero
+
+ string option_name -- name of the option to check
+    string option_name_for_report -- substituted by the option_name if not set
+ string code_to_allow_extra_values -- create a report forceable by this code
+ bool allow_extra_values -- create a warning instead of an error if True
+ """
+ return value_cond(
+ option_name,
+ lambda value: is_integer(value, 1),
+ "a positive integer",
+ option_name_for_report=option_name_for_report,
+ code_to_allow_extra_values=code_to_allow_extra_values,
+ allow_extra_values=allow_extra_values,
+ )
+
+def value_time_interval(option_name, option_name_for_report=None):
+ return value_cond(
+ option_name,
+ lambda normalized_value:
+ timeout_to_seconds(normalized_value) is not None
+ ,
+ "time interval (e.g. 1, 2s, 3m, 4h, ...)",
+ option_name_for_report=option_name_for_report,
+ )
+
+### tools and predicates
+
+def run_collection_of_option_validators(option_dict, validator_list):
+ """
+ Return a list with reports (ReportItems) about problems inside items of
+ option_dict.
+
+    dict option_dict is the source of values to validate
+    list validator_list contains callables that take option_dict and return a
+        list of reports
+ """
+ report_list = []
+ for validate in validator_list:
+ report_list.extend(validate(option_dict))
+ return report_list
+
+def is_empty_string(value):
+ """
+ Check if the specified value is an empty string
+
+ mixed value -- value to check
+ """
+ return is_string(value) and not value
+
+def is_integer(value, at_least=None, at_most=None):
+ """
+ Check if the specified value is an integer, optionally check a range
+
+    mixed value -- string, int or float, value to check
+    int at_least -- minimal allowed value, no lower bound if None
+    int at_most -- maximal allowed value, no upper bound if None
+ """
+ try:
+ if isinstance(value, float):
+ return False
+ value_int = int(value)
+ if at_least is not None and value_int < at_least:
+ return False
+ if at_most is not None and value_int > at_most:
+ return False
+ except ValueError:
+ return False
+ return True
+
+def is_port_number(value):
+ """
+ Check if the specified value is a TCP or UDP port number
+
+ mixed value -- string, int or float, value to check
+ """
+ return is_integer(value, 1, 65535)
+
+def matches_regexp(value, regexp):
+ """
+ Check if the specified value matches the specified regular expression
+
+ mixed value -- string, int or float, value to check
+ mixed regexp -- string or RegularExpression to match the value against
+ """
+ if not hasattr(regexp, "match"):
+ regexp = re.compile(regexp)
+ return regexp.match(value) is not None
+
+def _if_option_exists(option_name):
+ def params_wrapper(validate_func):
+ def prepare(option_dict):
+ if option_name not in option_dict:
+ return []
+ return validate_func(option_dict)
+ return prepare
+ return params_wrapper
diff --git a/pcs/lib/xml_tools.py b/pcs/lib/xml_tools.py
new file mode 100644
index 0000000..67e7ca1
--- /dev/null
+++ b/pcs/lib/xml_tools.py
@@ -0,0 +1,86 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+def get_root(tree):
+    # ElementTree has getroot, Element has getroottree
+ return tree.getroot() if hasattr(tree, "getroot") else tree.getroottree()
+
+def find_parent(element, tag_names):
+ """
+ Find parent of an element based on parent's tag name. Return the parent
+ element or None if such element does not exist.
+
+ etree element -- the element whose parent we want to find
+ strings tag_names -- allowed tag names of parent we are looking for
+ """
+ candidate = element
+ while True:
+ if candidate is None or candidate.tag in tag_names:
+ return candidate
+ candidate = candidate.getparent()
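+
+# A minimal usage sketch (illustrative only; the tag names are hypothetical):
+#   >>> cib = etree.fromstring(
+#   ...     '<cib><resources><primitive id="R"/></resources></cib>'
+#   ... )
+#   >>> find_parent(cib.find(".//primitive"), ["resources"]).tag
+#   'resources'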
+
+def get_sub_element(element, sub_element_tag, new_id=None, new_index=None):
+ """
+    Return the FIRST sub-element with tag sub_element_tag of element. Create a
+    new element if one does not exist yet.
+
+    element -- parent element
+    sub_element_tag -- tag of the wanted sub-element
+ new_id -- id of the new element, None means no id will be set
+ new_index -- where the new element will be added, None means at the end
+ """
+ sub_element = element.find("./{0}".format(sub_element_tag))
+ if sub_element is None:
+ sub_element = etree.Element(sub_element_tag)
+ if new_id:
+ sub_element.set("id", new_id)
+ if new_index is None:
+ element.append(sub_element)
+ else:
+ element.insert(new_index, sub_element)
+ return sub_element
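+
+# A minimal usage sketch (illustrative only; the tag name and id are
+# hypothetical):
+#   >>> parent = etree.fromstring("<resource/>")
+#   >>> sub = get_sub_element(parent, "meta_attributes", new_id="r-meta")
+#   >>> sub.tag
+#   'meta_attributes'
+#   >>> sub is get_sub_element(parent, "meta_attributes")
+#   True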
+
+def export_attributes(element):
+ return dict((key, value) for key, value in element.attrib.items())
+
+def update_attribute_remove_empty(element, name, value):
+ """
+ Set an attribute's value or remove the attribute if the value is ""
+
+ etree element -- element to be updated
+ string name -- attribute name
+ mixed value -- attribute value
+ """
+ if len(value) < 1:
+ if name in element.attrib:
+ del element.attrib[name]
+ return
+ element.set(name, value)
+
+def update_attributes_remove_empty(element, attributes):
+    """
+    Set attributes' values or remove an attribute if its new value is ""
+
+    etree element -- element to be updated
+    dict attributes -- new attributes' values
+    """
+    for name, value in attributes.items():
+ update_attribute_remove_empty(element, name, value)
+
+def etree_element_attibutes_to_dict(etree_el, required_key_list):
+ """
+    Return a dictionary of the attributes of etree_el listed in
+    required_key_list, where keys are attribute names and values are attribute
+    values, or None if an attribute is not present.
+
+    etree_el -- etree element from which attributes should be extracted
+    required_key_list -- list of strings, attribute names which should be
+ extracted
+ """
+ return dict([(key, etree_el.get(key)) for key in required_key_list])
diff --git a/pcs/node.py b/pcs/node.py
index 729ea35..6bfe1bc 100644
--- a/pcs/node.py
+++ b/pcs/node.py
@@ -12,142 +12,86 @@ from pcs import (
usage,
utils,
)
-from pcs.cli.common.errors import CmdLineInputError
+from pcs.cli.common.errors import (
+ CmdLineInputError,
+ ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE,
+)
from pcs.cli.common.parse_args import prepare_options
from pcs.lib.errors import LibraryError
-import pcs.lib.pacemaker as lib_pacemaker
-from pcs.lib.pacemaker_values import get_valid_timeout_seconds
+import pcs.lib.pacemaker.live as lib_pacemaker
-def node_cmd(argv):
- if len(argv) == 0:
+def node_cmd(lib, argv, modifiers):
+ if len(argv) < 1:
usage.node()
sys.exit(1)
- sub_cmd = argv.pop(0)
- if sub_cmd == "help":
- usage.node(argv)
- elif sub_cmd == "maintenance":
- node_maintenance(argv)
- elif sub_cmd == "unmaintenance":
- node_maintenance(argv, False)
- elif sub_cmd == "standby":
- node_standby(argv)
- elif sub_cmd == "unstandby":
- node_standby(argv, False)
- elif sub_cmd == "attribute":
- if "--name" in utils.pcs_options and len(argv) > 1:
- usage.node("attribute")
- sys.exit(1)
- filter_attr=utils.pcs_options.get("--name", None)
- if len(argv) == 0:
- attribute_show_cmd(filter_attr=filter_attr)
- elif len(argv) == 1:
- attribute_show_cmd(argv.pop(0), filter_attr=filter_attr)
- else:
- attribute_set_cmd(argv.pop(0), argv)
- elif sub_cmd == "utilization":
- if "--name" in utils.pcs_options and len(argv) > 1:
- usage.node("utilization")
- sys.exit(1)
- filter_name=utils.pcs_options.get("--name", None)
- if len(argv) == 0:
- print_node_utilization(filter_name=filter_name)
- elif len(argv) == 1:
- print_node_utilization(argv.pop(0), filter_name=filter_name)
+ sub_cmd, argv_next = argv[0], argv[1:]
+
+ try:
+ if sub_cmd == "help":
+ usage.node([" ".join(argv_next)] if argv_next else [])
+ elif sub_cmd == "maintenance":
+ node_maintenance_cmd(lib, argv_next, modifiers, True)
+ elif sub_cmd == "unmaintenance":
+ node_maintenance_cmd(lib, argv_next, modifiers, False)
+ elif sub_cmd == "standby":
+ node_standby_cmd(lib, argv_next, modifiers, True)
+ elif sub_cmd == "unstandby":
+ node_standby_cmd(lib, argv_next, modifiers, False)
+ elif sub_cmd == "attribute":
+ node_attribute_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "utilization":
+ node_utilization_cmd(lib, argv_next, modifiers)
+ # pcs-to-pcsd use only
+ elif sub_cmd == "pacemaker-status":
+ node_pacemaker_status(lib, argv_next, modifiers)
else:
- try:
- set_node_utilization(argv.pop(0), argv)
- except CmdLineInputError as e:
- utils.exit_on_cmdline_input_errror(e, "node", "utilization")
- # pcs-to-pcsd use only
- elif sub_cmd == "pacemaker-status":
- node_pacemaker_status()
- else:
- usage.node()
- sys.exit(1)
+ raise CmdLineInputError()
+ except LibraryError as e:
+ utils.process_library_reports(e.args)
+ except CmdLineInputError as e:
+ utils.exit_on_cmdline_input_errror(e, "node", sub_cmd)
+def node_attribute_cmd(lib, argv, modifiers):
+ if modifiers["name"] and len(argv) > 1:
+ raise CmdLineInputError()
+ if len(argv) == 0:
+ attribute_show_cmd(filter_attr=modifiers["name"])
+ elif len(argv) == 1:
+ attribute_show_cmd(argv.pop(0), filter_attr=modifiers["name"])
+ else:
+ attribute_set_cmd(argv.pop(0), argv)
-def node_maintenance(argv, on=True):
- action = ["-v", "on"] if on else ["-D"]
+def node_utilization_cmd(lib, argv, modifiers):
+ if modifiers["name"] and len(argv) > 1:
+ raise CmdLineInputError()
+ if len(argv) == 0:
+ print_node_utilization(filter_name=modifiers["name"])
+ elif len(argv) == 1:
+ print_node_utilization(argv.pop(0), filter_name=modifiers["name"])
+ else:
+ set_node_utilization(argv.pop(0), argv)
- cluster_nodes = utils.getNodesFromPacemaker()
- nodes = []
- failed_count = 0
- if "--all" in utils.pcs_options:
- nodes = cluster_nodes
+def node_maintenance_cmd(lib, argv, modifiers, enable):
+ if len(argv) > 0 and modifiers["all"]:
+ raise CmdLineInputError(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
+ if modifiers["all"]:
+ lib.node.maintenance_unmaintenance_all(enable, modifiers["wait"])
elif argv:
- for node in argv:
- if node not in cluster_nodes:
- utils.err(
- "Node '{0}' does not appear to exist in "
- "configuration".format(node),
- False
- )
- failed_count += 1
- else:
- nodes.append(node)
+ lib.node.maintenance_unmaintenance_list(enable, argv, modifiers["wait"])
else:
- nodes.append("")
-
- if failed_count > 0:
- sys.exit(1)
+ lib.node.maintenance_unmaintenance_local(enable, modifiers["wait"])
- for node in nodes:
- node_attr = ["-N", node] if node else []
- output, retval = utils.run(
- ["crm_attribute", "-t", "nodes", "-n", "maintenance"] + action +
- node_attr
- )
- if retval != 0:
- node_name = ("node '{0}'".format(node)) if argv else "current node"
- failed_count += 1
- if on:
- utils.err(
- "Unable to put {0} to maintenance mode: {1}".format(
- node_name, output
- ),
- False
- )
- else:
- utils.err(
- "Unable to remove {0} from maintenance mode: {1}".format(
- node_name, output
- ),
- False
- )
- if failed_count > 0:
- sys.exit(1)
-
-def node_standby(argv, standby=True):
- if (len(argv) > 1) or (len(argv) > 0 and "--all" in utils.pcs_options):
- usage.node(["standby" if standby else "unstandby"])
- sys.exit(1)
-
- all_nodes = "--all" in utils.pcs_options
- node_list = [argv[0]] if argv else []
- wait = False
- timeout = None
- if "--wait" in utils.pcs_options:
- wait = True
- timeout = utils.pcs_options["--wait"]
-
- try:
- if wait:
- lib_pacemaker.ensure_resource_wait_support(utils.cmd_runner())
- valid_timeout = get_valid_timeout_seconds(timeout)
- if standby:
- lib_pacemaker.nodes_standby(
- utils.cmd_runner(), node_list, all_nodes
- )
- else:
- lib_pacemaker.nodes_unstandby(
- utils.cmd_runner(), node_list, all_nodes
- )
- if wait:
- lib_pacemaker.wait_for_resources(utils.cmd_runner(), valid_timeout)
- except LibraryError as e:
- utils.process_library_reports(e.args)
+def node_standby_cmd(lib, argv, modifiers, enable):
+ if len(argv) > 0 and modifiers["all"]:
+ raise CmdLineInputError(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE)
+ if modifiers["all"]:
+ lib.node.standby_unstandby_all(enable, modifiers["wait"])
+ elif argv:
+ lib.node.standby_unstandby_list(enable, argv, modifiers["wait"])
+ else:
+ lib.node.standby_unstandby_local(enable, modifiers["wait"])
def set_node_utilization(node, argv):
cib = utils.get_cib_dom()
@@ -213,13 +157,10 @@ def print_node_utilization(filter_node=None, filter_name=None):
for node in sorted(utilization):
print(" {0}: {1}".format(node, utilization[node]))
-def node_pacemaker_status():
- try:
- print(json.dumps(
- lib_pacemaker.get_local_node_status(utils.cmd_runner())
- ))
- except LibraryError as e:
- utils.process_library_reports(e.args)
+def node_pacemaker_status(lib, argv, modifiers):
+ print(json.dumps(
+ lib_pacemaker.get_local_node_status(utils.cmd_runner())
+ ))
def attribute_show_cmd(filter_node=None, filter_attr=None):
node_attributes = utils.get_node_attributes(
@@ -230,11 +171,7 @@ def attribute_show_cmd(filter_node=None, filter_attr=None):
attribute_print(node_attributes)
def attribute_set_cmd(node, argv):
- try:
- attrs = prepare_options(argv)
- except CmdLineInputError as e:
- utils.exit_on_cmdline_input_errror(e, "node", "attribute")
- for name, value in attrs.items():
+ for name, value in prepare_options(argv).items():
utils.set_node_attribute(name, value, node)
def attribute_print(node_attributes):
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 0cf1be4..4edfc72 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "November 2016" "pcs 0.9.155" "System Administration Utilities"
+.TH PCS "8" "May 2017" "pcs 0.9.158" "System Administration Utilities"
.SH NAME
pcs \- pacemaker/corosync configuration system
.SH SYNOPSIS
@@ -19,73 +19,76 @@ Print all network traffic and external commands run.
.TP
\fB\-\-version\fR
Print pcs version information.
+.TP
+\fB\-\-request\-timeout=<timeout>\fR
+Timeout for each outgoing request to another node in seconds. Default is 60s.
.SS "Commands:"
.TP
cluster
-Configure cluster options and nodes.
+ Configure cluster options and nodes.
.TP
resource
-Manage cluster resources.
+ Manage cluster resources.
.TP
stonith
-Configure fence devices.
+ Manage fence devices.
.TP
constraint
-Set resource constraints.
+ Manage resource constraints.
.TP
property
-Set pacemaker properties.
+ Manage pacemaker properties.
.TP
acl
-Set pacemaker access control lists.
+ Manage pacemaker access control lists.
.TP
qdevice
-Manage quorum device provider on the local host.
+ Manage quorum device provider on the local host.
.TP
quorum
-Manage cluster quorum settings.
+ Manage cluster quorum settings.
.TP
booth
-Manage booth (cluster ticket manager).
+ Manage booth (cluster ticket manager).
.TP
status
-View cluster status.
+ View cluster status.
.TP
config
-View and manage cluster configuration.
+ View and manage cluster configuration.
.TP
pcsd
-Manage pcs daemon.
+ Manage pcs daemon.
.TP
node
-Manage cluster nodes.
+ Manage cluster nodes.
.TP
alert
-Manage pacemaker alerts.
+ Manage pacemaker alerts.
.SS "resource"
.TP
[show [<resource id>] | \fB\-\-full\fR | \fB\-\-groups\fR | \fB\-\-hide\-inactive\fR]
Show all currently configured resources or if a resource is specified show the options for the configured resource. If \fB\-\-full\fR is specified, all configured resource options will be displayed. If \fB\-\-groups\fR is specified, only show groups (and their resources). If \fB\-\-hide\-inactive\fR is specified, only show active resources.
.TP
list [filter] [\fB\-\-nodesc\fR]
-Show list of all available resource agents (if filter is provided then only resource agents matching the filter will be shown). If --nodesc is used then descriptions of resource agents are not printed.
+Show list of all available resource agents (if filter is provided then only resource agents matching the filter will be shown). If \fB\-\-nodesc\fR is used then descriptions of resource agents are not printed.
.TP
-describe [<standard>:[<provider>:]]<type>
-Show options for the specified resource.
+describe [<standard>:[<provider>:]]<type> [\fB\-\-full\fR]
+Show options for the specified resource. If \fB\-\-full\fR is specified, all options including advanced ones are shown.
.TP
-create <resource id> [<standard>:[<provider>:]]<type> [resource options] [op <operation action> <operation options> [<operation action> <operation options>]...] [meta <meta options>...] [\fB\-\-clone\fR <clone options> | \fB\-\-master\fR <master options> | \fB\-\-group\fR <group id> [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>]] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
-Create specified resource. If \fB\-\-clone\fR is used a clone resource is created. If \fB\-\-master\fR is specified a master/slave resource is created. If \fB\-\-group\fR is specified the resource is added to the group named. You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resource relatively to some resource already existing in the group. If \fB\-\-disabled\fR is specified the resource is not started automatically. If \fB\-\-wait\fR is specifie [...]
+create <resource id> [<standard>:[<provider>:]]<type> [resource options] [\fBop\fR <operation action> <operation options> [<operation action> <operation options>]...] [\fBmeta\fR <meta options>...] [\fBclone\fR [<clone options>] | \fBmaster\fR [<master options>] | \fB\-\-group\fR <group id> [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>] | \fBbundle\fR <bundle id>] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
+Create specified resource. If \fBclone\fR is used a clone resource is created. If \fBmaster\fR is specified a master/slave resource is created. If \fB\-\-group\fR is specified the resource is added to the group named. You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resource relatively to some resource already existing in the group. If \fBbundle\fR is specified, resource will be created inside of the specified bundle. If \fB\-\-disabled\fR is specified [...]
Example: Create a new resource called 'VirtualIP' with IP address 192.168.0.99, netmask of 32, monitored everything 30 seconds, on eth2: pcs resource create VirtualIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 nic=eth2 op monitor interval=30s
.TP
delete <resource id|group id|master id|clone id>
Deletes the resource, group, master or clone (and all resources within the group/master/clone).
.TP
-enable <resource id> [\fB\-\-wait\fR[=n]]
-Allow the cluster to start the resource. Depending on the rest of the configuration (constraints, options, failures, etc), the resource may remain stopped. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resource to start and then return 0 if the resource is started, or 1 if the resource has not yet started. If 'n' is not specified it defaults to 60 minutes.
+enable <resource id>... [\fB\-\-wait\fR[=n]]
+Allow the cluster to start the resources. Depending on the rest of the configuration (constraints, options, failures, etc), the resources may remain stopped. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resources to start and then return 0 if the resources are started, or 1 if the resources have not yet started. If 'n' is not specified it defaults to 60 minutes.
.TP
-disable <resource id> [\fB\-\-wait\fR[=n]]
-Attempt to stop the resource if it is running and forbid the cluster from starting it again. Depending on the rest of the configuration (constraints, options, failures, etc), the resource may remain started. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resource to stop and then return 0 if the resource is stopped or 1 if the resource has not stopped. If 'n' is not specified it defaults to 60 minutes.
+disable <resource id>... [\fB\-\-wait\fR[=n]]
+Attempt to stop the resources if they are running and forbid the cluster from starting them again. Depending on the rest of the configuration (constraints, options, failures, etc), the resources may remain started. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resources to stop and then return 0 if the resources are stopped or 1 if the resources have not stopped. If 'n' is not specified it defaults to 60 minutes.
.TP
restart <resource id> [node] [\fB\-\-wait\fR=n]
Restart the resource specified. If a node is specified and if the resource is a clone or master/slave it will be restarted only on the node specified. If \fB\-\-wait\fR is specified, then we will wait up to 'n' seconds for the resource to be restarted and return 0 if the restart was successful or 1 if it was not.
@@ -103,7 +106,7 @@ debug\-demote <resource id> [\fB\-\-full\fR]
This command will force the specified resource to be demoted on this node ignoring the cluster recommendations and print the output from demoting the resource. Using \fB\-\-full\fR will give more detailed output. This is mainly used for debugging resources that fail to demote.
.TP
debug\-monitor <resource id> [\fB\-\-full\fR]
-This command will force the specified resource to be moniored on this node ignoring the cluster recommendations and print the output from monitoring the resource. Using \fB\-\-full\fR will give more detailed output. This is mainly used for debugging resources that fail to be monitored.
+This command will force the specified resource to be monitored on this node ignoring the cluster recommendations and print the output from monitoring the resource. Using \fB\-\-full\fR will give more detailed output. This is mainly used for debugging resources that fail to be monitored.
.TP
move <resource id> [destination node] [\fB\-\-master\fR] [lifetime=<lifetime>] [\fB\-\-wait\fR[=n]]
Move the resource off the node it is currently running on by creating a \-INFINITY location constraint to ban the node. If destination node is specified the resource will be moved to that node by creating an INFINITY location constraint to prefer the destination node. If \fB\-\-master\fR is used the scope of the command is limited to the master role and you must use the master id (instead of the resource id). If lifetime is specified then the constraint will expire after that time, ot [...]
@@ -145,13 +148,13 @@ group add <group id> <resource id> [resource id] ... [resource id] [\fB\-\-befor
Add the specified resource to the group, creating the group if it does not exist. If the resource is present in another group it is moved to the new group. You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resources relatively to some resource already existing in the group. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error. [...]
.TP
group remove <group id> <resource id> [resource id] ... [resource id] [\fB\-\-wait\fR[=n]]
-Remove the specified resource(s) from the group, removing the group if it no resources remain. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
+Remove the specified resource(s) from the group, removing the group if no resources remain in it. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
.TP
ungroup <group id> [resource id] ... [resource id] [\fB\-\-wait\fR[=n]]
Remove the group (note: this does not remove any resources from the cluster) or if resources are specified, remove the specified resources from the group. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
.TP
clone <resource id | group id> [clone options]... [\fB\-\-wait\fR[=n]]
-Setup up the specified resource or group as a clone. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting clone instances if appropriate) and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
+Set up the specified resource or group as a clone. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting clone instances if appropriate) and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
.TP
unclone <resource id | group id> [\fB\-\-wait\fR[=n]]
Remove the clone which contains the specified group or resource (the resource or group will not be removed). If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including stopping clone instances if appropriate) and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
@@ -159,17 +162,23 @@ Remove the clone which contains the specified group or resource (the resource or
master [<master/slave id>] <resource id | group id> [options] [\fB\-\-wait\fR[=n]]
Configure a resource or group as a multi\-state (master/slave) resource. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting and promoting resource instances if appropriate) and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes. Note: to remove a master you must remove the resource/group it contains.
.TP
-manage <resource id> ... [resource n]
-Set resources listed to managed mode (default).
+bundle create <bundle id> [container [<container type>] <container options>] [network <network options>] [port\-map <port options>]... [storage\-map <storage options>]... [\fB\-\-wait\fR[=n]]
+Create a new bundle encapsulating no resources. The bundle can be used either as it is or a resource may be put into it at any time. If the container type is not specified, it defaults to 'docker'. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the bundle to start and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
+.TP
+bundle update <bundle id> [container <container options>] [network <network options>] [port\-map (add <port options>) | (remove <id>...)]... [storage\-map (add <storage options>) | (remove <id>...)]... [\fB\-\-wait\fR[=n]]
+Add, remove or change options to specified bundle. If you wish to update a resource encapsulated in the bundle, use the 'pcs resource update' command instead and specify the resource id. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
.TP
-unmanage <resource id> ... [resource n]
-Set resources listed to unmanaged mode.
+manage <resource id>... [\fB\-\-monitor\fR]
+Set resources listed to managed mode (default). If \fB\-\-monitor\fR is specified, enable all monitor operations of the resources.
+.TP
+unmanage <resource id>... [\fB\-\-monitor\fR]
+Set resources listed to unmanaged mode. When a resource is in unmanaged mode, the cluster is not allowed to start nor stop the resource. If \fB\-\-monitor\fR is specified, disable all monitor operations of the resources.
.TP
defaults [options]
Set default values for resources, if no options are passed, lists currently configured defaults.
.TP
cleanup [<resource id>] [\fB\-\-node\fR <node>]
-Cleans up the resource in the lrmd (useful to reset the resource status and failcount). This tells the cluster to forget the operation history of a resource and re-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If a resource id is not specified then all resources/stonith devices will be cleaned up. If a node is not specified then resources on all nodes will be cleaned up.
+Make the cluster forget the operation history of the resource and re\-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If a resource id is not specified then all resources/stonith devices will be cleaned up. If a node is not specified then resources/stonith devices on all nodes will be cleaned up.
.TP
failcount show <resource id> [node]
Show current failcount for specified resource from all nodes or only on specified node.
@@ -177,7 +186,7 @@ Show current failcount for specified resource from all nodes or only on specifie
failcount reset <resource id> [node]
Reset failcount for specified resource on all nodes or only on specified node. This tells the cluster to forget how many times a resource has failed in the past. This may allow the resource to be started or moved to a more preferred location.
.TP
-relocate dry-run [resource1] [resource2] ...
+relocate dry\-run [resource1] [resource2] ...
The same as 'relocate run' but has no effect on the cluster.
.TP
relocate run [resource1] [resource2] ...
@@ -194,9 +203,9 @@ Add specified utilization options to specified resource. If resource is not spec
.SS "cluster"
.TP
auth [node] [...] [\fB\-u\fR username] [\fB\-p\fR password] [\fB\-\-force\fR] [\fB\-\-local\fR]
-Authenticate pcs to pcsd on nodes specified, or on all nodes configured in corosync.conf if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root). By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other). Using \fB\-\-force\fR forces re-authentication to occur.
+Authenticate pcs to pcsd on nodes specified, or on all nodes configured in the local cluster if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root). By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other). Using \fB\-\-force\fR forces re\-authentication to occur.
.TP
-setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1-altaddr]> [<node2[,node2-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\ [...]
+setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1\-altaddr]> [<node2[,node2\-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [...]
Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-wait\fR will wait up to 'n' seconds for the nodes to start, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for CMAN clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the syste [...]
\fB\-\-ipv6\fR will configure corosync to use ipv6 (instead of ipv4). This option is not supported on CMAN clusters.
@@ -219,7 +228,7 @@ Configuring Redundant Ring Protocol (RRP)
When using udpu specifying nodes, specify the ring 0 address first
followed by a ',' and then the ring 1 address.
-Example: pcs cluster setup \-\-name cname nodeA-0,nodeA-1 nodeB-0,nodeB-1
+Example: pcs cluster setup \-\-name cname nodeA\-0,nodeA\-1 nodeB\-0,nodeB\-1
When using udp, using \fB\-\-addr0\fR and \fB\-\-addr1\fR will allow you to configure
rrp mode for corosync. It's recommended to use a network (instead of
@@ -229,41 +238,41 @@ be used around the cluster. \fB\-\-mcast0\fR defaults to 239.255.1.1 and
ttl defaults to 1. If \fB\-\-broadcast\fR is specified, \fB\-\-mcast0/1\fR,
\fB\-\-mcastport0/1\fR & \fB\-\-ttl0/1\fR are ignored.
.TP
-start [\fB\-\-all\fR] [node] [...] [\fB\-\-wait\fR[=<n>]]
+start [\fB\-\-all\fR | <node>... ] [\fB\-\-wait\fR[=<n>]]
Start corosync & pacemaker on specified node(s), if a node is not specified then corosync & pacemaker are started on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are started on all nodes. If \fB\-\-wait\fR is specified, wait up to 'n' seconds for nodes to start.
.TP
-stop [\fB\-\-all\fR] [node] [...]
-Stop corosync & pacemaker on specified node(s), if a node is not specified then corosync & pacemaker are stopped on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are stopped on all nodes.
+stop [\fB\-\-all\fR | <node>... ] [\fB\-\-request\-timeout\fR=<seconds>]
+Stop corosync & pacemaker on specified node(s), if a node is not specified then corosync & pacemaker are stopped on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are stopped on all nodes. If the cluster is running resources which take long time to stop, the request may time out before the cluster actually stops. In that case you should consider setting \fB\-\-request\-timeout\fR to a suitable value.
.TP
kill
Force corosync and pacemaker daemons to stop on the local node (performs kill \-9). Note that init system (e.g. systemd) can detect that cluster is not running and start it again. If you want to stop cluster on a node, run pcs cluster stop on that node.
.TP
-enable [\fB\-\-all\fR] [node] [...]
-Configure corosync & pacemaker to run on node boot on specified node(s), if node is not specified then corosync & pacemaker are enabled on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are enabled on all nodes.
-.TP
-disable [\fB\-\-all\fR] [node] [...]
-Configure corosync & pacemaker to not run on node boot on specified node(s), if node is not specified then corosync & pacemaker are disabled on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are disabled on all nodes. Note: this is the default after installation.
+enable [\fB\-\-all\fR | <node>... ]
+Configure cluster to run on node boot on specified node(s). If node is not specified then cluster is enabled on the local node. If \fB\-\-all\fR is specified then cluster is enabled on all nodes.
.TP
-remote-node add <hostname> <resource id> [options]
-Enables the specified resource as a remote-node resource on the specified hostname (hostname should be the same as 'uname -n').
-.TP
-remote\-node remove <hostname>
-Disables any resources configured to be remote\-node resource on the specified hostname (hostname should be the same as 'uname -n').
+disable [\fB\-\-all\fR | <node>... ]
+Configure cluster to not run on node boot on specified node(s). If node is not specified then cluster is disabled on the local node. If \fB\-\-all\fR is specified then cluster is disabled on all nodes.
.TP
status
View current cluster status (an alias of 'pcs status cluster').
.TP
-pcsd\-status [node] [...]
-Get current status of pcsd on nodes specified, or on all nodes configured in corosync.conf if no nodes are specified.
+pcsd\-status [<node>]...
+Show current status of pcsd on nodes specified, or on all nodes configured in the local cluster if no nodes are specified.
.TP
sync
Sync corosync configuration to all nodes found from current corosync.conf file (cluster.conf on systems running Corosync 1.x).
.TP
cib [filename] [scope=<scope> | \fB\-\-config\fR]
-Get the raw xml from the CIB (Cluster Information Base). If a filename is provided, we save the CIB to that file, otherwise the CIB is printed. Specify scope to get a specific section of the CIB. Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults, status. \fB\-\-config\fR is the same as scope=configuration. Do not specify a scope if you want to edit the saved CIB using pcs (pcs -f <command>).
+Get the raw xml from the CIB (Cluster Information Base). If a filename is provided, we save the CIB to that file, otherwise the CIB is printed. Specify scope to get a specific section of the CIB. Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults, status. \fB\-\-config\fR is the same as scope=configuration. Do not specify a scope if you want to edit the saved CIB using pcs (pcs \-f <command>).
.TP
-cib-push <filename> [scope=<scope> | \fB\-\-config\fR] [\fB\-\-wait\fR[=<n>]]
-Push the raw xml from <filename> to the CIB (Cluster Information Base). You can obtain the CIB by running the 'pcs cluster cib' command, which is recommended first step when you want to perform desired modifications (pcs \fB\-f\fR <command>) for the one-off push. Specify scope to push a specific section of the CIB. Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults. \fB\-\-config\fR is the same as scope=configuration. U [...]
+cib\-push <filename> [\fB\-\-wait\fR[=<n>]] [diff\-against=<filename_original> | scope=<scope> | \fB\-\-config\fR]
+Push the raw xml from <filename> to the CIB (Cluster Information Base). You can obtain the CIB by running the 'pcs cluster cib' command, which is recommended first step when you want to perform desired modifications (pcs \fB\-f\fR <command>) for the one\-off push. If diff\-against is specified, pcs diffs contents of filename against contents of filename_original and pushes the result to the CIB. Specify scope to push a specific section of the CIB. Valid values of the scope are: confi [...]
+
+Example:
+ pcs cluster cib > original.xml
+ cp original.xml new.xml
+ pcs \-f new.xml constraint location apache prefers node2
+ pcs cluster cib\-push new.xml diff\-against=original.xml
.TP
cib\-upgrade
Upgrade the CIB to conform to the latest version of the document schema.
@@ -271,11 +280,26 @@ Upgrade the CIB to conform to the latest version of the document schema.
edit [scope=<scope> | \fB\-\-config\fR]
Edit the cib in the editor specified by the $EDITOR environment variable and push out any changes upon saving. Specify scope to edit a specific section of the CIB. Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults. \fB\-\-config\fR is the same as scope=configuration. Use of \fB\-\-config\fR is recommended. Do not specify a scope if you need to edit the whole CIB or if you want to be warned in case the CIB is outdated.
.TP
-node add <node[,node\-altaddr]> [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-enable\fR] [\fB\-\-watchdog\fR=<watchdog\-path>]
-Add the node to corosync.conf and corosync on all nodes in the cluster and sync the new corosync.conf to the new node. If \fB\-\-start\fR is specified also start corosync/pacemaker on the new node, if \fB\-\-wait\fR is sepcified wait up to 'n' seconds for the new node to start. If \fB\-\-enable\fR is specified enable corosync/pacemaker on new node. When using Redundant Ring Protocol (RRP) with udpu transport, specify the ring 0 address first followed by a ',' and then the ring 1 addre [...]
+node add <node[,node\-altaddr]> [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-enable\fR] [\fB\-\-watchdog\fR=<watchdog\-path>] [\fB\-\-device\fR=<path>] ...
+Add the node to the cluster and sync all relevant configuration files to the new node. If \fB\-\-start\fR is specified also start cluster on the new node, if \fB\-\-wait\fR is specified wait up to 'n' seconds for the new node to start. If \fB\-\-enable\fR is specified configure cluster to start on the new node on boot. When using Redundant Ring Protocol (RRP) with udpu transport, specify the ring 0 address first followed by a ',' and then the ring 1 address. Use \fB\-\-watchdog\fR to spe [...]
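+
+For example, to add a node named 'node3' (name is illustrative) and start and enable the cluster on it:
+ pcs cluster node add node3 \-\-start \-\-enable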
.TP
node remove <node>
-Shutdown specified node and remove it from pacemaker and corosync on all other nodes in the cluster.
+Shutdown specified node and remove it from the cluster.
+.TP
+node add\-remote <node host> [<node name>] [options] [op <operation action> <operation options> [<operation action> <operation options>]...] [meta <meta options>...] [\fB\-\-wait\fR[=<n>]]
+Add the node to the cluster as a remote node. Sync all relevant configuration files to the new node. Start the node and configure it to start the cluster on boot. Options are port and reconnect_interval. Operations and meta belong to an underlying connection resource (ocf:pacemaker:remote). If \fB\-\-wait\fR is specified, wait up to 'n' seconds for the node to start.
+.TP
+node remove\-remote <node identifier>
+Shutdown specified remote node and remove it from the cluster. The node\-identifier can be the name of the node or the address of the node.
+.TP
+node add\-guest <node host> <resource id> [options] [\fB\-\-wait\fR[=<n>]]
+Make the specified resource a guest node resource. Sync all relevant configuration files to the new node. Start the node and configure it to start the cluster on boot. Options are remote\-addr, remote\-port and remote\-connect\-timeout. If \fB\-\-wait\fR is specified, wait up to 'n' seconds for the node to start.
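+
+For example, to turn an existing virtual machine resource 'vm\-guest1' into a guest node reachable at host 'guest1.example.com' (names are illustrative):
+ pcs cluster node add\-guest guest1.example.com vm\-guest1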
+.TP
+node remove\-guest <node identifier>
+Shutdown specified guest node and remove it from the cluster. The node\-identifier can be the name of the node, the address of the node, or the id of the resource that is used as the guest node.
+.TP
+node clear <node name>
+Remove specified node from various cluster caches. Use this if a removed node is still considered by the cluster to be a member of the cluster.
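+
+For example, if a previously removed node named 'node3' (name is illustrative) is still reported as a cluster member:
+ pcs cluster node clear node3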
.TP
uidgid
List the current configured uids and gids of users allowed to connect to corosync.
@@ -293,12 +317,14 @@ reload corosync
Reload the corosync configuration on the current node.
.TP
destroy [\fB\-\-all\fR]
-Permanently destroy the cluster on the current node, killing all corosync/pacemaker processes removing all cib files and the corosync.conf file. Using \fB\-\-all\fR will attempt to destroy the cluster on all nodes configure in the corosync.conf file. WARNING: This command permantly removes any cluster configuration that has been created. It is recommended to run 'pcs cluster stop' before destroying the cluster.
+Permanently destroy the cluster on the current node, killing all cluster processes and removing all cluster configuration files. Using \fB\-\-all\fR will attempt to destroy the cluster on all nodes in the local cluster.
+
+\fBWARNING:\fR This command permanently removes any cluster configuration that has been created. It is recommended to run 'pcs cluster stop' before destroying the cluster.
.TP
verify [\fB\-V\fR] [filename]
Checks the pacemaker configuration (cib) for syntax and common conceptual errors. If no filename is specified the check is performed on the currently running cluster. If \fB\-V\fR is used more verbose output will be printed.
.TP
-report [\fB\-\-from\fR "YYYY\-M\-D H:M:S" [\fB\-\-to\fR "YYYY\-M\-D" H:M:S"]] dest
+report [\fB\-\-from\fR "YYYY\-M\-D H:M:S" [\fB\-\-to\fR "YYYY\-M\-D H:M:S"]] dest
Create a tarball containing everything needed when reporting cluster problems. If \fB\-\-from\fR and \fB\-\-to\fR are not used, the report will include the past 24 hours.
.SS "stonith"
.TP
@@ -308,11 +334,12 @@ Show all currently configured stonith devices or if a stonith id is specified sh
list [filter] [\fB\-\-nodesc\fR]
Show list of all available stonith agents (if filter is provided then only stonith agents matching the filter will be shown). If \fB\-\-nodesc\fR is used then descriptions of stonith agents are not printed.
.TP
-describe <stonith agent>
-Show options for specified stonith agent.
+describe <stonith agent> [\fB\-\-full\fR]
+Show options for specified stonith agent. If \fB\-\-full\fR is specified, all options including advanced ones are shown.
.TP
-create <stonith id> <stonith device type> [stonith device options] [op <operation action> <operation options> [<operation action> <operation options>]...] [meta <meta options>...]
-Create stonith device with specified type and options.
+create <stonith id> <stonith device type> [stonith device options] [op <operation action> <operation options> [<operation action> <operation options>]...] [meta <meta options>...] [\fB\-\-group\fR <group id> [\fB\-\-before\fR <stonith id> | \fB\-\-after\fR <stonith id>]] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
+Create stonith device with specified type and options. If \fB\-\-group\fR is specified the stonith device is added to the group named. You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added stonith device relative to some stonith device already existing in the group. If \fB\-\-disabled\fR is specified the stonith device is not used. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the stonith device to start and then return 0 if the stoni [...]
+
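+For example, assuming a fence_virt device able to fence host 'f1' and an existing group 'fence\-group' (names are illustrative):
+ pcs stonith create MyFence fence_virt pcmk_host_list=f1 op monitor interval=60s \-\-group fence\-group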
.TP
update <stonith id> [stonith device options]
Add/Change options to specified stonith id.
@@ -320,20 +347,26 @@ Add/Change options to specified stonith id.
delete <stonith id>
Remove stonith id from configuration.
.TP
+enable <stonith id> [\fB\-\-wait[=n]\fR]
+Allow the cluster to use the stonith device. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the stonith device to start and then return 0 if the stonith device is started, or 1 if the stonith device has not yet started. If 'n' is not specified it defaults to 60 minutes.
+.TP
+disable <stonith id> [\fB\-\-wait[=n]\fR]
+Attempt to stop the stonith device if it is running and disallow the cluster to use it. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the stonith device to stop and then return 0 if the stonith device is stopped or 1 if the stonith device has not stopped. If 'n' is not specified it defaults to 60 minutes.
+.TP
cleanup [<stonith id>] [\fB\-\-node\fR <node>]
-Cleans up the stonith device in the lrmd (useful to reset the status and failcount). This tells the cluster to forget the operation history of a stonith device and re-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If a stonith id is not specified then all resources/stonith devices will be cleaned up. If a node is not specified then resources on all nodes will be cleaned up.
+Make the cluster forget the operation history of the stonith device and re\-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If a stonith id is not specified then all resources/stonith devices will be cleaned up. If a node is not specified then resources/stonith devices on all nodes will be cleaned up.
.TP
-level
+level [config]
Lists all of the fencing levels currently configured.
.TP
-level add <level> <node> <devices>
-Add the fencing level for the specified node with a comma separated list of devices (stonith ids) to attempt for that node at that level. Fence levels are attempted in numerical order (starting with 1) if a level succeeds (meaning all devices are successfully fenced in that level) then no other levels are tried, and the node is considered fenced.
+level add <level> <target> <stonith id> [stonith id]...
+Add the fencing level for the specified target with the list of stonith devices to attempt for that target at that level. Fence levels are attempted in numerical order (starting with 1). If a level succeeds (meaning all devices are successfully fenced in that level) then no other levels are tried, and the target is considered fenced. Target may be a node name <node_name> or %<node_name> or node%<node_name>, a node name regular expression regexp%<node_pattern> or a node attribute value at [...]
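+
+For example, to register devices 'fence_dev1' and 'fence_dev2' (ids are illustrative) as fencing level 1 for every node whose name matches 'node[0\-9]+':
+ pcs stonith level add 1 regexp%node[0\-9]+ fence_dev1 fence_dev2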
.TP
-level remove <level> [node id] [stonith id] ... [stonith id]
-Removes the fence level for the level, node and/or devices specified. If no nodes or devices are specified then the fence level is removed.
+level remove <level> [target] [stonith id]...
+Removes the fence level for the level, target and/or devices specified. If no target or devices are specified then the fence level is removed. Target may be a node name <node_name> or %<node_name> or node%<node_name>, a node name regular expression regexp%<node_pattern> or a node attribute value attrib%<name>=<value>.
.TP
-level clear [node|stonith id(s)]
-Clears the fence levels on the node (or stonith id) specified or clears all fence levels if a node/stonith id is not specified. If more than one stonith id is specified they must be separated by a comma and no spaces. Example: pcs stonith level clear dev_a,dev_b
+level clear [target|stonith id(s)]
+Clears the fence levels on the target (or stonith id) specified or clears all fence levels if a target/stonith id is not specified. If more than one stonith id is specified they must be separated by a comma and no spaces. Target may be a node name <node_name> or %<node_name> or node%<node_name>, a node name regular expression regexp%<node_pattern> or a node attribute value attrib%<name>=<value>. Example: pcs stonith level clear dev_a,dev_b
.TP
level verify
Verifies that all fence devices and nodes specified in fence levels exist.
@@ -348,14 +381,14 @@ Confirm that the host specified is currently down. This command should \fBONLY\
NOTE: pcs does not check whether the specified node exists in the cluster; this makes it possible to work with nodes not visible from the local cluster partition.
.TP
-sbd enable [\fB\-\-watchdog\fR=<path>[@<node>]] ... [<SBD_OPTION>=<value>] ...
-Enable SBD in cluster. Default path for watchdog device is /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5), SBD_DELAY_START (default: no) and SBD_STARTMODE (default: clean).
+sbd enable [\fB\-\-watchdog\fR=<path>[@<node>]] ... [\fB\-\-device\fR=<path>[@<node>]] ... [<SBD_OPTION>=<value>] ...
+Enable SBD in cluster. Default path for watchdog device is /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5), SBD_DELAY_START (default: no) and SBD_STARTMODE (default: always). It is possible to specify up to 3 devices per node.
.B WARNING: Cluster has to be restarted in order to apply these changes.
-Example of enabling SBD in cluster with watchdogs on node1 will be /dev/watchdog2, on node2 /dev/watchdog1, /dev/watchdog0 on all other nodes and watchdog timeout will bet set to 10 seconds:
+Example of enabling SBD in a cluster where the watchdog on node1 will be /dev/watchdog2, on node2 /dev/watchdog1, and /dev/watchdog0 on all other nodes, the device will be /dev/sdb on node1 and /dev/sda on all other nodes, and the watchdog timeout will be set to 10 seconds:
-pcs stonith sbd enable \-\-watchdog=/dev/watchdog2@node1 \-\-watchdog=/dev/watchdog1@node2 \-\-watchdog=/dev/watchdog0 SBD_WATCHDOG_TIMEOUT=10
+pcs stonith sbd enable \-\-watchdog=/dev/watchdog2@node1 \-\-watchdog=/dev/watchdog1@node2 \-\-watchdog=/dev/watchdog0 \-\-device=/dev/sdb@node1 \-\-device=/dev/sda SBD_WATCHDOG_TIMEOUT=10
.TP
sbd disable
@@ -363,8 +396,16 @@ Disable SBD in cluster.
.B WARNING: Cluster has to be restarted in order to apply these changes.
.TP
-sbd status
-Show status of SBD services in cluster.
+sbd device setup \fB\-\-device\fR=<path> [\fB\-\-device\fR=<path>] ... [watchdog\-timeout=<integer>] [allocate\-timeout=<integer>] [loop\-timeout=<integer>] [msgwait\-timeout=<integer>]
+Initialize SBD structures on device(s) with specified timeouts.
+
+.B WARNING: All content on device(s) will be overwritten.
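+
+For example, to initialize a shared block device /dev/sdb (path is illustrative) with a 10 second watchdog timeout:
+ pcs stonith sbd device setup \-\-device=/dev/sdb watchdog\-timeout=10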
+.TP
+sbd device message <device\-path> <node> <message\-type>
+Manually set a message of the specified type on the device for the node. Possible message types (documented in the sbd(8) man page): test, reset, off, crashdump, exit, clear
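+
+For example, to clear any previously set message for node1 on device /dev/sdb (names are illustrative):
+ pcs stonith sbd device message /dev/sdb node1 clear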
+.TP
+sbd status [\fB\-\-full\fR]
+Show status of SBD services in the cluster and of the local device(s) configured. If \fB\-\-full\fR is specified, a dump of the SBD headers on the device(s) will also be shown.
.TP
sbd config
Show SBD configuration in cluster.
@@ -421,15 +462,15 @@ Remove property from configuration (or remove attribute from specified node if \
.SS "constraint"
.TP
[list|show] \fB\-\-full\fR
-List all current location, order and colocation constraints, if \fB\-\-full\fR is specified also list the constraint ids.
+List all current constraints. If \fB\-\-full\fR is specified also list the constraint ids.
.TP
-location <resource id> prefers <node[=score]>...
-Create a location constraint on a resource to prefer the specified node and score (default score: INFINITY).
+location <resource> prefers <node>[=<score>] [<node>[=<score>]]...
+Create a location constraint on a resource to prefer the specified node with score (default score: INFINITY). Resource may be either a resource id <resource_id> or %<resource_id> or resource%<resource_id>, or a resource name regular expression regexp%<resource_pattern>.
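+
+For example, to make every resource whose id matches 'webserver.*' prefer node1 with score 100 (names are illustrative):
+ pcs constraint location regexp%webserver.* prefers node1=100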
.TP
-location <resource id> avoids <node[=score]>...
-Create a location constraint on a resource to avoid the specified node and score (default score: INFINITY).
+location <resource> avoids <node>[=<score>] [<node>[=<score>]]...
+Create a location constraint on a resource to avoid the specified node with score (default score: INFINITY). Resource may be either a resource id <resource_id> or %<resource_id> or resource%<resource_id>, or a resource name regular expression regexp%<resource_pattern>.
.TP
-location <resource id> rule [id=<rule id>] [resource-discovery=<option>] [role=master|slave] [constraint\-id=<id>] [score=<score>|score-attribute=<attribute>] <expression>
+location <resource> rule [id=<rule id>] [resource\-discovery=<option>] [role=master|slave] [constraint\-id=<id>] [score=<score> | score\-attribute=<attribute>] <expression>
Creates a location rule on the specified resource where the expression looks like one of the following:
.br
defined|not_defined <attribute>
@@ -448,25 +489,25 @@ Creates a location rule on the specified resource where the expression looks lik
.br
( <expression> )
.br
-where duration options and date spec options are: hours, monthdays, weekdays, yeardays, months, weeks, years, weekyears, moon. If score is omitted it defaults to INFINITY. If id is omitted one is generated from the resource id. If resource-discovery is omitted it defaults to 'always'.
+where duration options and date spec options are: hours, monthdays, weekdays, yeardays, months, weeks, years, weekyears, moon. Resource may be either a resource id <resource_id> or %<resource_id> or resource%<resource_id>, or a resource name regular expression regexp%<resource_pattern>. If score is omitted it defaults to INFINITY. If id is omitted one is generated from the resource id. If resource\-discovery is omitted it defaults to 'always'.
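+
+For example, to keep resource 'WebSite' (names are illustrative) off nodes where the node attribute 'datacenter' is not defined:
+ pcs constraint location WebSite rule score=\-INFINITY not_defined datacenter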
.TP
-location [show [resources|nodes [node id|resource id]...] [\fB\-\-full\fR]]
-List all the current location constraints, if 'resources' is specified location constraints are displayed per resource (default), if 'nodes' is specified location constraints are displayed per node. If specific nodes or resources are specified then we only show information about them. If \fB\-\-full\fR is specified show the internal constraint id's as well.
+location [show [resources|nodes [<node>|<resource>]...] [\fB\-\-full\fR]]
+List all the current location constraints. If 'resources' is specified, location constraints are displayed per resource (default). If 'nodes' is specified, location constraints are displayed per node. If specific nodes or resources are specified then we only show information about them. Resource may be either a resource id <resource_id> or %<resource_id> or resource%<resource_id>, or a resource name regular expression regexp%<resource_pattern>. If \fB\-\-full\fR is specified show the int [...]
.TP
-location add <id> <resource id> <node> <score> [resource-discovery=<option>]
-Add a location constraint with the appropriate id, resource id, node name and score. (For more advanced pacemaker usage.)
+location add <id> <resource> <node> <score> [resource\-discovery=<option>]
+Add a location constraint with the appropriate id for the specified resource, node name and score. Resource may be either a resource id <resource_id> or %<resource_id> or resource%<resource_id>, or a resource name regular expression regexp%<resource_pattern>.
.TP
-location remove <id> [<resource id> <node> <score>]
-Remove a location constraint with the appropriate id, resource id, node name and score. (For more advanced pacemaker usage.)
+location remove <id>
+Remove a location constraint with the appropriate id.
.TP
order [show] [\fB\-\-full\fR]
List all current ordering constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
.TP
order [action] <resource id> then [action] <resource id> [options]
-Add an ordering constraint specifying actions (start, stop, promote, demote) and if no action is specified the default action will be start. Available options are kind=Optional/Mandatory/Serialize, symmetrical=true/false, require-all=true/false and id=<constraint\-id>.
+Add an ordering constraint specifying actions (start, stop, promote, demote) and if no action is specified the default action will be start. Available options are kind=Optional/Mandatory/Serialize, symmetrical=true/false, require\-all=true/false and id=<constraint\-id>.
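+
+For example, to have resource 'database' started before resource 'webserver' (names are illustrative):
+ pcs constraint order start database then start webserver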
.TP
order set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]]
-Create an ordered set of resources. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Available constraint_options are id=<constraint\-id>, kind=Optional/Mandatory/Serialize and symmetrical=true/false.
+Create an ordered set of resources. Available options are sequential=true/false, require\-all=true/false and action=start/promote/demote/stop. Available constraint_options are id=<constraint\-id>, kind=Optional/Mandatory/Serialize and symmetrical=true/false.
.TP
order remove <resource1> [resourceN]...
Remove resource from any ordering constraint
@@ -478,7 +519,7 @@ colocation add [master|slave] <source resource id> with [master|slave] <target r
Request <source resource> to run on the same node where pacemaker has determined <target resource> should run. Positive values of score mean the resources should be run on the same node, negative values mean the resources should not be run on the same node. Specifying 'INFINITY' (or '\-INFINITY') for the score forces <source resource> to run (or not run) with <target resource> (score defaults to "INFINITY"). A role can be master or slave (if no role is specified, it defaults to 'started').
.TP
colocation set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]]
-Create a colocation constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Available constraint_options are id, score, score-attribute and score-attribute-mangle.
+Create a colocation constraint with a resource set. Available options are sequential=true/false and role=Stopped/Started/Master/Slave. Available constraint_options are id and either of: score, score\-attribute, score\-attribute\-mangle.
.TP
colocation remove <source resource id> <target resource id>
Remove colocation constraints with specified resources.
@@ -487,21 +528,21 @@ ticket [show] [\fB\-\-full\fR]
List all current ticket constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
.TP
ticket add <ticket> [<role>] <resource id> [<options>] [id=<constraint\-id>]
-Create a ticket constraint for <resource id>. Available option is loss-policy=fence/stop/freeze/demote. A role can be master, slave, started or stopped.
+Create a ticket constraint for <resource id>. Available option is loss\-policy=fence/stop/freeze/demote. A role can be master, slave, started or stopped.
.TP
ticket set <resource1> [<resourceN>]... [<options>] [set <resourceX> ... [<options>]] setoptions <constraint_options>
-Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket=<ticket>. Optional constraint options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote.
+Create a ticket constraint with a resource set. Available options are role=Stopped/Started/Master/Slave. Required constraint option is ticket=<ticket>. Optional constraint options are id=<constraint\-id> and loss\-policy=fence/stop/freeze/demote.
.TP
ticket remove <ticket> <resource id>
Remove all ticket constraints with <ticket> from <resource id>.
.TP
-remove [constraint id]...
+remove <constraint id>...
Remove constraint(s) or constraint rules with the specified id(s).
.TP
ref <resource>...
List constraints referencing specified resource.
.TP
-rule add <constraint id> [id=<rule id>] [role=master|slave] [score=<score>|score-attribute=<attribute>] <expression>
+rule add <constraint id> [id=<rule id>] [role=master|slave] [score=<score>|score\-attribute=<attribute>] <expression>
Add a rule to a constraint where the expression looks like one of the following:
.br
defined|not_defined <attribute>
@@ -520,7 +561,7 @@ Add a rule to a constraint where the expression looks like one of the following:
.br
( <expression> )
.br
-where duration options and date spec options are: hours, monthdays, weekdays, yeardays, months, weeks, years, weekyears, moon If score is ommited it defaults to INFINITY. If id is ommited one is generated from the constraint id.
+where duration options and date spec options are: hours, monthdays, weekdays, yeardays, months, weeks, years, weekyears, moon. If score is omitted it defaults to INFINITY. If id is omitted one is generated from the constraint id.
.TP
rule remove <rule id>
Remove a rule if a rule id is specified; if the rule is the last rule in its constraint, the constraint will be removed.
@@ -558,7 +599,7 @@ status
Show quorum runtime status.
.TP
device add [<generic options>] model <device model> [<model options>]
-Add a quorum device to the cluster. Quorum device needs to be created first by "pcs qdevice setup" command. It is not possible to use more than one quorum device in a cluster simultaneously. Generic options, model and model options are all documented in corosync's corosync\-qdevice(8) man page.
+Add a quorum device to the cluster. Quorum device needs to be created first by "pcs qdevice setup" command. It is not possible to use more than one quorum device in a cluster simultaneously. Generic options, model and model options are all documented in corosync\-qdevice(8) man page.
.TP
device remove
Remove a quorum device from the cluster.
@@ -567,7 +608,7 @@ device status [\fB\-\-full\fR]
Show quorum device runtime status. Using \fB\-\-full\fR will give more detailed output.
.TP
device update [<generic options>] [model <model options>]
-Add/Change quorum device options. Generic options and model options are all documented in corosync's corosync\-qdevice(8) man page. Requires the cluster to be stopped.
+Add/Change quorum device options. Generic options and model options are all documented in corosync\-qdevice(8) man page. Requires the cluster to be stopped.
WARNING: If you want to change the "host" option of qdevice model net, use the "pcs quorum device remove" and "pcs quorum device add" commands to set up the configuration properly, unless the old and the new host are the same machine.
.TP
@@ -657,11 +698,11 @@ View current quorum status.
qdevice <device model> [\fB\-\-full\fR] [<cluster name>]
Show runtime status of specified model of quorum device provider. Using \fB\-\-full\fR will give more detailed output. If <cluster name> is specified, only information about the specified cluster will be displayed.
.TP
-nodes [corosync|both|config]
-View current status of nodes from pacemaker. If 'corosync' is specified, print nodes currently configured in corosync, if 'both' is specified, print nodes from both corosync & pacemaker. If 'config' is specified, print nodes from corosync & pacemaker configuration.
+nodes [corosync | both | config]
+View current status of nodes from pacemaker. If 'corosync' is specified, view current status of nodes from corosync instead. If 'both' is specified, view current status of nodes from both corosync & pacemaker. If 'config' is specified, print nodes from corosync & pacemaker configuration.
.TP
-pcsd [<node>] ...
-Show the current status of pcsd on the specified nodes. When no nodes are specified, status of all nodes is displayed.
+pcsd [<node>]...
+Show current status of pcsd on nodes specified, or on all nodes configured in the local cluster if no nodes are specified.
.TP
xml
View xml version of status (output from crm_mon \fB\-r\fR \fB\-1\fR \fB\-X\fR).
@@ -686,39 +727,39 @@ checkpoint restore <checkpoint_number>
Restore cluster configuration to specified checkpoint.
.TP
import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf] [dist=<dist>]
-Converts CMAN cluster configuration to Pacemaker cluster configuration. Converted configuration will be saved to 'output' file. To send the configuration to the cluster nodes the 'pcs config restore' command can be used. If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually. If no input is specified /etc/cluster/cluster.conf will be used. You can force to create output containing either cluster.conf or corosync.conf using the output-format op [...]
+Converts CMAN cluster configuration to Pacemaker cluster configuration. Converted configuration will be saved to 'output' file. To send the configuration to the cluster nodes the 'pcs config restore' command can be used. If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually. If no input is specified /etc/cluster/cluster.conf will be used. You can force to create output containing either cluster.conf or corosync.conf using the output\-format o [...]
.TP
-import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs-commands|pcs-commands-verbose [dist=<dist>]
+import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs\-commands|pcs\-commands\-verbose [dist=<dist>]
Converts CMAN cluster configuration to a list of pcs commands which recreates the same cluster as Pacemaker cluster when executed. Commands will be saved to 'output' file. For other options see above.
.TP
export pcs\-commands|pcs\-commands\-verbose [output=<filename>] [dist=<dist>]
-Creates a list of pcs commands which upon execution recreates the current cluster running on this node. Commands will be saved to 'output' file or written to stdout if 'output' is not specified. Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages. Optionally specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. You can get the list of supported dist [...]
+Creates a list of pcs commands which upon execution recreates the current cluster running on this node. Commands will be saved to 'output' file or written to stdout if 'output' is not specified. Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages. Optionally specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. You can get the list of supported dist [...]
.SS "pcsd"
.TP
certkey <certificate file> <key file>
Load custom certificate and key files for use in pcsd.
.TP
-sync-certificates
-Sync pcsd certificates to all nodes found from current corosync.conf file (cluster.conf on systems running Corosync 1.x). WARNING: This will restart pcsd daemon on the nodes.
+sync\-certificates
+Sync pcsd certificates to all nodes in the local cluster. WARNING: This will restart pcsd daemon on the nodes.
.TP
-clear-auth [\fB\-\-local\fR] [\fB\-\-remote\fR]
+clear\-auth [\fB\-\-local\fR] [\fB\-\-remote\fR]
Removes all system tokens which allow pcs/pcsd on the current system to authenticate with remote pcs/pcsd instances and vice\-versa. After this command is run this node will need to be re\-authenticated with other nodes (using 'pcs cluster auth'). Using \fB\-\-local\fR only removes tokens used by local pcs (and pcsd if root) to connect to other pcsd instances, using \fB\-\-remote\fR clears authentication tokens used by remote systems to connect to the local pcsd instance.
.SS "node"
.TP
attribute [[<node>] [\fB\-\-name\fR <name>] | <node> <name>=<value> ...]
Manage node attributes. If no parameters are specified, show attributes of all nodes. If one parameter is specified, show attributes of specified node. If \fB\-\-name\fR is specified, show specified attribute's value from all nodes. If more parameters are specified, set attributes of specified node. Attributes can be removed by setting an attribute without a value.
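
For example, to set an attribute on node1 and later remove it by setting it to an empty value (names are illustrative):
 pcs node attribute node1 datacenter=east
 pcs node attribute node1 datacenter=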
.TP
-maintenance [\fB\-\-all\fR] | [<node>]...
-Put specified node(s) into maintenance mode, if no node or options are specified the current node will be put into maintenance mode, if \fB\-\-all\fR is specified all nodes will be put into maintenace mode.
+maintenance [\fB\-\-all\fR | <node>...] [\fB\-\-wait\fR[=n]]
+Put specified node(s) into maintenance mode, if no nodes or options are specified the current node will be put into maintenance mode, if \fB\-\-all\fR is specified all nodes will be put into maintenance mode. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the node(s) to be put into maintenance mode and then return 0 on success or 1 if the operation has not succeeded yet. If 'n' is not specified it defaults to 60 minutes.
.TP
-unmaintenance [\fB\-\-all\fR] | [<node>]...
-Remove node(s) from maintenance mode, if no node or options are specified the current node will be removed from maintenance mode, if \fB\-\-all\fR is specified all nodes will be removed from maintenance mode.
+unmaintenance [\fB\-\-all\fR | <node>...] [\fB\-\-wait\fR[=n]]
+Remove node(s) from maintenance mode, if no nodes or options are specified the current node will be removed from maintenance mode, if \fB\-\-all\fR is specified all nodes will be removed from maintenance mode. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the node(s) to be removed from maintenance mode and then return 0 on success or 1 if the operation has not succeeded yet. If 'n' is not specified it defaults to 60 minutes.
.TP
-standby [\fB\-\-all\fR | <node>] [\fB\-\-wait\fR[=n]]
-Put specified node into standby mode (the node specified will no longer be able to host resources), if no node or options are specified the current node will be put into standby mode, if \fB\-\-all\fR is specified all nodes will be put into standby mode. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the node(s) to be put into standby mode and then return 0 on success or 1 if the operation not succeeded yet. If 'n' is not specified it defaults to 60 minutes.
+standby [\fB\-\-all\fR | <node>...] [\fB\-\-wait\fR[=n]]
+Put specified node(s) into standby mode (the specified node(s) will no longer be able to host resources), if no nodes or options are specified the current node will be put into standby mode, if \fB\-\-all\fR is specified all nodes will be put into standby mode. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the node(s) to be put into standby mode and then return 0 on success or 1 if the operation has not succeeded yet. If 'n' is not specified it defaults to 60 minutes.
.TP
-unstandby [\fB\-\-all\fR | <node>] [\fB\-\-wait\fR[=n]]
-Remove node from standby mode (the node specified will now be able to host resources), if no node or options are specified the current node will be removed from standby mode, if \fB\-\-all\fR is specified all nodes will be removed from standby mode. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the node(s) to be removed from standby mode and then return 0 on success or 1 if the operation not succeeded yet. If 'n' is not specified it defaults to 60 minutes.
+unstandby [\fB\-\-all\fR | <node>...] [\fB\-\-wait\fR[=n]]
+Remove node(s) from standby mode (the specified node(s) will again be able to host resources), if no nodes or options are specified the current node will be removed from standby mode, if \fB\-\-all\fR is specified all nodes will be removed from standby mode. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the node(s) to be removed from standby mode and then return 0 on success or 1 if the operation has not succeeded yet. If 'n' is not specified it defaults to 60 minutes.
.TP
utilization [[<node>] [\fB\-\-name\fR <name>] | <node> <name>=<value> ...]
Add specified utilization options to specified node. If node is not specified, shows utilization of all nodes. If \fB\-\-name\fR is specified, shows specified utilization value from all nodes. If utilization options are not specified, shows utilization of specified node. Utilization options should be in the format name=value and the value has to be an integer. Options may be removed by setting an option without a value. Example: pcs node utilization node1 cpu=4 ram=
@@ -731,16 +772,16 @@ create path=<path> [id=<alert\-id>] [description=<description>] [options [<optio
Define an alert handler with specified path. Id will be automatically generated if it is not specified.
.TP
update <alert\-id> [path=<path>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
-Update existing alert handler with specified id.
+Update an existing alert handler with specified id.
.TP
remove <alert\-id> ...
Remove alert handlers with specified ids.
.TP
-recipient add <alert\-id> value=<recipient\-value> [id=<recipient\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
+recipient add <alert\-id> value=<recipient\-value> [id=<recipient\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
Add new recipient to specified alert handler.
.TP
-recipient update <recipient\-id> [value=<recipient\-value>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-Update existing recipient identified by it's id.
+recipient update <recipient\-id> [value=<recipient\-value>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
+Update an existing recipient identified by its id.
.TP
recipient remove <recipient\-id> ...
Remove specified recipients.
@@ -767,5 +808,36 @@ Delete the VirtualIP resource
Create the MyStonith stonith fence_virt device which can fence host 'f1'
.B # pcs stonith create MyStonith fence_virt pcmk_host_list=f1
.TP
-Set the stonith-enabled property to false on the cluster (which disables stonith)
+Set the stonith\-enabled property to false on the cluster (which disables stonith)
.B # pcs property set stonith\-enabled=false
+.SH ENVIRONMENT VARIABLES
+.TP
+EDITOR
+ Path to a plain\-text editor. This is used when pcs is requested to present a text for the user to edit.
+.TP
+no_proxy, https_proxy, all_proxy, NO_PROXY, HTTPS_PROXY, ALL_PROXY
+ These environment variables (listed according to their priorities) control how pcs handles proxy servers when connecting to cluster nodes. See curl(1) man page for details.
+.SH SEE ALSO
+http://clusterlabs.org/doc/
+
+.BR pcsd (8)
+
+.BR corosync_overview (8),
+.BR votequorum (5),
+.BR corosync.conf (5),
+.BR corosync\-qdevice (8),
+.BR corosync\-qdevice\-tool (8),
+.BR corosync\-qnetd (8),
+.BR corosync\-qnetd\-tool (8)
+
+.BR crmd (7),
+.BR pengine (7),
+.BR stonithd (7),
+.BR crm_mon (8),
+.BR crm_report (8),
+.BR crm_simulate (8)
+
+.BR boothd (8)
+.BR sbd (8)
+
+.BR clufter (1)
diff --git a/pcs/pcsd.py b/pcs/pcsd.py
index 925ce29..629b4c0 100644
--- a/pcs/pcsd.py
+++ b/pcs/pcsd.py
@@ -5,13 +5,14 @@ from __future__ import (
unicode_literals,
)
-import sys
-import os
import errno
+import os
+import sys
+import time
+from pcs import settings
from pcs import usage
from pcs import utils
-from pcs import settings
def pcsd_cmd(argv):
@@ -108,7 +109,6 @@ def pcsd_sync_certs(argv, exit_after_error=True):
sys.exit(1)
else:
return
- print()
except (KeyError, AttributeError):
utils.err("Unable to communicate with pcsd", exit_after_error)
return
@@ -117,32 +117,7 @@ def pcsd_sync_certs(argv, exit_after_error=True):
return
print("Restarting pcsd on the nodes in order to reload the certificates...")
- pcsd_data = {
- "nodes": nodes_restart,
- }
- output, retval = utils.run_pcsdcli("pcsd_restart_nodes", pcsd_data)
- if retval == 0 and output["status"] == "ok" and output["data"]:
- try:
- restart_result = output["data"]
- if restart_result["node_status"]:
- for node, status in restart_result["node_status"].items():
- print("{0}: {1}".format(node, status["text"]))
- if status["status"] != "ok":
- error = True
- if restart_result["status"] != "ok":
- error = True
- utils.err(restart_result["text"], False)
- if error:
- if exit_after_error:
- sys.exit(1)
- else:
- return
- except (KeyError, AttributeError):
- utils.err("Unable to communicate with pcsd", exit_after_error)
- return
- else:
- utils.err("Unable to restart pcsd", exit_after_error)
- return
+ pcsd_restart_nodes(nodes_restart, exit_after_error)
def pcsd_clear_auth(argv):
output = []
@@ -172,3 +147,70 @@ def pcsd_clear_auth(argv):
for o in output:
print("Error: " + o)
sys.exit(1)
+
+def pcsd_restart_nodes(nodes, exit_after_error=True):
+ pcsd_data = {
+ "nodes": nodes,
+ }
+ instance_signatures = dict()
+
+ error = False
+ output, retval = utils.run_pcsdcli("pcsd_restart_nodes", pcsd_data)
+ if retval == 0 and output["status"] == "ok" and output["data"]:
+ try:
+ restart_result = output["data"]
+ if restart_result["node_status"]:
+ for node, status in restart_result["node_status"].items():
+                    # If the request was accepted and we have the instance
+                    # signature, we are able to check whether the restart was
+                    # performed. Otherwise we just print the status. The
+                    # instance signature was added in pcs-0.9.156.
+ if status["status"] == "ok":
+ sign = status.get("instance_signature", "")
+ if sign:
+ instance_signatures[node] = sign
+ continue
+ print("{0}: {1}".format(node, status["text"]))
+ if status["status"] != "ok":
+ error = True
+ if restart_result["status"] != "ok":
+ error = True
+ utils.err(restart_result["text"], False)
+ if error:
+ if exit_after_error:
+ sys.exit(1)
+ else:
+ return
+ except (KeyError, AttributeError):
+ utils.err("Unable to communicate with pcsd", exit_after_error)
+ return
+ else:
+ utils.err("Unable to restart pcsd", exit_after_error)
+ return
+
+ # check if the restart was performed already
+ error = False
+ for _ in range(5):
+ if not instance_signatures:
+ # no more nodes to check
+ break
+ time.sleep(2)
+ for node, signature in list(instance_signatures.items()):
+ retval, output = utils.getPcsdInstanceSignature(node)
+ if retval == 0 and signature != output:
+ del instance_signatures[node]
+ print("{0}: Success".format(node))
+ elif retval in (3, 4):
+ # node not authorized or permission denied
+ del instance_signatures[node]
+ utils.err(output, False)
+ error = True
+        # if connection is refused or an http error occurs, the daemon is
+        # just restarting, so we'll try again
+ if instance_signatures:
+ for node in sorted(instance_signatures.keys()):
+ utils.err("{0}: Not restarted".format(node), False)
+ error = True
+ if error and exit_after_error:
+ sys.exit(1)
+
diff --git a/pcs/qdevice.py b/pcs/qdevice.py
index 2591bae..a6dcf7e 100644
--- a/pcs/qdevice.py
+++ b/pcs/qdevice.py
@@ -22,7 +22,7 @@ def qdevice_cmd(lib, argv, modifiers):
sub_cmd, argv_next = argv[0], argv[1:]
try:
if sub_cmd == "help":
- usage.qdevice(argv)
+ usage.qdevice([" ".join(argv_next)] if argv_next else [])
elif sub_cmd == "status":
qdevice_status_cmd(lib, argv_next, modifiers)
elif sub_cmd == "setup":
diff --git a/pcs/quorum.py b/pcs/quorum.py
index 6cd06ca..937b057 100644
--- a/pcs/quorum.py
+++ b/pcs/quorum.py
@@ -25,7 +25,7 @@ def quorum_cmd(lib, argv, modificators):
try:
if sub_cmd == "help":
- usage.quorum(argv)
+ usage.quorum([" ".join(argv_next)] if argv_next else [])
elif sub_cmd == "config":
quorum_config_cmd(lib, argv_next, modificators)
elif sub_cmd == "expected-votes":
@@ -61,6 +61,7 @@ def quorum_device_cmd(lib, argv, modificators):
elif sub_cmd == "update":
quorum_device_update_cmd(lib, argv_next, modificators)
else:
+ sub_cmd = ""
raise CmdLineInputError()
except CmdLineInputError as e:
utils.exit_on_cmdline_input_errror(
@@ -241,4 +242,3 @@ def quorum_unblock_cmd(argv):
)
utils.set_cib_property("startup-fencing", startup_fencing)
print("Waiting for nodes canceled")
-
diff --git a/pcs/resource.py b/pcs/resource.py
index 54c77c3..4d5f43a 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -7,7 +7,6 @@ from __future__ import (
import sys
import xml.dom.minidom
-from xml.dom.minidom import getDOMImplementation
from xml.dom.minidom import parseString
import re
import textwrap
@@ -22,16 +21,37 @@ from pcs import (
from pcs.settings import pacemaker_wait_timeout_status as \
PACEMAKER_WAIT_TIMEOUT_STATUS
import pcs.lib.cib.acl as lib_acl
-import pcs.lib.pacemaker as lib_pacemaker
from pcs.cli.common.errors import CmdLineInputError
from pcs.cli.common.parse_args import prepare_options
+from pcs.cli.resource.parse_args import (
+ parse_bundle_create_options,
+ parse_bundle_update_options,
+ parse_create as parse_create_args,
+)
+from pcs.lib.env_tools import get_nodes
from pcs.lib.errors import LibraryError
-from pcs.lib.pacemaker_values import timeout_to_seconds
+import pcs.lib.pacemaker.live as lib_pacemaker
+from pcs.lib.pacemaker.values import timeout_to_seconds
import pcs.lib.resource_agent as lib_ra
+from pcs.cli.common.console_report import error, warn
+from pcs.lib.commands.resource import _validate_guest_change
RESOURCE_RELOCATE_CONSTRAINT_PREFIX = "pcs-relocate-"
+def _detect_guest_change(meta_attributes, allow_not_suitable_command):
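+    # Validate whether the given meta attribute changes add or remove a guest
+    # node, using the current CIB and the nodes known to the cluster, and let
+    # the report processor handle the resulting reports;
+    # allow_not_suitable_command controls whether such a change is allowed.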
+ env = utils.get_lib_env()
+ cib = env.get_cib()
+ env.report_processor.process_list(
+ _validate_guest_change(
+ cib,
+ get_nodes(env.get_corosync_conf(), cib),
+ meta_attributes,
+ allow_not_suitable_command,
+ detect_remove=True,
+ )
+ )
+
def resource_cmd(argv):
if len(argv) < 1:
sub_cmd, argv_next = "show", []
@@ -43,24 +63,13 @@ def resource_cmd(argv):
try:
if sub_cmd == "help":
- usage.resource(argv)
+ usage.resource([" ".join(argv_next)] if argv_next else [])
elif sub_cmd == "list":
resource_list_available(lib, argv_next, modifiers)
elif sub_cmd == "describe":
resource_list_options(lib, argv_next, modifiers)
elif sub_cmd == "create":
- if len(argv_next) < 2:
- usage.resource(["create"])
- sys.exit(1)
- res_id = argv_next.pop(0)
- res_type = argv_next.pop(0)
- ra_values, op_values, meta_values, clone_opts = parse_resource_options(
- argv_next, with_clone=True
- )
- resource_create(
- res_id, res_type, ra_values, op_values, meta_values, clone_opts,
- group=utils.pcs_options.get("--group", None)
- )
+ resource_create(lib, argv_next, modifiers)
elif sub_cmd == "move":
resource_move(argv_next)
elif sub_cmd == "ban":
@@ -108,9 +117,9 @@ def resource_cmd(argv):
elif sub_cmd == "master":
resource_master(argv_next)
elif sub_cmd == "enable":
- resource_enable(argv_next)
+ resource_enable_cmd(lib, argv_next, modifiers)
elif sub_cmd == "disable":
- resource_disable(argv_next)
+ resource_disable_cmd(lib, argv_next, modifiers)
elif sub_cmd == "restart":
resource_restart(argv_next)
elif sub_cmd == "debug-start":
@@ -124,9 +133,9 @@ def resource_cmd(argv):
elif sub_cmd == "debug-monitor":
resource_force_action(sub_cmd, argv_next)
elif sub_cmd == "manage":
- resource_manage(argv_next, True)
+ resource_manage_cmd(lib, argv_next, modifiers)
elif sub_cmd == "unmanage":
- resource_manage(argv_next, False)
+ resource_unmanage_cmd(lib, argv_next, modifiers)
elif sub_cmd == "failcount":
resource_failcount(argv_next)
elif sub_cmd == "op":
@@ -177,6 +186,8 @@ def resource_cmd(argv):
set_resource_utilization(argv_next.pop(0), argv_next)
elif sub_cmd == "get_resource_agent_info":
get_resource_agent_info(argv_next)
+ elif sub_cmd == "bundle":
+ resource_bundle_cmd(lib, argv_next, modifiers)
else:
usage.resource()
sys.exit(1)
@@ -185,14 +196,12 @@ def resource_cmd(argv):
except CmdLineInputError as e:
utils.exit_on_cmdline_input_errror(e, "resource", sub_cmd)
-def parse_resource_options(argv, with_clone=False):
+def parse_resource_options(argv):
ra_values = []
op_values = []
meta_values = []
- clone_opts = []
op_args = False
meta_args = False
- clone_args = False
for arg in argv:
if arg == "op":
op_args = True
@@ -201,16 +210,8 @@ def parse_resource_options(argv, with_clone=False):
elif arg == "meta":
meta_args = True
op_args = False
- elif with_clone and arg == "clone":
- utils.pcs_options["--clone"] = ""
- clone_args = True
- op_args = False
- meta_args = False
else:
- if clone_args:
- if "=" in arg:
- clone_opts.append(arg)
- elif op_args:
+ if op_args:
if arg == "op":
op_values.append([])
elif "=" not in arg and len(op_values[-1]) != 0:
@@ -223,8 +224,6 @@ def parse_resource_options(argv, with_clone=False):
meta_values.append(arg)
else:
ra_values.append(arg)
- if with_clone:
- return ra_values, op_values, meta_values, clone_opts
return ra_values, op_values, meta_values
@@ -261,11 +260,12 @@ def resource_list_options(lib, argv, modifiers):
agent_name = argv[0]
print(_format_agent_description(
- lib.resource_agent.describe_agent(agent_name)
+ lib.resource_agent.describe_agent(agent_name),
+ show_advanced=modifiers["full"]
))
-def _format_agent_description(description, stonith=False):
+def _format_agent_description(description, stonith=False, show_advanced=False):
output = []
if description.get("name") and description.get("shortdesc"):
@@ -288,7 +288,10 @@ def _format_agent_description(description, stonith=False):
if description.get("parameters"):
output_params = []
for param in description["parameters"]:
- if param.get("advanced", False):
+            # Do not show advanced options, for example
+ # pcmk_(reboot|off|list|monitor|status)_(action|timeout|retries)
+ # for stonith agents
+ if not show_advanced and param.get("advanced", False):
continue
param_title = " ".join(filter(None, [
param.get("name"),
@@ -313,9 +316,7 @@ def _format_agent_description(description, stonith=False):
if description.get("actions"):
output_actions = []
- for action in utils.filter_default_op_from_actions(
- description["actions"]
- ):
+ for action in description["default_actions"]:
parts = [" {0}:".format(action.get("name", ""))]
parts.extend([
"{0}={1}".format(name, value)
@@ -353,245 +354,103 @@ def _format_desc(indent, desc):
return output.rstrip()
-# Create a resource using cibadmin
-# ra_class, ra_type & ra_provider must all contain valid info
-def resource_create(
- ra_id, ra_type, ra_values, op_values, meta_values=[], clone_opts=[],
- group=None
-):
- if "--wait" in utils.pcs_options:
- wait_timeout = utils.validate_wait_get_timeout()
- if "--disabled" in utils.pcs_options:
- utils.err("Cannot use '--wait' together with '--disabled'")
- do_not_run = ["target-role=stopped"]
- if (
- "--master" in utils.pcs_options or "--clone" in utils.pcs_options
- or
- clone_opts
- ):
- do_not_run.extend(["clone-max=0", "clone-node-max=0"])
- for opt in meta_values + clone_opts:
- if opt.lower() in do_not_run:
- utils.err("Cannot use '--wait' together with '%s'" % opt)
-
- ra_id_valid, ra_id_error = utils.validate_xml_id(ra_id, 'resource name')
- if not ra_id_valid:
- utils.err(ra_id_error)
-
-
- try:
- if ":" in ra_type:
- full_agent_name = ra_type
- if full_agent_name.startswith("stonith:"):
- # Maybe we can just try to get a metadata object and if it fails
- # then we know the agent is not valid. Then the is_valid_agent
- # method can be completely removed.
- is_valid_agent = lib_ra.StonithAgent(
- utils.cmd_runner(),
- full_agent_name[len("stonith:"):]
- ).is_valid_metadata()
- else:
- is_valid_agent = lib_ra.ResourceAgent(
- utils.cmd_runner(),
- full_agent_name
- ).is_valid_metadata()
- if not is_valid_agent:
- if "--force" not in utils.pcs_options:
- utils.err("Unable to create resource '{0}', it is not installed on this system (use --force to override)".format(full_agent_name))
- elif not full_agent_name.startswith("stonith:"):
- # stonith is covered in stonith.stonith_create
- if not re.match("^[^:]+(:[^:]+){1,2}$", full_agent_name):
- utils.err(
- "Invalid resource agent name '{0}'".format(
- full_agent_name
- )
- )
- print(
- "Warning: '{0}' is not installed or does not provide valid metadata".format(
- full_agent_name
- )
- )
- else:
- full_agent_name = lib_ra.guess_exactly_one_resource_agent_full_name(
- utils.cmd_runner(),
- ra_type
- ).get_name()
- print("Creating resource '{0}'".format(full_agent_name))
- except lib_ra.ResourceAgentError as e:
- utils.process_library_reports(
- [lib_ra.resource_agent_error_to_report_item(e)]
- )
- except LibraryError as e:
- utils.process_library_reports(e.args)
- agent_name_parts = split_resource_agent_name(full_agent_name)
-
-
- dom = utils.get_cib_dom()
-
- if utils.does_id_exist(dom, ra_id):
- utils.err("unable to create resource/fence device '%s', '%s' already exists on this system" % (ra_id,ra_id))
-
+def resource_create(lib, argv, modifiers):
+ if len(argv) < 2:
+ usage.resource(["create"])
+ sys.exit(1)
- for op_val in op_values:
- if len(op_val) < 2:
- utils.err(
- "When using 'op' you must specify an operation name"
- + " and at least one option"
+ ra_id = argv[0]
+ ra_type = argv[1]
+
+ parts = parse_create_args(argv[2:])
+ parts_sections = ["clone", "master", "bundle"]
+ defined_options = [opt for opt in parts_sections if opt in parts]
+ if modifiers["group"]:
+ defined_options.append("group")
+
+ if len(
+ set(defined_options).intersection(set(parts_sections + ["group"]))
+ ) > 1:
+ raise error(
+ "you can specify only one of {0} or --group".format(
+ ", ".join(parts_sections)
)
- if '=' in op_val[0]:
- utils.err(
- "When using 'op' you must specify an operation name after 'op'"
- )
-
- # If the user specifies an operation value and we find a similar one in
- # the default operations we remove if from the default operations
- op_values_agent = []
- if "--no-default-ops" not in utils.pcs_options:
- default_op_values = utils.get_default_op_values(full_agent_name)
- for def_op in default_op_values:
- match = False
- for op in op_values:
- if op[0] != def_op[0]:
- continue
- match = True
- if match == False:
- op_values_agent.append(def_op)
-
- # find duplicate operations defined in agent and make them unique
- action_intervals = dict()
- for op in op_values_agent:
- if len(op) < 1:
- continue
- op_action = op[0]
- if op_action not in action_intervals:
- action_intervals[op_action] = set()
- for key, op_setting in enumerate(op):
- if key == 0:
- continue
- match = re.match("interval=(.+)", op_setting)
- if match:
- interval = timeout_to_seconds(match.group(1))
- if interval is not None:
- if interval in action_intervals[op_action]:
- old_interval = interval
- while interval in action_intervals[op_action]:
- interval += 1
- op[key] = "interval=%s" % interval
- print(
- ("Warning: changing a %s operation interval from %s"
- + " to %s to make the operation unique")
- % (op_action, old_interval, interval)
- )
- action_intervals[op_action].add(interval)
-
- is_monitor_present = False
- for op in op_values_agent + op_values:
- if len(op) > 0:
- if op[0] == "monitor":
- is_monitor_present = True
- break
- if not is_monitor_present:
- op_values.append(['monitor'])
+ )
- if "--disabled" in utils.pcs_options:
- meta_values = [
- meta for meta in meta_values if not meta.startswith("target-role=")
- ]
- meta_values.append("target-role=Stopped")
+ if "bundle" in parts and len(parts["bundle"]) != 1:
+ raise error("you have to specify exactly one bundle")
-# If it's a master all meta values go to the master
- master_meta_values = []
- if "--master" in utils.pcs_options:
- master_meta_values = meta_values
- meta_values = []
+ if modifiers["before"] and modifiers["after"]:
+ raise error("you cannot specify both --before and --after{0}".format(
+ "" if modifiers["group"] else " and you have to specify --group"
+ ))
- instance_attributes = convert_args_to_instance_variables(ra_values,ra_id)
- primitive_values = agent_name_parts[:]
- primitive_values.insert(0,("id",ra_id))
- meta_attributes = convert_args_to_meta_attrs(meta_values, ra_id)
- if "--force" not in utils.pcs_options:
- params = utils.convert_args_to_tuples(ra_values)
- bad_opts, missing_req_opts = [], []
- try:
- if full_agent_name.startswith("stonith:"):
- metadata = lib_ra.StonithAgent(
- utils.cmd_runner(),
- full_agent_name[len("stonith:"):]
- )
- else:
- metadata = lib_ra.ResourceAgent(
- utils.cmd_runner(),
- full_agent_name
- )
- bad_opts, missing_req_opts = metadata.validate_parameters_values(
- dict(params)
- )
- except lib_ra.ResourceAgentError as e:
- utils.process_library_reports(
- [lib_ra.resource_agent_error_to_report_item(e)]
- )
- except LibraryError as e:
- utils.process_library_reports(e.args)
- if len(bad_opts) != 0:
- utils.err ("resource option(s): '%s', are not recognized for resource type: '%s' (use --force to override)" \
- % (", ".join(sorted(bad_opts)), full_agent_name))
- if len(missing_req_opts) != 0:
- utils.err(
- "missing required option(s): '%s' for resource type: %s"
- " (use --force to override)"
- % (", ".join(missing_req_opts), full_agent_name)
- )
+ if not modifiers["group"]:
+ if modifiers["before"]:
+ raise error("you cannot use --before without --group")
+ elif modifiers["after"]:
+ raise error("you cannot use --after without --group")
+
+ settings = dict(
+ allow_absent_agent=modifiers["force"],
+ allow_invalid_operation=modifiers["force"],
+ allow_invalid_instance_attributes=modifiers["force"],
+ use_default_operations=not modifiers["no-default-ops"],
+ ensure_disabled=modifiers["disabled"],
+ wait=modifiers["wait"],
+ allow_not_suitable_command=modifiers["force"],
+ )
- resource_elem = create_xml_element("primitive", primitive_values, instance_attributes + meta_attributes)
- dom.getElementsByTagName("resources")[0].appendChild(resource_elem)
- # Do not validate default operations defined by a resource agent
- # User did not entered them so we will not confuse him/her with their errors
- for op in op_values_agent:
- dom = resource_operation_add(dom, ra_id, op, validate=False)
- for op in op_values:
- dom = resource_operation_add(
- dom, ra_id, op, validate=True, validate_strict=False
+ if "clone" in parts:
+ lib.resource.create_as_clone(
+ ra_id, ra_type, parts["op"],
+ parts["meta"],
+ parts["options"],
+ parts["clone"],
+ **settings
)
-
- if "--clone" in utils.pcs_options or len(clone_opts) > 0:
- dom, dummy_clone_id = resource_clone_create(dom, [ra_id] + clone_opts)
- if group:
- print("Warning: --group ignored when creating a clone")
- if "--master" in utils.pcs_options:
- print("Warning: --master ignored when creating a clone")
- elif "--master" in utils.pcs_options:
- dom, dummy_master_id = resource_master_create(
- dom, [ra_id] + master_meta_values
+ elif "master" in parts:
+ lib.resource.create_as_master(
+ ra_id, ra_type, parts["op"],
+ parts["meta"],
+ parts["options"],
+ parts["master"],
+ **settings
+ )
+ elif "bundle" in parts:
+ lib.resource.create_into_bundle(
+ ra_id, ra_type, parts["op"],
+ parts["meta"],
+ parts["options"],
+ parts["bundle"][0],
+ **settings
)
- if group:
- print("Warning: --group ignored when creating a master")
- elif group:
- dom = resource_group_add(dom, group, [ra_id])
-
- utils.replace_cib_configuration(dom)
- if "--wait" in utils.pcs_options:
- args = ["crm_resource", "--wait"]
- if wait_timeout:
- args.extend(["--timeout=%s" % wait_timeout])
- output, retval = utils.run(args)
- running_on = utils.resource_running_on(ra_id)
- if retval == 0 and running_on["is_running"]:
- print(running_on["message"])
- else:
- msg = []
- if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
- msg.append("waiting timeout")
- else:
- msg.append(
- "unable to start: '%s', please check logs for failure "
- "information"
- % ra_id
- )
- msg.append(running_on["message"])
- if retval != 0 and output:
- msg.append("\n" + output)
- utils.err("\n".join(msg).strip())
+ elif not modifiers["group"]:
+ lib.resource.create(
+ ra_id, ra_type, parts["op"],
+ parts["meta"],
+ parts["options"],
+ **settings
+ )
+ else:
+ adjacent_resource_id = None
+ put_after_adjacent = False
+ if modifiers["after"]:
+ adjacent_resource_id = modifiers["after"]
+ put_after_adjacent = True
+ if modifiers["before"]:
+ adjacent_resource_id = modifiers["before"]
+ put_after_adjacent = False
+
+ lib.resource.create_in_group(
+ ra_id, ra_type, modifiers["group"], parts["op"],
+ parts["meta"],
+ parts["options"],
+ adjacent_resource_id=adjacent_resource_id,
+ put_after_adjacent=put_after_adjacent,
+ **settings
+ )
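
For illustration, a hypothetical invocation and the library call it would roughly translate to under the dispatch above (resource, agent and group names are made up):

    pcs resource create vip ocf:heartbeat:IPaddr2 ip=192.168.0.99 --group net-grp --after web

    lib.resource.create_in_group(
        "vip", "ocf:heartbeat:IPaddr2", "net-grp",
        parts["op"], parts["meta"], parts["options"],
        adjacent_resource_id="web",
        put_after_adjacent=True,
        **settings
    )
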
def resource_move(argv,clear=False,ban=False):
other_options = []
@@ -634,6 +493,8 @@ def resource_move(argv,clear=False,ban=False):
not utils.dom_get_master(dom, resource_id)
and
not utils.dom_get_clone(dom, resource_id)
+ and
+ not utils.dom_get_bundle(dom, resource_id)
):
utils.err("%s is not a valid resource" % resource_id)
@@ -646,6 +507,8 @@ def resource_move(argv,clear=False,ban=False):
utils.dom_get_resource_clone(dom, resource_id)
or
utils.dom_get_group_clone(dom, resource_id)
+ or
+ utils.dom_get_bundle(dom, resource_id)
)
):
utils.err("cannot move cloned resources")
@@ -822,14 +685,18 @@ def resource_agents(lib, argv, modifiers):
" for {0}".format(argv[0]) if argv else ""
))
-
# Update a resource, removing any args that are empty and adding/updating
# args that are not empty
-def resource_update(res_id,args):
+def resource_update(res_id,args, deal_with_guest_change=True):
dom = utils.get_cib_dom()
# Extract operation arguments
ra_values, op_values, meta_values = parse_resource_options(args)
+ if deal_with_guest_change:
+ _detect_guest_change(
+ prepare_options(meta_values),
+ "--force" in utils.pcs_options,
+ )
wait = False
wait_timeout = None
@@ -1026,7 +893,7 @@ def resource_update_clone_master(
return dom
def resource_operation_add(
- dom, res_id, argv, validate=True, validate_strict=True, before_op=None
+ dom, res_id, argv, validate_strict=True, before_op=None
):
if len(argv) < 1:
usage.resource(["op"])
@@ -1039,12 +906,11 @@ def resource_operation_add(
op_name = argv.pop(0)
op_properties = utils.convert_args_to_tuples(argv)
- if validate:
- if "=" in op_name:
- utils.err(
- "%s does not appear to be a valid operation action" % op_name
- )
- if validate and "--force" not in utils.pcs_options:
+ if "=" in op_name:
+ utils.err(
+ "%s does not appear to be a valid operation action" % op_name
+ )
+ if "--force" not in utils.pcs_options:
valid_attrs = ["id", "name", "interval", "description", "start-delay",
"interval-origin", "timeout", "enabled", "record-pending", "role",
"requires", "on-fail", "OCF_CHECK_LEVEL"]
@@ -1101,36 +967,35 @@ def resource_operation_add(
res_el.appendChild(operations)
else:
operations = operations[0]
- if validate:
- duplicate_op_list = utils.operation_exists(operations, op_el)
- if duplicate_op_list:
- utils.err(
- "operation %s with interval %ss already specified for %s:\n%s"
- % (
- op_el.getAttribute("name"),
- timeout_to_seconds(
- op_el.getAttribute("interval"), True
- ),
- res_id,
- "\n".join([
- operation_to_string(op) for op in duplicate_op_list
- ])
- )
- )
- if validate_strict and "--force" not in utils.pcs_options:
- duplicate_op_list = utils.operation_exists_by_name(
- operations, op_el
+ duplicate_op_list = utils.operation_exists(operations, op_el)
+ if duplicate_op_list:
+ utils.err(
+ "operation %s with interval %ss already specified for %s:\n%s"
+ % (
+ op_el.getAttribute("name"),
+ timeout_to_seconds(
+ op_el.getAttribute("interval"), True
+ ),
+ res_id,
+ "\n".join([
+ operation_to_string(op) for op in duplicate_op_list
+ ])
)
- if duplicate_op_list:
- msg = ("operation {action} already specified for {res}"
- + ", use --force to override:\n{op}")
- utils.err(msg.format(
- action=op_el.getAttribute("name"),
- res=res_id,
- op="\n".join([
- operation_to_string(op) for op in duplicate_op_list
- ])
- ))
+ )
+ if validate_strict and "--force" not in utils.pcs_options:
+ duplicate_op_list = utils.operation_exists_by_name(
+ operations, op_el
+ )
+ if duplicate_op_list:
+ msg = ("operation {action} already specified for {res}"
+ + ", use --force to override:\n{op}")
+ utils.err(msg.format(
+ action=op_el.getAttribute("name"),
+ res=res_id,
+ op="\n".join([
+ operation_to_string(op) for op in duplicate_op_list
+ ])
+ ))
operations.insertBefore(op_el, before_op)
return dom
@@ -1196,6 +1061,11 @@ def resource_operation_remove(res_id, argv):
utils.replace_cib_configuration(dom)
def resource_meta(res_id, argv):
+ _detect_guest_change(
+ prepare_options(argv),
+ "--force" in utils.pcs_options,
+ )
+
dom = utils.get_cib_dom()
resource_el = utils.dom_get_any_resource(dom, res_id)
@@ -1238,61 +1108,6 @@ def resource_meta(res_id, argv):
msg.append("\n" + output)
utils.err("\n".join(msg).strip())
-def convert_args_to_meta_attrs(meta_attrs, ra_id):
- if len(meta_attrs) == 0:
- return []
-
- meta_vars = []
- tuples = utils.convert_args_to_tuples(meta_attrs)
- attribute_id = ra_id + "-meta_attributes"
- for (a,b) in tuples:
- meta_vars.append(("nvpair",[("name",a),("value",b),("id",attribute_id+"-"+a)],[]))
- ret = ("meta_attributes", [[("id"), (attribute_id)]], meta_vars)
- return [ret]
-
-def convert_args_to_instance_variables(ra_values, ra_id):
- tuples = utils.convert_args_to_tuples(ra_values)
- ivs = []
- attribute_id = ra_id + "-instance_attributes"
- for (a,b) in tuples:
- ivs.append(("nvpair",[("name",a),("value",b),("id",attribute_id+"-"+a)],[]))
- ret = ("instance_attributes", [[("id"),(attribute_id)]], ivs)
- return [ret]
-
-def split_resource_agent_name(full_agent_name):
- match = re.match(
- "^(?P<standard>[^:]+)(:(?P<provider>[^:]+))?:(?P<type>[^:]+)$",
- full_agent_name
- )
- if not match:
- utils.err(
- "Invalid resource agent name '{0}'".format(
- full_agent_name
- )
- )
- parts = [
- ("class", match.group("standard")),
- ("type", match.group("type")),
- ]
- if match.group("provider"):
- parts.append(
- ("provider", match.group("provider"))
- )
- return parts
-
-
-def create_xml_element(tag, options, children = []):
- impl = getDOMImplementation()
- newdoc = impl.createDocument(None, tag, None)
- element = newdoc.documentElement
-
- for option in options:
- element.setAttribute(option[0],option[1])
-
- for child in children:
- element.appendChild(create_xml_element(child[0], child[1], child[2]))
-
- return element
def resource_group(argv):
if (len(argv) == 0):
@@ -1404,6 +1219,9 @@ def resource_clone_create(cib_dom, argv, update_existing=False):
if not element:
utils.err("unable to find group or resource: %s" % name)
+ if element.parentNode.tagName == "bundle":
+ utils.err("cannot clone bundle resource")
+
if not update_existing:
if utils.dom_get_resource_clone(cib_dom, name):
utils.err("%s is already a clone resource" % name)
@@ -1575,6 +1393,12 @@ def resource_master_create(dom, argv, update=False, master_id=None):
break
if not rg_found:
utils.err("Unable to find resource or group with id %s" % rg_id)
+
+ if resource.parentNode.tagName == "bundle":
+ utils.err(
+ "cannot make a master/slave resource from a bundle resource"
+ )
+
# If the resource elements parent is a group, and it's the last
# element in the group, we remove the group
if resource.parentNode.tagName == "group" and resource.parentNode.getElementsByTagName("primitive").length <= 1:
@@ -1600,13 +1424,30 @@ def resource_master_create(dom, argv, update=False, master_id=None):
return dom, master_element.getAttribute("id")
-def resource_remove(resource_id, output = True):
+def resource_remove(resource_id, output=True, is_remove_remote_context=False):
dom = utils.get_cib_dom()
# if resource is a clone or a master, work with its child instead
cloned_resource = utils.dom_get_clone_ms_resource(dom, resource_id)
if cloned_resource:
resource_id = cloned_resource.getAttribute("id")
+ bundle = utils.dom_get_bundle(dom, resource_id)
+ if bundle is not None:
+ primitive_el = utils.dom_get_resource_bundle(bundle)
+ if primitive_el is not None:
+ resource_remove(primitive_el.getAttribute("id"))
+ utils.replace_cib_configuration(
+ remove_resource_references(utils.get_cib_dom(), resource_id, output)
+ )
+ args = [
+ "cibadmin", "-o", "resources", "-D", "--xpath",
+ "//bundle[@id='{0}']".format(resource_id)
+ ]
+ dummy_cmdoutput, retVal = utils.run(args)
+ if retVal != 0:
+ utils.err("Unable to remove resource '{0}'".format(resource_id))
+ return True
+
if utils.does_exist('//group[@id="'+resource_id+'"]'):
print("Removing group: " + resource_id + " (and all resources within group)")
group = utils.get_cib_xpath('//group[@id="'+resource_id+'"]')
@@ -1671,7 +1512,11 @@ def resource_remove(resource_id, output = True):
):
sys.stdout.write("Attempting to stop: "+ resource_id + "...")
sys.stdout.flush()
- resource_disable([resource_id])
+ lib = utils.get_library_wrapper()
+        # we are not using wait from the disable command: if wait is not
+        # supported in pacemaker, we do not want an error message, so we
+        # simulate the wait by waiting for the resource to stop instead
+ lib.resource.disable([resource_id], False)
output, retval = utils.run(["crm_resource", "--wait"])
if retval != 0 and "unrecognized option '--wait'" in output:
output = ""
@@ -1768,11 +1613,19 @@ def resource_remove(resource_id, output = True):
utils.err("Unable to remove resource '%s' (do constraints exist?)" % (resource_id))
return False
if remote_node_name and not utils.usefile:
+ if not is_remove_remote_context:
+ warn(
+ "This command is not sufficient for removing remote and guest "
+ "nodes. To complete the removal, remove pacemaker authkey and "
+ "stop and disable pacemaker_remote on the node(s) manually."
+ )
+ output, retval = utils.run(["crm_resource", "--wait"])
output, retval = utils.run([
"crm_node", "--force", "--remove", remote_node_name
])
return True
+# moved to pcs.lib.cib.fencing_topology.remove_device_from_all_levels
def stonith_level_rm_device(cib_dom, stn_id):
topology_el_list = cib_dom.getElementsByTagName("fencing-topology")
if not topology_el_list:
@@ -1911,6 +1764,8 @@ def resource_group_add(cib_dom, group_name, resource_ids):
utils.err("cannot group master/slave resources")
if resource.parentNode.tagName == "clone":
utils.err("cannot group clone resources")
+ if resource.parentNode.tagName == "bundle":
+ utils.err("cannot group bundle resources")
resources_to_move.append(resource)
resource_found = True
break
@@ -2049,6 +1904,19 @@ def resource_show(argv, stonith=False):
utils.err("unable to find resource '"+arg+"'")
resource_found = False
+def resource_disable_cmd(lib, argv, modifiers):
+ if len(argv) < 1:
+ utils.err("You must specify resource(s) to disable")
+ resources = argv
+ lib.resource.disable(resources, modifiers["wait"])
+
+def resource_enable_cmd(lib, argv, modifiers):
+ if len(argv) < 1:
+ utils.err("You must specify resource(s) to enable")
+ resources = argv
+ lib.resource.enable(resources, modifiers["wait"])
+
+#DEPRECATED, moved to pcs.lib.commands.resource
def resource_disable(argv):
if len(argv) < 1:
utils.err("You must specify a resource to disable")
@@ -2089,76 +1957,6 @@ def resource_disable(argv):
msg.append("\n" + output)
utils.err("\n".join(msg).strip())
-def resource_enable(argv):
- if len(argv) < 1:
- utils.err("You must specify a resource to enable")
-
- resource = argv[0]
- cib_dom = utils.get_cib_dom()
-
- resource_clone = (
- utils.dom_get_clone(cib_dom, resource)
- or
- utils.dom_get_master(cib_dom, resource)
- )
- if resource_clone:
- resource_main = utils.dom_elem_get_clone_ms_resource(resource_clone)
- else:
- resource_main = (
- utils.dom_get_resource(cib_dom, resource)
- or
- utils.dom_get_group(cib_dom, resource)
- )
- if not resource_main:
- utils.err(
- "unable to find a resource/clone/master/group: {0}".format(
- resource
- )
- )
- resource_clone = utils.dom_elem_get_resource_clone_ms_parent(
- resource_main
- )
- resources_to_enable = [resource_main.getAttribute("id")]
- if resource_clone:
- resources_to_enable.append(resource_clone.getAttribute("id"))
-
- for res in resources_to_enable:
- if not is_managed(res):
- print("Warning: '{0}' is unmanaged".format(res))
-
- if "--wait" in utils.pcs_options:
- wait_timeout = utils.validate_wait_get_timeout()
-
- for res in resources_to_enable:
- args = ["crm_resource", "-r", res, "-m", "-d", "target-role"]
- output, retval = utils.run(args)
- if retval != 0:
- utils.err (output)
-
- if "--wait" in utils.pcs_options:
- args = ["crm_resource", "--wait"]
- if wait_timeout:
- args.extend(["--timeout=%s" % wait_timeout])
- output, retval = utils.run(args)
- running_on = utils.resource_running_on(resource)
- if retval == 0 and running_on["is_running"]:
- print(running_on["message"])
- return True
- else:
- msg = []
- if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
- msg.append("waiting timeout")
- else:
- msg.append(
- "unable to start: '%s', please check logs for failure "
- "information"
- % resource
- )
- msg.append(running_on["message"])
- if retval != 0 and output:
- msg.append("\n" + output)
- utils.err("\n".join(msg).strip())
-
def resource_restart(argv):
if len(argv) < 1:
utils.err("You must specify a resource to restart")
@@ -2213,10 +2011,27 @@ def resource_force_action(action, argv):
resource = argv[0]
dom = utils.get_cib_dom()
- if not utils.dom_get_any_resource(dom, resource):
+ if not (
+ utils.dom_get_any_resource(dom, resource)
+ or
+ utils.dom_get_bundle(dom, resource)
+ ):
utils.err(
- "unable to find a resource/clone/master/group: {0}".format(resource)
+ "unable to find a resource/clone/master/group/bundle: {0}".format(
+ resource
+ )
)
+ bundle = utils.dom_get_bundle(dom, resource)
+ if bundle:
+ bundle_resource = utils.dom_get_resource_bundle(bundle)
+ if bundle_resource:
+ utils.err(
+ "unable to {0} a bundle, try the bundle's resource: {1}".format(
+ action, bundle_resource.getAttribute("id")
+ )
+ )
+ else:
+ utils.err("unable to {0} a bundle".format(action))
if utils.dom_get_group(dom, resource):
group_resources = utils.get_group_children(resource)
utils.err(
@@ -2256,59 +2071,19 @@ def resource_force_action(action, argv):
print(output, end="")
sys.exit(retval)
-def resource_manage(argv, set_managed):
- if len(argv) == 0:
- usage.resource()
- sys.exit(1)
-
- for resource in argv:
- if not utils.does_exist("(//primitive|//group|//master|//clone)[@id='"+resource+"']"):
- utils.err("%s doesn't exist." % resource)
-
- dom = utils.get_cib_dom()
- for resource in argv:
- isGroup = False
- isResource = False
- for el in dom.getElementsByTagName("group") + dom.getElementsByTagName("master") + dom.getElementsByTagName("clone"):
- if el.getAttribute("id") == resource:
- group = el
- isGroup = True
- break
-
- if isGroup:
- res_to_manage = []
- for el in group.getElementsByTagName("primitive"):
- res_to_manage.append(el.getAttribute("id"))
- else:
- for el in dom.getElementsByTagName("primitive"):
- if el.getAttribute("id") == resource:
- isResource = True
- break
-
- if not set_managed:
- if isResource:
- (output, retval) = utils.set_unmanaged(resource)
- elif isGroup:
- for res in res_to_manage:
- (output, retval) = utils.set_unmanaged(res)
- retval = 0
- else:
- utils.err("unable to find resource/group: %s")
+def resource_manage_cmd(lib, argv, modifiers):
+ if len(argv) < 1:
+ utils.err("You must specify resource(s) to manage")
+ resources = argv
+ lib.resource.manage(resources, modifiers["monitor"])
- if retval != 0:
- utils.err("error attempting to unmanage resource: %s" % output)
- else:
- # Remove the meta attribute from the id specified (and all children)
- xpath = "(//primitive|//group|//clone|//master)[@id='"+resource+"']//meta_attributes/nvpair[@name='is-managed']"
- utils.run(["cibadmin", "-d", "--xpath", xpath, "--force"])
- # Remove the meta attribute from the parent of the id specified, if the parent is a clone or master
- xpath = "(//master|//clone)[(group|primitive)[@id='"+resource+"']]/meta_attributes/nvpair[@name='is-managed']"
- utils.run(["cibadmin", "-D", "--xpath", xpath])
- if isGroup:
- for res in res_to_manage:
- xpath = "(//primitive|//group|//clone|//master)[@id='"+res+"']/meta_attributes/nvpair[@name='is-managed']"
- utils.run(["cibadmin", "-D", "--xpath", xpath])
+def resource_unmanage_cmd(lib, argv, modifiers):
+ if len(argv) < 1:
+ utils.err("You must specify resource(s) to unmanage")
+ resources = argv
+ lib.resource.unmanage(resources, modifiers["monitor"])
+# moved to pcs.lib.pacemaker.state
def is_managed(resource_id):
state_dom = utils.getClusterState()
for resource_el in state_dom.getElementsByTagName("resource"):
@@ -2426,6 +2201,7 @@ def print_node(node, tab = 0):
print_operations(node, spaces)
for child in node:
print_node(child, tab + 1)
+ return
if node.tag == "clone":
print(spaces + "Clone: " + node.attrib["id"] + get_attrs(node,' (',')'))
print_instance_vars_string(node, spaces)
@@ -2433,12 +2209,14 @@ def print_node(node, tab = 0):
print_operations(node, spaces)
for child in node:
print_node(child, tab + 1)
+ return
if node.tag == "primitive":
print(spaces + "Resource: " + node.attrib["id"] + get_attrs(node,' (',')'))
print_instance_vars_string(node, spaces)
print_meta_vars_string(node, spaces)
print_utilization_string(node, spaces)
print_operations(node, spaces)
+ return
if node.tag == "master":
print(spaces + "Master: " + node.attrib["id"] + get_attrs(node, ' (', ')'))
print_instance_vars_string(node, spaces)
@@ -2446,6 +2224,52 @@ def print_node(node, tab = 0):
print_operations(node, spaces)
for child in node:
print_node(child, tab + 1)
+ return
+ if node.tag == "bundle":
+ print(spaces + "Bundle: " + node.attrib["id"] + get_attrs(node, ' (', ')'))
+ print_bundle_container(node, spaces + " ")
+ print_bundle_network(node, spaces + " ")
+ print_bundle_mapping(
+ "Port Mapping:",
+ node.findall("network/port-mapping"),
+ spaces + " "
+ )
+ print_bundle_mapping(
+ "Storage Mapping:",
+ node.findall("storage/storage-mapping"),
+ spaces + " "
+ )
+ for child in node:
+ print_node(child, tab + 1)
+ return
+
+def print_bundle_container(bundle_el, spaces):
+ # TODO support other types of container once supported by pacemaker
+ container_list = bundle_el.findall("docker")
+ for container_el in container_list:
+ print(
+ spaces
+ +
+ container_el.tag.capitalize()
+ +
+ get_attrs(container_el, ": ", "")
+ )
+
+def print_bundle_network(bundle_el, spaces):
+ network_list = bundle_el.findall("network")
+ for network_el in network_list:
+ attrs_string = get_attrs(network_el)
+ if attrs_string:
+ print(spaces + "Network: " + attrs_string)
+
+def print_bundle_mapping(first_line, map_items, spaces):
+ map_lines = [
+ spaces + " " + get_attrs(item, "", " ") + "(" + item.attrib["id"] + ")"
+ for item in map_items
+ ]
+ if map_lines:
+ print(spaces + first_line)
+ print("\n".join(map_lines))
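
For a minimal bundle such as the B1 bundle from the tests further below, the printing helpers above would render output roughly like this (sketched, exact indentation may differ):

    Bundle: B1
     Docker: image=pcs:test
     Network: control-port=12345
     Port Mapping:
      port=1001 (B1-port-map-1001)
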
def print_utilization_string(element, spaces):
output = []
@@ -2519,6 +2343,8 @@ def get_attrs(node, prepend_string = "", append_string = ""):
for attr,val in sorted(node.attrib.items()):
if attr in ["id"]:
continue
+ if " " in val:
+ val = '"' + val + '"'
output += attr + "=" + val + " "
if output != "":
return prepend_string + output.rstrip() + append_string
@@ -2813,3 +2639,57 @@ def get_resource_agent_info(argv):
)
except LibraryError as e:
utils.process_library_reports(e.args)
+
+def resource_bundle_cmd(lib, argv, modifiers):
+ try:
+ if len(argv) < 1:
+ sub_cmd = ""
+ raise CmdLineInputError()
+ sub_cmd, argv_next = argv[0], argv[1:]
+
+ if sub_cmd == "create":
+ resource_bundle_create_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "update":
+ resource_bundle_update_cmd(lib, argv_next, modifiers)
+ else:
+ sub_cmd = ""
+ raise CmdLineInputError()
+ except CmdLineInputError as e:
+ utils.exit_on_cmdline_input_errror(
+ e, "resource", "bundle {0}".format(sub_cmd)
+ )
+
+def resource_bundle_create_cmd(lib, argv, modifiers):
+ if len(argv) < 1:
+ raise CmdLineInputError()
+
+ bundle_id = argv[0]
+ parts = parse_bundle_create_options(argv[1:])
+ lib.resource.bundle_create(
+ bundle_id,
+ parts["container_type"],
+ parts["container"],
+ parts["network"],
+ parts["port_map"],
+ parts["storage_map"],
+ modifiers["force"],
+ modifiers["wait"]
+ )
+
+def resource_bundle_update_cmd(lib, argv, modifiers):
+ if len(argv) < 1:
+ raise CmdLineInputError()
+
+ bundle_id = argv[0]
+ parts = parse_bundle_update_options(argv[1:])
+ lib.resource.bundle_update(
+ bundle_id,
+ parts["container"],
+ parts["network"],
+ parts["port_map_add"],
+ parts["port_map_remove"],
+ parts["storage_map_add"],
+ parts["storage_map_remove"],
+ modifiers["force"],
+ modifiers["wait"]
+ )
diff --git a/pcs/settings_default.py b/pcs/settings_default.py
index bff96e7..72c91f4 100644
--- a/pcs/settings_default.py
+++ b/pcs/settings_default.py
@@ -19,13 +19,15 @@ corosync_qdevice_net_client_certs_dir = os.path.join(
"qdevice/net/nssdb"
)
corosync_qdevice_net_client_ca_file_name = "qnetd-cacert.crt"
+corosync_authkey_file = os.path.join(corosync_conf_dir, "authkey")
+pacemaker_authkey_file = "/etc/pacemaker/authkey"
cluster_conf_file = "/etc/cluster/cluster.conf"
fence_agent_binaries = "/usr/sbin/"
pengine_binary = "/usr/libexec/pacemaker/pengine"
crmd_binary = "/usr/libexec/pacemaker/crmd"
cib_binary = "/usr/libexec/pacemaker/cib"
stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.155"
+pcs_version = "0.9.158"
crm_report = pacemaker_binaries + "crm_report"
crm_verify = pacemaker_binaries + "crm_verify"
crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng'
@@ -39,8 +41,14 @@ pcsd_exec_location = "/usr/lib/pcsd/"
cib_dir = "/var/lib/pacemaker/cib/"
pacemaker_uname = "hacluster"
pacemaker_gname = "haclient"
+sbd_binary = "/usr/sbin/sbd"
sbd_watchdog_default = "/dev/watchdog"
sbd_config = "/etc/sysconfig/sbd"
+# this limit is also mentioned in docs, change there as well
+sbd_max_device_num = 3
+# message types are also mentioned in docs, change there as well
+sbd_message_types = ["test", "reset", "off", "crashdump", "exit", "clear"]
pacemaker_wait_timeout_status = 62
booth_config_dir = "/etc/booth"
booth_binary = "/usr/sbin/booth"
+default_request_timeout = 60
diff --git a/pcs/status.py b/pcs/status.py
index 86216ea..d6ade5a 100644
--- a/pcs/status.py
+++ b/pcs/status.py
@@ -17,7 +17,7 @@ from pcs.qdevice import qdevice_status_cmd
from pcs.quorum import quorum_status_cmd
from pcs.cli.common.errors import CmdLineInputError
from pcs.lib.errors import LibraryError
-from pcs.lib.pacemaker_state import ClusterState
+from pcs.lib.pacemaker.state import ClusterState
def status_cmd(argv):
if len(argv) == 0:
diff --git a/pcs/stonith.py b/pcs/stonith.py
index bce346d..aa5fbcd 100644
--- a/pcs/stonith.py
+++ b/pcs/stonith.py
@@ -5,9 +5,8 @@ from __future__ import (
unicode_literals,
)
-import sys
-import re
import json
+import sys
from pcs import (
resource,
@@ -15,9 +14,17 @@ from pcs import (
utils,
)
from pcs.cli.common import parse_args
+from pcs.cli.common.console_report import indent, error
from pcs.cli.common.errors import CmdLineInputError
-from pcs.cli.common.reports import build_report_message
-from pcs.lib.errors import LibraryError, ReportItemSeverity
+from pcs.cli.fencing_topology import target_type_map_cli_to_lib
+from pcs.cli.resource.parse_args import parse_create_simple as parse_create_args
+from pcs.common import report_codes
+from pcs.common.fencing_topology import (
+ TARGET_TYPE_NODE,
+ TARGET_TYPE_REGEXP,
+ TARGET_TYPE_ATTRIBUTE,
+)
+from pcs.lib.errors import LibraryError
import pcs.lib.resource_agent as lib_ra
def stonith_cmd(argv):
@@ -31,13 +38,13 @@ def stonith_cmd(argv):
try:
if sub_cmd == "help":
- usage.stonith(argv)
+ usage.stonith([" ".join(argv_next)] if argv_next else [])
elif sub_cmd == "list":
stonith_list_available(lib, argv_next, modifiers)
elif sub_cmd == "describe":
stonith_list_options(lib, argv_next, modifiers)
elif sub_cmd == "create":
- stonith_create(argv_next)
+ stonith_create(lib, argv_next, modifiers)
elif sub_cmd == "update":
if len(argv_next) > 1:
stn_id = argv_next.pop(0)
@@ -52,9 +59,13 @@ def stonith_cmd(argv):
raise CmdLineInputError()
elif sub_cmd == "show":
resource.resource_show(argv_next, True)
- stonith_level([])
+ levels = stonith_level_config_to_str(
+ lib.fencing_topology.get_config()
+ )
+ if levels:
+ print("\n".join(indent(levels, 1)))
elif sub_cmd == "level":
- stonith_level(argv_next)
+ stonith_level_cmd(lib, argv_next, modifiers)
elif sub_cmd == "fence":
stonith_fence(argv_next)
elif sub_cmd == "cleanup":
@@ -65,6 +76,10 @@ def stonith_cmd(argv):
get_fence_agent_info(argv_next)
elif sub_cmd == "sbd":
sbd_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "enable":
+ resource.resource_enable_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "disable":
+ resource.resource_disable_cmd(lib, argv_next, modifiers)
else:
raise CmdLineInputError()
except LibraryError as e:
@@ -72,6 +87,30 @@ def stonith_cmd(argv):
except CmdLineInputError as e:
utils.exit_on_cmdline_input_errror(e, "stonith", sub_cmd)
+def stonith_level_cmd(lib, argv, modifiers):
+ if len(argv) < 1:
+ sub_cmd, argv_next = "config", []
+ else:
+ sub_cmd, argv_next = argv[0], argv[1:]
+
+ try:
+ if sub_cmd == "add":
+ stonith_level_add_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "clear":
+ stonith_level_clear_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "config":
+ stonith_level_config_cmd(lib, argv_next, modifiers)
+ elif sub_cmd in ["remove", "delete"]:
+ stonith_level_remove_cmd(lib, argv_next, modifiers)
+ elif sub_cmd == "verify":
+ stonith_level_verify_cmd(lib, argv_next, modifiers)
+ else:
+ sub_cmd = ""
+ raise CmdLineInputError()
+ except CmdLineInputError as e:
+ utils.exit_on_cmdline_input_errror(
+ e, "stonith", "level {0}".format(sub_cmd)
+ )
def stonith_list_available(lib, argv, modifiers):
if len(argv) > 1:
@@ -109,238 +148,239 @@ def stonith_list_options(lib, argv, modifiers):
print(resource._format_agent_description(
lib.stonith_agent.describe_agent(agent_name),
- True
+ stonith=True,
+ show_advanced=modifiers["full"]
))
+def stonith_create(lib, argv, modifiers):
+ if modifiers["before"] and modifiers["after"]:
+ raise error("you cannot specify both --before and --after{0}".format(
+ "" if modifiers["group"] else " and you have to specify --group"
+ ))
+
+ if not modifiers["group"]:
+ if modifiers["before"]:
+ raise error("you cannot use --before without --group")
+ elif modifiers["after"]:
+ raise error("you cannot use --after without --group")
-def stonith_create(argv):
if len(argv) < 2:
usage.stonith(["create"])
sys.exit(1)
- stonith_id = argv.pop(0)
- stonith_type = argv.pop(0)
- st_values, op_values, meta_values = resource.parse_resource_options(
- argv, with_clone=False
- )
+ stonith_id = argv[0]
+ stonith_type = argv[1]
- try:
- metadata = lib_ra.StonithAgent(
- utils.cmd_runner(),
- stonith_type
- )
- if metadata.get_provides_unfencing():
- meta_values = [
- meta for meta in meta_values if not meta.startswith("provides=")
- ]
- meta_values.append("provides=unfencing")
- except lib_ra.ResourceAgentError as e:
- forced = utils.get_modificators().get("force", False)
- if forced:
- severity = ReportItemSeverity.WARNING
- else:
- severity = ReportItemSeverity.ERROR
- utils.process_library_reports([
- lib_ra.resource_agent_error_to_report_item(
- e, severity, not forced
- )
- ])
- except LibraryError as e:
- utils.process_library_reports(e.args)
+ parts = parse_create_args(argv[2:])
- resource.resource_create(
- stonith_id, "stonith:" + stonith_type, st_values, op_values, meta_values,
- group=utils.pcs_options.get("--group", None)
+ settings = dict(
+ allow_absent_agent=modifiers["force"],
+ allow_invalid_operation=modifiers["force"],
+ allow_invalid_instance_attributes=modifiers["force"],
+ ensure_disabled=modifiers["disabled"],
+ use_default_operations=not modifiers["no-default-ops"],
+ wait=modifiers["wait"],
)
-def stonith_level(argv):
- if len(argv) == 0:
- stonith_level_show()
- return
-
- subcmd = argv.pop(0)
-
- if subcmd == "add":
- if len(argv) < 3:
- usage.stonith(["level add"])
- sys.exit(1)
- stonith_level_add(argv[0], argv[1], ",".join(argv[2:]))
- elif subcmd in ["remove","delete"]:
- if len(argv) < 1:
- usage.stonith(["level remove"])
- sys.exit(1)
-
- node = ""
- devices = ""
- if len(argv) == 2:
- node = argv[1]
- elif len(argv) > 2:
- node = argv[1]
- devices = ",".join(argv[2:])
-
- stonith_level_rm(argv[0], node, devices)
- elif subcmd == "clear":
- if len(argv) == 0:
- stonith_level_clear()
- else:
- stonith_level_clear(argv[0])
- elif subcmd == "verify":
- stonith_level_verify()
- else:
- print("pcs stonith level: invalid option -- '%s'" % subcmd)
- usage.stonith(["level"])
- sys.exit(1)
-
-def stonith_level_add(level, node, devices):
- dom = utils.get_cib_dom()
-
- if not re.search(r'^\d+$', level) or re.search(r'^0+$', level):
- utils.err("invalid level '{0}', use a positive integer".format(level))
- level = level.lstrip('0')
- if "--force" not in utils.pcs_options:
- for dev in devices.split(","):
- if not utils.is_stonith_resource(dev):
- utils.err("%s is not a stonith id (use --force to override)" % dev)
- corosync_nodes = []
- if utils.hasCorosyncConf():
- corosync_nodes = utils.getNodesFromCorosyncConf()
- pacemaker_nodes = utils.getNodesFromPacemaker()
- if node not in corosync_nodes and node not in pacemaker_nodes:
- utils.err("%s is not currently a node (use --force to override)" % node)
-
- ft = dom.getElementsByTagName("fencing-topology")
- if len(ft) == 0:
- conf = dom.getElementsByTagName("configuration")[0]
- ft = dom.createElement("fencing-topology")
- conf.appendChild(ft)
- else:
- ft = ft[0]
-
- fls = ft.getElementsByTagName("fencing-level")
- for fl in fls:
- if fl.getAttribute("target") == node and fl.getAttribute("index") == level and fl.getAttribute("devices") == devices:
- utils.err("unable to add fencing level, fencing level for node: %s, at level: %s, with device: %s already exists" % (node,level,devices))
-
- new_fl = dom.createElement("fencing-level")
- ft.appendChild(new_fl)
- new_fl.setAttribute("target", node)
- new_fl.setAttribute("index", level)
- new_fl.setAttribute("devices", devices)
- new_fl.setAttribute("id", utils.find_unique_id(dom, "fl-" + node +"-" + level))
-
- utils.replace_cib_configuration(dom)
-
-def stonith_level_rm(level, node, devices):
- dom = utils.get_cib_dom()
-
- if devices != "":
- node_devices_combo = node + "," + devices
+ if not modifiers["group"]:
+ lib.stonith.create(
+ stonith_id, stonith_type, parts["op"],
+ parts["meta"],
+ parts["options"],
+ **settings
+ )
else:
- node_devices_combo = node
+ adjacent_resource_id = None
+ put_after_adjacent = False
+ if modifiers["after"]:
+ adjacent_resource_id = modifiers["after"]
+ put_after_adjacent = True
+ if modifiers["before"]:
+ adjacent_resource_id = modifiers["before"]
+ put_after_adjacent = False
+
+ lib.stonith.create_in_group(
+ stonith_id, stonith_type, modifiers["group"], parts["op"],
+ parts["meta"],
+ parts["options"],
+ adjacent_resource_id=adjacent_resource_id,
+ put_after_adjacent=put_after_adjacent,
+ **settings
+ )
- ft = dom.getElementsByTagName("fencing-topology")
- if len(ft) == 0:
- utils.err("unable to remove fencing level, fencing level for node: %s, at level: %s, with device: %s doesn't exist" % (node,level,devices))
+def stonith_level_parse_node(arg):
+ target_type_candidate, target_value_candidate = parse_args.parse_typed_arg(
+ arg,
+ target_type_map_cli_to_lib.keys(),
+ "node"
+ )
+ target_type = target_type_map_cli_to_lib[target_type_candidate]
+ if target_type == TARGET_TYPE_ATTRIBUTE:
+ target_value = parse_args.split_option(target_value_candidate)
else:
- ft = ft[0]
-
- fls = ft.getElementsByTagName("fencing-level")
-
- if node != "":
- if devices != "":
- found = False
- for fl in fls:
- if fl.getAttribute("target") == node and fl.getAttribute("index") == level and fl.getAttribute("devices") == devices:
- found = True
- break
+ target_value = target_value_candidate
+ return target_type, target_value
- if fl.getAttribute("index") == level and fl.getAttribute("devices") == node_devices_combo:
- found = True
- break
+def stonith_level_normalize_devices(argv):
+ # normalize devices - previously it was possible to delimit devices by both
+ # a comma and a space
+ return ",".join(argv).split(",")
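
A quick sketch of the normalization above, with made-up device ids:

    stonith_level_normalize_devices(["d1,d2", "d3"])     # -> ["d1", "d2", "d3"]
    stonith_level_normalize_devices(["d1", "d2", "d3"])  # -> ["d1", "d2", "d3"]
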
- if found == False:
- utils.err("unable to remove fencing level, fencing level for node: %s, at level: %s, with device: %s doesn't exist" % (node,level,devices))
-
- fl.parentNode.removeChild(fl)
- else:
- for fl in fls:
- if fl.getAttribute("index") == level and (fl.getAttribute("target") == node or fl.getAttribute("devices") == node):
- fl.parentNode.removeChild(fl)
- else:
- for fl in fls:
- if fl.getAttribute("index") == level:
- parent = fl.parentNode
- parent.removeChild(fl)
- if len(parent.getElementsByTagName("fencing-level")) == 0:
- parent.parentNode.removeChild(parent)
- break
-
- utils.replace_cib_configuration(dom)
+def stonith_level_add_cmd(lib, argv, modifiers):
+ if len(argv) < 3:
+ raise CmdLineInputError()
+ target_type, target_value = stonith_level_parse_node(argv[1])
+ lib.fencing_topology.add_level(
+ argv[0],
+ target_type,
+ target_value,
+ stonith_level_normalize_devices(argv[2:]),
+ force_device=modifiers["force"],
+ force_node=modifiers["force"]
+ )
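
Sketched usage of the command above (ids are made up; the regexp% target prefix is an assumption about the typed-target syntax accepted by stonith_level_parse_node):

    pcs stonith level add 1 rh7-1 fence1 fence2
    pcs stonith level add 2 regexp%rh7-\d fence3

The first form ends up roughly as lib.fencing_topology.add_level("1", TARGET_TYPE_NODE, "rh7-1", ["fence1", "fence2"], ...).
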
-def stonith_level_clear(node = None):
- dom = utils.get_cib_dom()
- ft = dom.getElementsByTagName("fencing-topology")
+def stonith_level_clear_cmd(lib, argv, modifiers):
+ if len(argv) > 1:
+ raise CmdLineInputError()
- if len(ft) == 0:
+ if not argv:
+ lib.fencing_topology.remove_all_levels()
return
- if node == None:
- ft = ft[0]
- childNodes = ft.childNodes[:]
- for node in childNodes:
- node.parentNode.removeChild(node)
- else:
- fls = dom.getElementsByTagName("fencing-level")
- if len(fls) == 0:
- return
- for fl in fls:
- if fl.getAttribute("target") == node or fl.getAttribute("devices") == node:
- fl.parentNode.removeChild(fl)
-
- utils.replace_cib_configuration(dom)
-
-def stonith_level_verify():
- dom = utils.get_cib_dom()
- corosync_nodes = []
- if utils.hasCorosyncConf():
- corosync_nodes = utils.getNodesFromCorosyncConf()
- pacemaker_nodes = utils.getNodesFromPacemaker()
-
- fls = dom.getElementsByTagName("fencing-level")
- for fl in fls:
- node = fl.getAttribute("target")
- devices = fl.getAttribute("devices")
- for dev in devices.split(","):
- if not utils.is_stonith_resource(dev):
- utils.err("%s is not a stonith id" % dev)
- if node not in corosync_nodes and node not in pacemaker_nodes:
- utils.err("%s is not currently a node" % node)
-
-def stonith_level_show():
- dom = utils.get_cib_dom()
-
- node_levels = {}
- fls = dom.getElementsByTagName("fencing-level")
- for fl in fls:
- node = fl.getAttribute("target")
- level = fl.getAttribute("index")
- devices = fl.getAttribute("devices")
-
- if node in node_levels:
- node_levels[node].append((level,devices))
- else:
- node_levels[node] = [(level,devices)]
-
- if len(node_levels.keys()) == 0:
- return
+ target_type, target_value = stonith_level_parse_node(argv[0])
+ # backward compatibility mode
+ # Command parameters are: node, stonith-list
+    # Both the node and the stonith list are optional. If the node is omitted
+ # and the stonith list is present, there is no way to figure it out, since
+ # there is no specification of what the parameter is. Hence the pre-lib
+ # code tried both. It deleted all levels having the first parameter as
+ # either a node or a device list. Since it was only possible to specify
+ # node as a target back then, this is enabled only in that case.
+ report_item_list = []
+ try:
+ lib.fencing_topology.remove_levels_by_params(
+ None,
+ target_type,
+ target_value,
+ None,
+ # pre-lib code didn't return any error when no level was found
+ ignore_if_missing=True
+ )
+ except LibraryError as e:
+ report_item_list.extend(e.args)
+ if target_type == TARGET_TYPE_NODE:
+ try:
+ lib.fencing_topology.remove_levels_by_params(
+ None,
+ None,
+ None,
+ argv[0].split(","),
+ # pre-lib code didn't return any error when no level was found
+ ignore_if_missing=True
+ )
+ except LibraryError as e:
+ report_item_list.extend(e.args)
+ if report_item_list:
+ raise LibraryError(*report_item_list)
+
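In practice the compatibility handling above keeps both legacy forms working (names made up):

    pcs stonith level clear rh7-1          # clears levels targeting node rh7-1
    pcs stonith level clear fence1,fence2  # clears levels defined with these devices
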
+def stonith_level_config_to_str(config):
+ config_data = dict()
+ for level in config:
+ if level["target_type"] not in config_data:
+ config_data[level["target_type"]] = dict()
+ if level["target_value"] not in config_data[level["target_type"]]:
+ config_data[level["target_type"]][level["target_value"]] = []
+ config_data[level["target_type"]][level["target_value"]].append(level)
+
+ lines = []
+ for target_type in [
+ TARGET_TYPE_NODE, TARGET_TYPE_REGEXP, TARGET_TYPE_ATTRIBUTE
+ ]:
+ if not target_type in config_data:
+ continue
+ for target_value in sorted(config_data[target_type].keys()):
+ lines.append("Target: {0}".format(
+ "=".join(target_value) if target_type == TARGET_TYPE_ATTRIBUTE
+ else target_value
+ ))
+ level_lines = []
+ for target_level in sorted(
+ config_data[target_type][target_value],
+ key=lambda level: level["level"]
+ ):
+ level_lines.append("Level {level} - {devices}".format(
+ level=target_level["level"],
+ devices=",".join(target_level["devices"])
+ ))
+ lines.extend(indent(level_lines))
+ return lines
+
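Assuming, for example, two levels configured for node rh7-1, the helper above returns lines rendered roughly as:

    Target: rh7-1
      Level 1 - fence1,fence2
      Level 2 - fence3
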
+def stonith_level_config_cmd(lib, argv, modifiers):
+ if len(argv) > 0:
+ raise CmdLineInputError()
+ lines = stonith_level_config_to_str(lib.fencing_topology.get_config())
+ # do not print \n when lines are empty
+ if lines:
+ print("\n".join(lines))
- nodes = sorted(node_levels.keys())
+def stonith_level_remove_cmd(lib, argv, modifiers):
+ if len(argv) < 1:
+ raise CmdLineInputError()
+ target_type, target_value, devices = None, None, None
+ level = argv[0]
+ if len(argv) > 1:
+ target_type, target_value = stonith_level_parse_node(argv[1])
+ if len(argv) > 2:
+ devices = stonith_level_normalize_devices(argv[2:])
- for node in nodes:
- print(" Node: " + node)
- for level in sorted(node_levels[node], key=lambda x: int(x[0])):
- print(" Level " + level[0] + " - " + level[1])
+ try:
+ lib.fencing_topology.remove_levels_by_params(
+ level,
+ target_type,
+ target_value,
+ devices
+ )
+ except LibraryError as e:
+ # backward compatibility mode
+ # Command parameters are: level, node, stonith, stonith...
+ # Both the node and the stonith list are optional. If the node is
+        # omitted and the stonith list is present, there is no way to figure it
+ # out, since there is no specification of what the parameter is. Hence
+ # the pre-lib code tried both. First it assumed the first parameter is
+ # a node. If that fence level didn't exist, it assumed the first
+ # parameter is a device. Since it was only possible to specify node as
+ # a target back then, this is enabled only in that case.
+ if target_type != TARGET_TYPE_NODE:
+ raise e
+ level_not_found = False
+ for report_item in e.args:
+ if (
+ report_item.code
+ ==
+ report_codes.CIB_FENCING_LEVEL_DOES_NOT_EXIST
+ ):
+ level_not_found = True
+ break
+ if not level_not_found:
+ raise e
+ target_and_devices = [target_value]
+ if devices:
+ target_and_devices.extend(devices)
+ try:
+ lib.fencing_topology.remove_levels_by_params(
+ level,
+ None,
+ None,
+ target_and_devices
+ )
+ except LibraryError as e_second:
+ raise LibraryError(*(e.args + e_second.args))
+def stonith_level_verify_cmd(lib, argv, modifiers):
+ if len(argv) > 0:
+ raise CmdLineInputError()
+    # raises LibraryError in case of problems; otherwise there is nothing to do
+ lib.fencing_topology.verify()
def stonith_fence(argv):
if len(argv) != 1:
@@ -423,6 +463,8 @@ def sbd_cmd(lib, argv, modifiers):
sbd_config(lib, argv, modifiers)
elif cmd == "local_config_in_json":
local_sbd_config(lib, argv, modifiers)
+ elif cmd == "device":
+ sbd_device_cmd(lib, argv, modifiers)
else:
raise CmdLineInputError()
except CmdLineInputError as e:
@@ -430,39 +472,80 @@ def sbd_cmd(lib, argv, modifiers):
e, "stonith", "sbd {0}".format(cmd)
)
+def sbd_device_cmd(lib, argv, modifiers):
+ if len(argv) == 0:
+ raise CmdLineInputError()
+ cmd = argv.pop(0)
+ try:
+ if cmd == "setup":
+ sbd_setup_block_device(lib, argv, modifiers)
+ elif cmd == "message":
+ sbd_message(lib, argv, modifiers)
+ else:
+ raise CmdLineInputError()
+ except CmdLineInputError as e:
+ utils.exit_on_cmdline_input_errror(
+ e, "stonith", "sbd device {0}".format(cmd)
+ )
+
def sbd_enable(lib, argv, modifiers):
sbd_cfg = parse_args.prepare_options(argv)
default_watchdog, watchdog_dict = _sbd_parse_watchdogs(
modifiers["watchdog"]
)
+ default_device_list, node_device_dict = _sbd_parse_node_specific_options(
+ modifiers["device"]
+ )
+
lib.sbd.enable_sbd(
default_watchdog,
watchdog_dict,
sbd_cfg,
+ default_device_list=(
+ default_device_list if default_device_list else None
+ ),
+ node_device_dict=node_device_dict if node_device_dict else None,
allow_unknown_opts=modifiers["force"],
- ignore_offline_nodes=modifiers["skip_offline_nodes"]
+ ignore_offline_nodes=modifiers["skip_offline_nodes"],
)
+def _sbd_parse_node_specific_options(arg_list):
+ default_option_list = []
+ node_specific_option_dict = {}
+
+ for arg in arg_list:
+ if "@" in arg:
+ option, node_name = arg.rsplit("@", 1)
+ if node_name in node_specific_option_dict:
+ node_specific_option_dict[node_name].append(option)
+ else:
+ node_specific_option_dict[node_name] = [option]
+ else:
+ default_option_list.append(arg)
+
+ return default_option_list, node_specific_option_dict
+
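A small sketch of the option@node parsing above, with made-up device paths and node names:

    _sbd_parse_node_specific_options(
        ["/dev/sdb", "/dev/sdc@node1", "/dev/sdd@node1"]
    )
    # -> (["/dev/sdb"], {"node1": ["/dev/sdc", "/dev/sdd"]})
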
def _sbd_parse_watchdogs(watchdog_list):
- default_watchdog = None
- watchdog_dict = {}
+ default_watchdog_list, node_specific_watchdog_dict =\
+ _sbd_parse_node_specific_options(watchdog_list)
+ if not default_watchdog_list:
+ default_watchdog = None
+ elif len(default_watchdog_list) == 1:
+ default_watchdog = default_watchdog_list[0]
+ else:
+ raise CmdLineInputError("Multiple watchdog definitions.")
- for watchdog_node in watchdog_list:
- if "@" not in watchdog_node:
- if default_watchdog:
- raise CmdLineInputError("Multiple watchdog definitions.")
- default_watchdog = watchdog_node
- else:
- watchdog, node_name = watchdog_node.rsplit("@", 1)
- if node_name in watchdog_dict:
- raise CmdLineInputError(
- "Multiple watchdog definitions for node '{node}'".format(
- node=node_name
- )
+ watchdog_dict = {}
+ for node, watchdog_list in node_specific_watchdog_dict.items():
+ if len(watchdog_list) > 1:
+ raise CmdLineInputError(
+ "Multiple watchdog definitions for node '{node}'".format(
+ node=node
)
- watchdog_dict[node_name] = watchdog
+ )
+ watchdog_dict[node] = watchdog_list[0]
return default_watchdog, watchdog_dict
@@ -492,11 +575,28 @@ def sbd_status(lib, argv, modifiers):
for node_status in status_list:
status = node_status["status"]
print("{node}: {installed} | {enabled} | {running}".format(
- node=node_status["node"].label,
+ node=node_status["node"],
installed=_bool_to_str(status.get("installed")),
enabled=_bool_to_str(status.get("enabled")),
running=_bool_to_str(status.get("running"))
))
+ device_list = lib.sbd.get_local_devices_info(modifiers["full"])
+ for device in device_list:
+ print()
+ print("Messages list on device '{0}':".format(device["device"]))
+ print("<unknown>" if device["list"] is None else device["list"])
+ if modifiers["full"]:
+ print()
+ print("SBD header on device '{0}':".format(device["device"]))
+ print("<unknown>" if device["dump"] is None else device["dump"])
+
+def _print_per_node_option(config_list, config_option):
+ unknown_value = "<unknown>"
+ for config in config_list:
+ value = unknown_value
+ if config["config"] is not None:
+ value = config["config"].get(config_option, unknown_value)
+ print(" {node}: {value}".format(node=config["node"], value=value))
def sbd_config(lib, argv, modifiers):
@@ -510,23 +610,53 @@ def sbd_config(lib, argv, modifiers):
config = config_list[0]["config"]
- filtered_options = ["SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER"]
+ filtered_options = [
+ "SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER", "SBD_DEVICE"
+ ]
+ with_device = False
for key, val in config.items():
+ if key == "SBD_DEVICE":
+ with_device = True
if key in filtered_options:
continue
print("{key}={val}".format(key=key, val=val))
print()
print("Watchdogs:")
- for config in config_list:
- watchdog = "<unknown>"
- if config["config"] is not None:
- watchdog = config["config"].get("SBD_WATCHDOG_DEV", "<unknown>")
- print(" {node}: {watchdog}".format(
- node=config["node"].label,
- watchdog=watchdog
- ))
+ _print_per_node_option(config_list, "SBD_WATCHDOG_DEV")
+
+ if with_device:
+ print()
+ print("Devices:")
+ _print_per_node_option(config_list, "SBD_DEVICE")
def local_sbd_config(lib, argv, modifiers):
print(json.dumps(lib.sbd.get_local_sbd_config()))
+
+
+def sbd_setup_block_device(lib, argv, modifiers):
+ device_list = modifiers["device"]
+ if not device_list:
+ raise CmdLineInputError("No device defined")
+ options = parse_args.prepare_options(argv)
+
+ if not modifiers["force"]:
+ answer = utils.get_terminal_input(
+ (
+ "WARNING: All current content on device(s) '{device}' will be"
+ + " overwritten. Are you sure you want to continue? [y/N] "
+ ).format(device="', '".join(device_list))
+ )
+ if answer.lower() not in ["y", "yes"]:
+ print("Canceled")
+ return
+ lib.sbd.initialize_block_devices(device_list, options)
+
+
+def sbd_message(lib, argv, modifiers):
+ if len(argv) != 3:
+ raise CmdLineInputError()
+
+ device, node, message = argv
+ lib.sbd.set_message(device, node, message)
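
Sketched usage of the new SBD device subcommands (the device path, node name and the watchdog-timeout option name are assumptions; the 'clear' message type comes from sbd_message_types in settings_default.py above):

    pcs stonith sbd device setup --device /dev/sdb1 watchdog-timeout=10
    pcs stonith sbd device message /dev/sdb1 node1 clear
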
diff --git a/pcs/test/tools/test/__init__.py b/pcs/test/cib_resource/__init__.py
similarity index 100%
copy from pcs/test/tools/test/__init__.py
copy to pcs/test/cib_resource/__init__.py
diff --git a/pcs/test/cib_resource/common.py b/pcs/test/cib_resource/common.py
new file mode 100644
index 0000000..f1cf918
--- /dev/null
+++ b/pcs/test/cib_resource/common.py
@@ -0,0 +1,29 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+import shutil
+
+from lxml import etree
+
+from pcs.test.tools.cib import get_assert_pcs_effect_mixin
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_runner import PcsRunner
+from pcs.test.tools.pcs_unittest import TestCase
+
+def get_cib_resources(cib):
+ return etree.tostring(etree.parse(cib).findall(".//resources")[0])
+
+class ResourceTest(
+ TestCase,
+ get_assert_pcs_effect_mixin(get_cib_resources)
+):
+ empty_cib = rc("cib-empty-1.2.xml")
+ temp_cib = rc("temp-cib.xml")
+
+ def setUp(self):
+ shutil.copy(self.empty_cib, self.temp_cib)
+ self.pcs_runner = PcsRunner(self.temp_cib)
diff --git a/pcs/test/cib_resource/stonith_common.py b/pcs/test/cib_resource/stonith_common.py
new file mode 100644
index 0000000..59a697c
--- /dev/null
+++ b/pcs/test/cib_resource/stonith_common.py
@@ -0,0 +1,30 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+import logging
+
+from pcs.cli.common.reports import (
+ LibraryReportProcessorToConsole as ReportProcessor
+)
+from pcs.lib.external import CommandRunner
+from pcs.lib.resource_agent import StonithAgent
+from pcs.test.tools import pcs_unittest as unittest
+
+
+def __can_load_xvm_fence_agent():
+ try:
+ runner = CommandRunner(logging.getLogger("test"), ReportProcessor())
+ StonithAgent(runner, "fence_xvm").validate_metadata()
+ return True
+ except:
+ return False
+
+
+need_load_xvm_fence_agent = unittest.skipUnless(
+ __can_load_xvm_fence_agent(),
+ "test requires the successful load of 'fence_xvm' agent"
+)
diff --git a/pcs/test/cib_resource/test_bundle.py b/pcs/test/cib_resource/test_bundle.py
new file mode 100644
index 0000000..d8c97c6
--- /dev/null
+++ b/pcs/test/cib_resource/test_bundle.py
@@ -0,0 +1,491 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+import shutil
+
+from pcs.test.tools.assertions import AssertPcsMixin
+from pcs.test.tools.cib import get_assert_pcs_effect_mixin
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.misc import (
+ get_test_resource as rc,
+ outdent,
+ skip_unless_pacemaker_supports_bundle,
+)
+from pcs.test.tools.pcs_runner import PcsRunner
+
+
+class BundleCreateCommon(
+ TestCase,
+ get_assert_pcs_effect_mixin(
+ lambda cib: etree.tostring(
+ # pylint:disable=undefined-variable
+ etree.parse(cib).findall(".//resources")[0]
+ )
+ )
+):
+ temp_cib = rc("temp-cib.xml")
+
+ def setUp(self):
+ shutil.copy(self.empty_cib, self.temp_cib)
+ self.pcs_runner = PcsRunner(self.temp_cib)
+
+
+@skip_unless_pacemaker_supports_bundle
+class BundleCreateUpgradeCib(BundleCreateCommon):
+ empty_cib = rc("cib-empty.xml")
+
+ def test_success(self):
+ self.assert_effect(
+ "resource bundle create B1 container image=pcs:test",
+ """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+ """,
+ "CIB has been upgraded to the latest schema version.\n"
+ )
+
+
+@skip_unless_pacemaker_supports_bundle
+class BundleCreate(BundleCreateCommon):
+ empty_cib = rc("cib-empty-2.8.xml")
+
+ def test_minimal(self):
+ self.assert_effect(
+ "resource bundle create B1 container image=pcs:test",
+ """
+ <resources>
+ <bundle id="B1">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+ """
+ )
+
+ def test_all_options(self):
+ self.assert_effect(
+ """
+ resource bundle create B1
+ container replicas=4 replicas-per-host=2 run-command=/bin/true
+ port-map port=1001
+ network control-port=12345 host-interface=eth0 host-netmask=24
+ port-map id=B1-port-map-1001 internal-port=2002 port=2000
+ port-map range=3000-3300
+ storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b
+ network ip-range-start=192.168.100.200
+ storage-map id=B1-storage-map source-dir=/tmp/docker2a
+ target-dir=/tmp/docker2b
+ container image=pcs:test masters=0
+ storage-map source-dir-root=/tmp/docker3a
+ target-dir=/tmp/docker3b
+ storage-map id=B1-port-map-1001-1 source-dir-root=/tmp/docker4a
+ target-dir=/tmp/docker4b
+ container network=extra_network_settings options=extra_options
+ """,
+ """
+ <resources>
+ <bundle id="B1">
+ <docker
+ image="pcs:test"
+ masters="0"
+ network="extra_network_settings"
+ options="extra_options"
+ replicas="4"
+ replicas-per-host="2"
+ run-command="/bin/true"
+ />
+ <network
+ control-port="12345"
+ host-interface="eth0"
+ host-netmask="24"
+ ip-range-start="192.168.100.200"
+ >
+ <port-mapping id="B1-port-map-1001-2" port="1001" />
+ <port-mapping
+ id="B1-port-map-1001"
+ internal-port="2002"
+ port="2000"
+ />
+ <port-mapping
+ id="B1-port-map-3000-3300"
+ range="3000-3300"
+ />
+ </network>
+ <storage>
+ <storage-mapping
+ id="B1-storage-map-1"
+ source-dir="/tmp/docker1a"
+ target-dir="/tmp/docker1b"
+ />
+ <storage-mapping
+ id="B1-storage-map"
+ source-dir="/tmp/docker2a"
+ target-dir="/tmp/docker2b"
+ />
+ <storage-mapping
+ id="B1-storage-map-2"
+ source-dir-root="/tmp/docker3a"
+ target-dir="/tmp/docker3b"
+ />
+ <storage-mapping
+ id="B1-port-map-1001-1"
+ source-dir-root="/tmp/docker4a"
+ target-dir="/tmp/docker4b"
+ />
+ </storage>
+ </bundle>
+ </resources>
+ """
+ )
+
+ def test_fail_when_missing_args_1(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource bundle",
+ stdout_start="\nUsage: pcs resource bundle ...\n"
+ )
+
+ def test_fail_when_missing_args_2(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource bundle create",
+ stdout_start="\nUsage: pcs resource bundle create...\n"
+ )
+
+ def test_fail_when_missing_required(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource bundle create B1",
+ "Error: required container option 'image' is missing\n"
+ )
+
+ def test_fail_on_unknown_option(self):
+ self.assert_pcs_fail(
+ "resource bundle create B1 container image=pcs:test extra=option",
+ "Error: invalid container option 'extra', allowed options are: "
+ "image, masters, network, options, replicas, replicas-per-host,"
+ " run-command, use --force to override\n"
+ )
+
+ def test_unknown_option_forced(self):
+        # Test that pcs allows specifying options it does not know about. This
+        # ensures some kind of forward compatibility, so the user will be able
+        # to specify new options. However, as of now such an option is not
+        # supported by pacemaker and so the command fails.
+ self.assert_pcs_fail(
+ """
+ resource bundle create B1 container image=pcs:test extra=option
+ --force
+ """
+ ,
+ stdout_start="Error: Unable to update cib\n"
+ )
+
+ def test_more_errors(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource bundle create B#1 container replicas=x",
+ outdent(
+ """\
+ Error: invalid bundle name 'B#1', '#' is not a valid character for a bundle name
+ Error: required container option 'image' is missing
+ Error: 'x' is not a valid replicas value, use a positive integer
+ """
+ )
+ )
+
+ def assert_no_options(self, keyword):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource bundle create B {0}".format(keyword),
+ "Error: No {0} options specified\n".format(keyword),
+ )
+
+ def test_empty_container(self):
+ self.assert_no_options("container")
+
+ def test_empty_network(self):
+ self.assert_no_options("network")
+
+ def test_empty_storage_map(self):
+ self.assert_no_options("storage-map")
+
+ def test_empty_port_map(self):
+ self.assert_no_options("port-map")
+
+
+@skip_unless_pacemaker_supports_bundle
+class BundleUpdate(BundleCreateCommon):
+ empty_cib = rc("cib-empty-2.8.xml")
+
+ def fixture_bundle(self, name):
+ self.assert_pcs_success(
+ "resource bundle create {0} container image=pcs:test".format(
+ name
+ )
+ )
+
+ def fixture_bundle_complex(self, name):
+ self.assert_pcs_success(
+ (
+ "resource bundle create {0} "
+ "container image=pcs:test replicas=4 masters=2 "
+ "network control-port=12345 host-interface=eth0 host-netmask=24 "
+ "port-map internal-port=1000 port=2000 "
+ "port-map internal-port=1001 port=2001 "
+ "port-map internal-port=1002 port=2002 "
+ "storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b "
+ "storage-map source-dir=/tmp/docker2a target-dir=/tmp/docker2b "
+ "storage-map source-dir=/tmp/docker3a target-dir=/tmp/docker3b "
+ ).format(name)
+ )
+
+ def test_fail_when_missing_args_1(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource bundle update",
+ stdout_start="\nUsage: pcs resource bundle update...\n"
+ )
+
+ def test_fail_when_missing_args_2(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource bundle update B port-map",
+ "Error: No port-map options specified\n"
+ )
+
+ def test_fail_when_missing_args_3(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource bundle update B storage-map remove",
+ "Error: When using 'storage-map' you must specify either 'add' and "
+ "options or 'remove' and id(s)\n"
+ )
+
+ def test_bad_id(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource bundle update B1 container image=test",
+ "Error: bundle 'B1' does not exist\n"
+ )
+
+ def test_success(self):
+ self.fixture_bundle_complex("B")
+ self.assert_effect(
+ """
+ resource bundle update B
+ container masters= replicas=6 replicas-per-host=2
+ network control-port= host-interface=eth1
+ ip-range-start=192.168.100.200
+ port-map remove B-port-map-2000 B-port-map-2002
+ port-map add internal-port=1003 port=2003
+ storage-map remove B-storage-map B-storage-map-2
+ storage-map add source-dir=/tmp/docker4a target-dir=/tmp/docker4b
+ """,
+ """
+ <resources>
+ <bundle id="B">
+ <docker
+ image="pcs:test"
+ replicas="6"
+ replicas-per-host="2"
+ />
+ <network
+ host-interface="eth1"
+ host-netmask="24"
+ ip-range-start="192.168.100.200"
+ >
+ <port-mapping
+ id="B-port-map-2001"
+ internal-port="1001"
+ port="2001"
+ />
+ <port-mapping
+ id="B-port-map-2003"
+ internal-port="1003"
+ port="2003"
+ />
+ </network>
+ <storage>
+ <storage-mapping
+ id="B-storage-map-1"
+ source-dir="/tmp/docker2a"
+ target-dir="/tmp/docker2b"
+ />
+ <storage-mapping
+ id="B-storage-map"
+ source-dir="/tmp/docker4a"
+ target-dir="/tmp/docker4b"
+ />
+ </storage>
+ </bundle>
+ </resources>
+ """
+ )
+
+ def test_force_unknown_option(self):
+ self.fixture_bundle("B")
+
+ self.assert_pcs_fail(
+ "resource bundle update B container extra=option",
+ "Error: invalid container option 'extra', allowed options are: "
+ "image, masters, network, options, replicas, replicas-per-host,"
+ " run-command, use --force to override\n"
+ )
+        # Test that pcs allows specifying options it does not know about. This
+        # provides a degree of forward compatibility, so the user will be able
+        # to specify new options. However, the option is currently not
+        # supported by pacemaker, so the command fails.
+ self.assert_pcs_fail(
+ "resource bundle update B container extra=option --force",
+ stdout_start="Error: Unable to update cib\n"
+ )
+
+ # no force needed when removing an unknown option
+ self.assert_effect(
+ "resource bundle update B container extra=",
+ """
+ <resources>
+ <bundle id="B">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+ """
+ )
+
+ def assert_no_options(self, keyword):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource bundle update B {0}".format(keyword),
+ "Error: No {0} options specified\n".format(keyword),
+ )
+
+ def test_empty_container(self):
+ self.assert_no_options("container")
+
+ def test_empty_network(self):
+ self.assert_no_options("network")
+
+ def test_empty_storage_map(self):
+ self.assert_no_options("storage-map")
+
+ def test_empty_port_map(self):
+ self.assert_no_options("port-map")
+
+
+@skip_unless_pacemaker_supports_bundle
+class BundleShow(TestCase, AssertPcsMixin):
+ empty_cib = rc("cib-empty-2.8.xml")
+ temp_cib = rc("temp-cib.xml")
+
+ def setUp(self):
+ shutil.copy(self.empty_cib, self.temp_cib)
+ self.pcs_runner = PcsRunner(self.temp_cib)
+
+ def test_minimal(self):
+ self.assert_pcs_success(
+ "resource bundle create B1 container image=pcs:test"
+ )
+ self.assert_pcs_success("resource show B1", outdent(
+ """\
+ Bundle: B1
+ Docker: image=pcs:test
+ """
+ ))
+
+ def test_container(self):
+ self.assert_pcs_success(
+ """
+ resource bundle create B1
+ container image=pcs:test masters=2 replicas=4 options='a b c'
+ """
+ )
+ self.assert_pcs_success("resource show B1", outdent(
+ """\
+ Bundle: B1
+ Docker: image=pcs:test masters=2 options="a b c" replicas=4
+ """
+ ))
+
+ def test_network(self):
+ self.assert_pcs_success(
+ """
+ resource bundle create B1
+ container image=pcs:test
+ network host-interface=eth0 host-netmask=24 control-port=12345
+ """
+ )
+ self.assert_pcs_success("resource show B1", outdent(
+ """\
+ Bundle: B1
+ Docker: image=pcs:test
+ Network: control-port=12345 host-interface=eth0 host-netmask=24
+ """
+ ))
+
+ def test_port_map(self):
+ self.assert_pcs_success(
+ """
+ resource bundle create B1
+ container image=pcs:test
+ port-map id=B1-port-map-1001 internal-port=2002 port=2000
+ port-map range=3000-3300
+ """
+ )
+ self.assert_pcs_success("resource show B1", outdent(
+ """\
+ Bundle: B1
+ Docker: image=pcs:test
+ Port Mapping:
+ internal-port=2002 port=2000 (B1-port-map-1001)
+ range=3000-3300 (B1-port-map-3000-3300)
+ """
+ ))
+
+ def test_storage_map(self):
+ self.assert_pcs_success(
+ """
+ resource bundle create B1
+ container image=pcs:test
+ storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b
+ storage-map id=my-storage-map source-dir=/tmp/docker2a
+ target-dir=/tmp/docker2b
+ """
+ )
+ self.assert_pcs_success("resource show B1", outdent(
+ """\
+ Bundle: B1
+ Docker: image=pcs:test
+ Storage Mapping:
+ source-dir=/tmp/docker1a target-dir=/tmp/docker1b (B1-storage-map)
+ source-dir=/tmp/docker2a target-dir=/tmp/docker2b (my-storage-map)
+ """
+ ))
+
+ def test_all(self):
+ self.assert_pcs_success(
+ """
+ resource bundle create B1
+ container image=pcs:test masters=2 replicas=4 options='a b c'
+ network host-interface=eth0 host-netmask=24 control-port=12345
+ port-map id=B1-port-map-1001 internal-port=2002 port=2000
+ port-map range=3000-3300
+ storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b
+ storage-map id=my-storage-map source-dir=/tmp/docker2a
+ target-dir=/tmp/docker2b
+ """
+ )
+ self.assert_pcs_success("resource show B1", outdent(
+ """\
+ Bundle: B1
+ Docker: image=pcs:test masters=2 options="a b c" replicas=4
+ Network: control-port=12345 host-interface=eth0 host-netmask=24
+ Port Mapping:
+ internal-port=2002 port=2000 (B1-port-map-1001)
+ range=3000-3300 (B1-port-map-3000-3300)
+ Storage Mapping:
+ source-dir=/tmp/docker1a target-dir=/tmp/docker1b (B1-storage-map)
+ source-dir=/tmp/docker2a target-dir=/tmp/docker2b (my-storage-map)
+ """
+ ))
diff --git a/pcs/test/cib_resource/test_create.py b/pcs/test/cib_resource/test_create.py
new file mode 100644
index 0000000..2adef5a
--- /dev/null
+++ b/pcs/test/cib_resource/test_create.py
@@ -0,0 +1,1470 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.test.tools.misc import (
+ get_test_resource as rc,
+ skip_unless_pacemaker_supports_bundle,
+ skip_unless_pacemaker_supports_systemd,
+)
+from pcs.test.cib_resource.common import ResourceTest
+
+
+class Success(ResourceTest):
+ def test_base_create(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops",
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ @skip_unless_pacemaker_supports_systemd()
+ def test_base_create_with_agent_name_including_systemd_instance(self):
+        # crm_resource returns the same metadata for any systemd resource,
+        # whether the resource exists or not
+ self.assert_effect(
+            "resource create R systemd:test@a:b --no-default-ops",
+ """<resources>
+                <primitive class="systemd" id="R" type="test@a:b">
+ <operations>
+ <op id="R-monitor-interval-60" interval="60"
+ name="monitor" timeout="100"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_base_create_with_default_ops(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy",
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="R-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="R-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_create_with_options(self):
+ self.assert_effect(
+ "resource create --no-default-ops R ocf:heartbeat:IPaddr2"
+ " ip=192.168.0.99 cidr_netmask=32"
+ ,
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="IPaddr2"
+ >
+ <instance_attributes id="R-instance_attributes">
+ <nvpair id="R-instance_attributes-cidr_netmask"
+ name="cidr_netmask" value="32"
+ />
+ <nvpair id="R-instance_attributes-ip" name="ip"
+ value="192.168.0.99"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="R-monitor-interval-10s" interval="10s"
+ name="monitor" timeout="20s"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_create_with_trace_options(self):
+        # The trace_ra and trace_file options are not defined in the agent
+        # metadata, but they are allowed for all ocf:heartbeat and
+        # ocf:pacemaker agents. This test checks that they can be set without
+        # --force.
+ self.assert_effect(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy"
+ " trace_ra=1 trace_file=/root/trace"
+ ,
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <instance_attributes id="R-instance_attributes">
+ <nvpair id="R-instance_attributes-trace_file"
+ name="trace_file" value="/root/trace"
+ />
+ <nvpair id="R-instance_attributes-trace_ra"
+ name="trace_ra" value="1"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_create_with_options_and_operations(self):
+ self.assert_effect(
+ "resource create --no-default-ops R ocf:heartbeat:IPaddr2"
+ " ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
+ ,
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="IPaddr2"
+ >
+ <instance_attributes id="R-instance_attributes">
+ <nvpair id="R-instance_attributes-cidr_netmask"
+ name="cidr_netmask" value="32"
+ />
+ <nvpair id="R-instance_attributes-ip" name="ip"
+ value="192.168.0.99"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="R-monitor-interval-30s" interval="30s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_create_disabled(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --disabled",
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <meta_attributes id="R-meta_attributes">
+ <nvpair id="R-meta_attributes-target-role"
+ name="target-role" value="Stopped"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_with_clone(self):
+ self.assert_effect(
+ [
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --clone"
+ ,
+ "resource create R ocf:heartbeat:Dummy --no-default-ops clone",
+ ],
+ """<resources>
+ <clone id="R-clone">
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </clone>
+ </resources>"""
+ )
+
+ def test_with_clone_options(self):
+ self.assert_effect(
+ [
+ "resource create R ocf:heartbeat:Dummy --no-default-ops"
+ " --cloneopt notify=true"
+ ,
+ "resource create R ocf:heartbeat:Dummy --no-default-ops clone"
+ " notify=true"
+ ,
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --clone"
+ " notify=true"
+ ,
+ ],
+ """<resources>
+ <clone id="R-clone">
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="R-clone-meta_attributes">
+ <nvpair id="R-clone-meta_attributes-notify"
+ name="notify" value="true"
+ />
+ </meta_attributes>
+ </clone>
+ </resources>"""
+ )
+
+ def test_with_master(self):
+ self.assert_effect(
+ [
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --master",
+ "resource create R ocf:heartbeat:Dummy --no-default-ops master",
+ ],
+ """<resources>
+ <master id="R-master">
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </master>
+ </resources>"""
+ )
+
+ def test_create_with_options_and_meta(self):
+ self.assert_effect(
+ "resource create --no-default-ops R ocf:heartbeat:IPaddr2"
+ " ip=192.168.0.99 cidr_netmask=32 meta is-managed=false"
+ ,
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="IPaddr2"
+ >
+ <instance_attributes id="R-instance_attributes">
+ <nvpair id="R-instance_attributes-cidr_netmask"
+ name="cidr_netmask" value="32"
+ />
+ <nvpair id="R-instance_attributes-ip" name="ip"
+ value="192.168.0.99"
+ />
+ </instance_attributes>
+ <meta_attributes id="R-meta_attributes">
+ <nvpair id="R-meta_attributes-is-managed"
+ name="is-managed" value="false"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="R-monitor-interval-10s" interval="10s"
+ name="monitor" timeout="20s"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+class SuccessOperations(ResourceTest):
+ def test_create_with_operations(self):
+ self.assert_effect(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy"
+ " op monitor interval=30s"
+ ,
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-30s" interval="30s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_multiple_op_keyword(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops"
+ " op monitor interval=30s op monitor interval=20s"
+ ,
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-30s" interval="30s"
+ name="monitor"
+ />
+ <op id="R-monitor-interval-20s" interval="20s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_multiple_operations_same_op_keyword(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops"
+ " op monitor interval=30s monitor interval=20s"
+ ,
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-30s" interval="30s"
+ name="monitor"
+ />
+ <op id="R-monitor-interval-20s" interval="20s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_multiple_op_options_for_same_action(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops"
+ " op monitor interval=30s timeout=20s"
+ ,
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-30s" interval="30s"
+ name="monitor" timeout="20s"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_op_with_OCF_CHECK_LEVEL(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops"
+ " op monitor interval=30s timeout=20s OCF_CHECK_LEVEL=1"
+ ,
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-30s" interval="30s"
+ name="monitor" timeout="20s"
+ >
+ <instance_attributes
+ id="R-monitor-interval-30s-instance_attributes"
+ >
+ <nvpair
+ id="R-monitor-interval-30s-"""
+ +'instance_attributes-OCF_CHECK_LEVEL"'
+ +"""
+ name="OCF_CHECK_LEVEL" value="1"
+ />
+ </instance_attributes>
+ </op>
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_default_ops_only(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy",
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="R-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="R-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_merging_default_ops_explictly_specified(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy op start timeout=200",
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="R-start-interval-0s" interval="0s" name="start"
+ timeout="200"
+ />
+ <op id="R-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_completing_monitor_operation(self):
+ self.assert_effect(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy",
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_adapt_second_op_interval(self):
+ self.assert_effect(
+ "resource create R ocf:pacemaker:Stateful",
+ """<resources>
+ <primitive class="ocf" id="R" provider="pacemaker"
+ type="Stateful"
+ >
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" role="Master" timeout="20"
+ />
+ <op id="R-monitor-interval-11" interval="11"
+ name="monitor" role="Slave" timeout="20"
+ />
+ <op id="R-start-interval-0s" interval="0s" name="start"
+ timeout="20"
+ />
+ <op id="R-stop-interval-0s" interval="0s" name="stop"
+ timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ ,
+ "Warning: changing a monitor operation interval from 10 to 11 to"
+ " make the operation unique\n"
+ )
+
+ def test_warn_on_forced_unknown_operation(self):
+ self.assert_effect(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy"
+ " op monitro interval=30s --force"
+ ,
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="R-monitro-interval-30s" interval="30s"
+ name="monitro"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ ,
+ "Warning: 'monitro' is not a valid operation name value, use"
+ " meta-data, migrate_from, migrate_to, monitor, reload, start,"
+ " stop, validate-all\n"
+ )
+
+class SuccessGroup(ResourceTest):
+ def test_with_group(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --group G",
+ """<resources>
+ <group id="G">
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </group>
+ </resources>"""
+ )
+
+ def test_with_existing_group(self):
+ self.assert_pcs_success(
+ "resource create R0 ocf:heartbeat:Dummy --no-default-ops --group G"
+ )
+ self.assert_effect(
+ [
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --group"
+ " G"
+ ,
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --group"
+ " G --after R0"
+ ,
+ ],
+ """<resources>
+ <group id="G">
+ <primitive class="ocf" id="R0" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R0-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </group>
+ </resources>"""
+ )
+
+ def test_with_group_with_after(self):
+ self.assert_pcs_success_all([
+ "resource create R0 ocf:heartbeat:Dummy --no-default-ops --group G",
+ "resource create R1 ocf:heartbeat:Dummy --no-default-ops --group G",
+ ])
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --group G"
+ " --after R0"
+ ,
+ """<resources>
+ <group id="G">
+ <primitive class="ocf" id="R0" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R0-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="R1" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R1-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+
+ </group>
+ </resources>"""
+ )
+
+ def test_with_group_with_before(self):
+ self.assert_pcs_success(
+ "resource create R0 ocf:heartbeat:Dummy --no-default-ops --group G"
+ )
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --group G"
+ " --before R0"
+ ,
+ """<resources>
+ <group id="G">
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="R0" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R0-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </group>
+ </resources>"""
+ )
+
+class SuccessMaster(ResourceTest):
+ def test_disable_is_on_master_element(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --disabled --master",
+ """<resources>
+ <master id="R-master">
+ <meta_attributes id="R-master-meta_attributes">
+ <nvpair id="R-master-meta_attributes-target-role"
+ name="target-role" value="Stopped"
+ />
+ </meta_attributes>
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </master>
+ </resources>"""
+ )
+
+ def test_put_options_after_master_as_its_meta_fix_1(self):
+ """
+ fixes bz 1378107 (do not use master options as primitive options)
+ """
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy state=a"
+ " --master is-managed=false --force"
+ ,
+ """<resources>
+ <master id="R-master">
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <instance_attributes id="R-instance_attributes">
+ <nvpair id="R-instance_attributes-state"
+ name="state" value="a"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="R-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="R-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="R-master-meta_attributes">
+ <nvpair id="R-master-meta_attributes-is-managed"
+ name="is-managed" value="false"
+ />
+ </meta_attributes>
+ </master>
+ </resources>"""
+ )
+
+ def test_put_options_after_master_as_its_meta_fix_2(self):
+ """
+ fixes bz 1378107 (do not use master options as operations)
+ """
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy state=a op monitor"
+ " interval=10s --master is-managed=false --force"
+ " --no-default-ops"
+ ,
+ """<resources>
+ <master id="R-master">
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <instance_attributes id="R-instance_attributes">
+ <nvpair id="R-instance_attributes-state"
+ name="state" value="a"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="R-monitor-interval-10s" interval="10s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="R-master-meta_attributes">
+ <nvpair id="R-master-meta_attributes-is-managed"
+ name="is-managed" value="false"
+ />
+ </meta_attributes>
+ </master>
+ </resources>"""
+ )
+
+ def test_do_not_steal_primitive_meta_options(self):
+ """
+ fixes bz 1378107
+ """
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy meta a=b --master b=c"
+ " --no-default-ops"
+ ,
+ """<resources>
+ <master id="R-master">
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="R-meta_attributes">
+ <nvpair id="R-meta_attributes-a" name="a"
+ value="b"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="R-master-meta_attributes">
+ <nvpair id="R-master-meta_attributes-b" name="b"
+ value="c"
+ />
+ </meta_attributes>
+ </master>
+ </resources>"""
+ )
+
+ def test_takes_master_meta_attributes(self):
+ self.assert_effect(
+ "resource create --no-default-ops R ocf:heartbeat:IPaddr2"
+ " ip=192.168.0.99 --master cidr_netmask=32"
+ ,
+ """<resources>
+ <master id="R-master">
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="IPaddr2"
+ >
+ <instance_attributes id="R-instance_attributes">
+ <nvpair id="R-instance_attributes-ip" name="ip"
+ value="192.168.0.99"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="R-monitor-interval-10s" interval="10s"
+ name="monitor" timeout="20s"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="R-master-meta_attributes">
+ <nvpair id="R-master-meta_attributes-cidr_netmask"
+ name="cidr_netmask" value="32"
+ />
+ </meta_attributes>
+ </master>
+ </resources>"""
+ )
+
+class SuccessClone(ResourceTest):
+ def test_clone_does_not_overshadow_meta_options(self):
+ self.assert_effect(
+ [
+ "resource create R ocf:heartbeat:Dummy meta a=b --clone c=d",
+ "resource create R ocf:heartbeat:Dummy --clone c=d meta a=b",
+ ],
+ """<resources>
+ <clone id="R-clone">
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="R-meta_attributes">
+ <nvpair id="R-meta_attributes-a" name="a"
+ value="b"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="R-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="R-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="R-clone-meta_attributes">
+ <nvpair id="R-clone-meta_attributes-c" name="c"
+ value="d"
+ />
+ </meta_attributes>
+ </clone>
+ </resources>"""
+ )
+
+ def test_clone_does_not_overshadow_operations(self):
+ self.assert_effect(
+ [
+ "resource create R ocf:heartbeat:Dummy op monitor interval=10"
+ " --clone c=d"
+ ,
+ "resource create R ocf:heartbeat:Dummy --clone c=d"
+ " op monitor interval=10"
+ ,
+ ],
+ """<resources>
+ <clone id="R-clone">
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor"
+ />
+ <op id="R-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="R-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <meta_attributes id="R-clone-meta_attributes">
+ <nvpair id="R-clone-meta_attributes-c" name="c"
+ value="d"
+ />
+ </meta_attributes>
+ </clone>
+ </resources>"""
+ )
+
+ def test_clone_places_disabled_correctly(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:Dummy --clone --disabled",
+ """<resources>
+ <clone id="R-clone">
+ <meta_attributes id="R-clone-meta_attributes">
+ <nvpair id="R-clone-meta_attributes-target-role"
+ name="target-role" value="Stopped"
+ />
+ </meta_attributes>
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="R-start-interval-0s" interval="0s"
+ name="start" timeout="20"
+ />
+ <op id="R-stop-interval-0s" interval="0s"
+ name="stop" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </clone>
+ </resources>"""
+ )
+
+
+@skip_unless_pacemaker_supports_bundle
+class Bundle(ResourceTest):
+ empty_cib = rc("cib-empty-2.8.xml")
+
+ def fixture_primitive(self, name, bundle=None):
+ if bundle:
+ self.assert_pcs_success(
+ "resource create {0} ocf:heartbeat:Dummy bundle {1}".format(
+ name, bundle
+ )
+ )
+ else:
+ self.assert_pcs_success(
+ "resource create {0} ocf:heartbeat:Dummy".format(name)
+ )
+
+ def fixture_bundle(self, name):
+ self.assert_pcs_success(
+ "resource bundle create {0} container image=pcs:test".format(
+ name
+ )
+ )
+
+ def test_bundle_id_not_specified(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops bundle"
+ ,
+ "Error: you have to specify exactly one bundle\n"
+ )
+
+ def test_bundle_id_is_not_bundle(self):
+ self.fixture_primitive("R1")
+ self.assert_pcs_fail(
+ "resource create R2 ocf:heartbeat:Dummy bundle R1",
+ "Error: 'R1' is not bundle\n"
+ )
+
+ def test_bundle_id_does_not_exist(self):
+ self.assert_pcs_fail(
+ "resource create R1 ocf:heartbeat:Dummy bundle B",
+ "Error: bundle 'B' does not exist\n"
+ )
+
+ def test_primitive_already_in_bundle(self):
+ self.fixture_bundle("B")
+ self.fixture_primitive("R1", bundle="B")
+ self.assert_pcs_fail(
+ "resource create R2 ocf:heartbeat:Dummy --no-default-ops bundle B",
+ (
+ "Error: bundle 'B' already contains resource 'R1', a bundle "
+ "may contain at most one resource\n"
+ )
+ )
+
+ def test_success(self):
+ self.fixture_bundle("B")
+ self.assert_effect(
+ "resource create R1 ocf:heartbeat:Dummy --no-default-ops bundle B",
+ """
+ <resources>
+ <bundle id="B">
+ <docker image="pcs:test" />
+ <primitive class="ocf" id="R1" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="R1-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ """
+ )
+
+
+class FailOrWarn(ResourceTest):
+ def test_error_group_clone_combination(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --clone"
+ " --group G"
+ ,
+ "Error: you can specify only one of clone, master, bundle or"
+ " --group\n"
+ )
+
+ def test_error_master_clone_combination(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --clone"
+ " --master"
+ ,
+ "Error: you can specify only one of clone, master, bundle or"
+ " --group\n"
+ )
+
+ def test_error_master_group_combination(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --master"
+ " --group G"
+ ,
+ "Error: you can specify only one of clone, master, bundle or"
+ " --group\n"
+ )
+
+ def test_error_bundle_clone_combination(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --clone"
+ " bundle bundle_id"
+ ,
+ "Error: you can specify only one of clone, master, bundle or"
+ " --group\n"
+ )
+
+ def test_error_bundle_master_combination(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --master"
+ " bundle bundle_id"
+ ,
+ "Error: you can specify only one of clone, master, bundle or"
+ " --group\n"
+ )
+
+ def test_error_bundle_group_combination(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy --no-default-ops --group G"
+ " bundle bundle_id"
+ ,
+ "Error: you can specify only one of clone, master, bundle or"
+ " --group\n"
+ )
+
+ def test_fail_when_nonexisting_agent(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:NoExisting",
+ "Error: Agent 'ocf:heartbeat:NoExisting' is not installed or does"
+ " not provide valid metadata: Metadata query for"
+ " ocf:heartbeat:NoExisting failed: -5, use --force to"
+ " override\n"
+ )
+
+ def test_warn_when_forcing_noexistent_agent(self):
+ self.assert_effect(
+ "resource create R ocf:heartbeat:NoExisting --force",
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat"
+ type="NoExisting"
+ >
+ <operations>
+ <op id="R-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>""",
+ "Warning: Agent 'ocf:heartbeat:NoExisting' is not installed or does"
+ " not provide valid metadata: Metadata query for"
+ " ocf:heartbeat:NoExisting failed: -5\n"
+ )
+
+
+ def test_fail_on_invalid_resource_agent_name(self):
+ self.assert_pcs_fail(
+ "resource create R invalid_agent_name",
+ "Error: Unable to find agent 'invalid_agent_name', try specifying"
+ " its full name\n"
+ )
+
+ def test_fail_on_invalid_resource_agent_name_even_if_forced(self):
+ self.assert_pcs_fail(
+ "resource create R invalid_agent_name --force",
+ "Error: Unable to find agent 'invalid_agent_name', try specifying"
+ " its full name\n"
+ )
+
+ def test_fail_when_invalid_agent(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat: --force",
+ "Error: Invalid resource agent name 'ocf:heartbeat:'. Use"
+ " standard:provider:type when standard is 'ocf' or"
+ " standard:type otherwise. List of standards and providers can"
+ " be obtained by using commands 'pcs resource standards' and"
+ " 'pcs resource providers'\n"
+ )
+
+ def test_vail_when_agent_class_is_not_allowed(self):
+ self.assert_pcs_fail(
+ "resource create R invalid:Dummy --force",
+ "Error: Invalid resource agent name 'invalid:Dummy'. Use"
+ " standard:provider:type when standard is 'ocf' or"
+ " standard:type otherwise. List of standards and providers can"
+ " be obtained by using commands 'pcs resource standards' and"
+ " 'pcs resource providers'\n"
+ )
+
+ def test_fail_when_missing_provider_with_ocf_resource_agent(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:Dummy",
+ "Error: Invalid resource agent name 'ocf:Dummy'. Use"
+ " standard:provider:type when standard is 'ocf' or"
+ " standard:type otherwise. List of standards and providers can"
+ " be obtained by using commands 'pcs resource standards' and"
+ " 'pcs resource providers'\n"
+ )
+
+ def test_fail_when_provider_appear_with_non_ocf_resource_agent(self):
+ self.assert_pcs_fail(
+ "resource create R lsb:provider:Dummy --force",
+ "Error: Invalid resource agent name 'lsb:provider:Dummy'. Use"
+ " standard:provider:type when standard is 'ocf' or"
+ " standard:type otherwise. List of standards and providers can"
+ " be obtained by using commands 'pcs resource standards' and"
+ " 'pcs resource providers'\n"
+ )
+
+ def test_print_info_about_agent_completion(self):
+ self.assert_pcs_success(
+ "resource create R delay",
+ "Assumed agent name 'ocf:heartbeat:Delay' (deduced from 'delay')\n"
+ )
+
+ def test_fail_for_unambiguous_agent(self):
+ self.assert_pcs_fail(
+ "resource create R Dummy",
+ "Error: Multiple agents match 'Dummy', please specify full name:"
+ " ocf:heartbeat:Dummy, ocf:pacemaker:Dummy\n"
+ )
+
+ def test_for_options_not_matching_resource_agent(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy a=b c=d",
+ "Error: invalid resource options: 'a', 'c', allowed options are: "
+ "fake, state, trace_file, trace_ra, use --force to override\n"
+ )
+
+ def test_for_missing_options_of_resource_agent(self):
+ self.assert_pcs_fail(
+ "resource create --no-default-ops R IPaddr2",
+ "Error: required resource option 'ip' is missing,"
+ " use --force to override\n"
+ "Assumed agent name 'ocf:heartbeat:IPaddr2' (deduced from"
+ " 'IPaddr2')\n"
+ )
+
+ def test_fail_on_invalid_id(self):
+ self.assert_pcs_fail(
+ "resource create #R ocf:heartbeat:Dummy",
+ "Error: invalid resource name '#R',"
+ " '#' is not a valid first character for a resource name\n"
+ )
+
+ def test_fail_on_existing_id(self):
+ self.assert_pcs_success("resource create R ocf:heartbeat:Dummy")
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy",
+ "Error: 'R' already exists\n"
+ )
+
+ def test_fail_on_unknown_operation(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy op monitro interval=100",
+ "Error: 'monitro' is not a valid operation name value, use"
+ " meta-data, migrate_from, migrate_to, monitor, reload, start,"
+ " stop, validate-all, use --force to override\n"
+ )
+
+ def test_fail_on_ambiguous_value_of_option(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy op monitor timeout=10"
+ " timeout=20"
+ ,
+ "Error: duplicate option 'timeout' with different values '10' and"
+ " '20'\n"
+ )
+
+class FailOrWarnOp(ResourceTest):
+ def test_fail_empty(self):
+ self.assert_pcs_fail(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy"
+ " op meta is-managed=false"
+ ,
+ "Error: When using 'op' you must specify an operation name and at"
+ " least one option\n"
+ )
+
+ def test_fail_only_name_without_any_option(self):
+ self.assert_pcs_fail(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy"
+ " op monitor meta is-managed=false"
+ ,
+ "Error: When using 'op' you must specify an operation name and at"
+ " least one option\n"
+ )
+
+ def test_fail_duplicit(self):
+ self.assert_pcs_fail(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy op"
+ " monitor interval=1h monitor interval=3600sec"
+ " monitor interval=1min monitor interval=60s"
+ ,
+ [
+ "Error: multiple specification of the same operation with the"
+ " same interval:"
+ ,
+ "monitor with intervals 1h, 3600sec",
+ "monitor with intervals 1min, 60s",
+ ]
+ )
+
+ def test_fail_invalid_first_action(self):
+ self.assert_pcs_fail(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy op"
+ " mo=nitor interval=1min"
+ ,
+ "Error: When using 'op' you must specify an operation name after"
+ " 'op'\n"
+ ,
+ )
+
+ def test_fail_invalid_option(self):
+ self.assert_pcs_fail(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy op"
+ " monitor interval=1min moni=tor timeout=80s"
+ ,
+ "Error: invalid resource operation option 'moni', allowed options"
+ " are: OCF_CHECK_LEVEL, description, enabled, id, interval,"
+ " interval-origin, name, on-fail, record-pending, requires,"
+ " role, start-delay, timeout\n"
+ )
+
+ def test_fail_on_invalid_role(self):
+ self.assert_pcs_fail(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy op"
+ " monitor role=abc"
+ ,
+ "Error: 'abc' is not a valid role value, use Master, Slave,"
+ " Started, Stopped\n"
+ )
+
+ def test_force_invalid_role(self):
+ self.assert_pcs_fail(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy op"
+ " monitor role=abc --force"
+ ,
+ "Error: 'abc' is not a valid role value, use Master, Slave,"
+ " Started, Stopped\n"
+ )
+
+ def test_fail_on_invalid_requires(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy op"
+ " monitor requires=Abc"
+ ,
+ "Error: 'Abc' is not a valid requires value, use fencing, nothing,"
+ " quorum, unfencing\n"
+ )
+
+ def test_fail_on_invalid_on_fail(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy op"
+ " monitor on-fail=Abc"
+ ,
+ "Error: 'Abc' is not a valid on-fail value, use block, fence,"
+ " ignore, restart, restart-container, standby, stop\n"
+ )
+
+ def test_fail_on_invalid_record_pending(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy op"
+ " monitor record-pending=Abc"
+ ,
+ "Error: 'Abc' is not a valid record-pending value, use 0, 1, false,"
+ " true\n"
+ )
+
+ def test_fail_on_invalid_enabled(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy op"
+ " monitor enabled=Abc"
+ ,
+ "Error: 'Abc' is not a valid enabled value, use 0, 1, false, true\n"
+ )
+
+ def test_fail_on_combination_of_start_delay_and_interval_origin(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy op"
+ " monitor start-delay=10 interval-origin=20"
+ ,
+ "Error: Only one of resource operation options 'interval-origin'"
+ " and 'start-delay' can be used\n"
+ )
+
+class FailOrWarnGroup(ResourceTest):
+ def test_fail_when_invalid_group(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy --group 1",
+ "Error: invalid group name '1', '1' is not a valid first character"
+ " for a group name\n"
+ )
+
+ def test_fail_when_try_use_id_of_another_element(self):
+ self.assert_effect(
+ "resource create R1 ocf:heartbeat:Dummy --no-default-ops meta a=b",
+ """<resources>
+ <primitive class="ocf" id="R1" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="R1-meta_attributes">
+ <nvpair id="R1-meta_attributes-a" name="a" value="b"/>
+ </meta_attributes>
+ <operations>
+ <op id="R1-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+ self.assert_pcs_fail(
+ "resource create R2 ocf:heartbeat:Dummy --group R1-meta_attributes",
+ "Error: 'R1-meta_attributes' is not a group\n"
+ )
+
+
+ def test_fail_when_entered_both_after_and_before(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy --group G --after S1 --before S2",
+ "Error: you cannot specify both --before and --after\n"
+ )
+
+ def test_fail_when_after_is_used_without_group(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy --after S1",
+ "Error: you cannot use --after without --group\n"
+ )
+
+ def test_fail_when_before_is_used_without_group(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy --before S1",
+ "Error: you cannot use --before without --group\n"
+ )
+
+ def test_fail_when_before_after_conflicts_and_moreover_without_group(self):
+ self.assert_pcs_fail(
+ "resource create R ocf:heartbeat:Dummy --after S1 --before S2",
+ "Error: you cannot specify both --before and --after"
+ " and you have to specify --group\n"
+ )
+
+ def test_fail_when_before_does_not_exist(self):
+ self.assert_pcs_success(
+ "resource create R0 ocf:heartbeat:Dummy --group G1 "
+ )
+ self.assert_pcs_fail(
+ "resource create R2 ocf:heartbeat:Dummy --group G1 --before R1",
+ "Error: there is no resource 'R1' in the group 'G1'\n"
+ )
+
+ def test_fail_when_use_before_with_new_group(self):
+ self.assert_pcs_fail(
+ "resource create R2 ocf:heartbeat:Dummy --group G1 --before R1",
+ "Error: there is no resource 'R1' in the group 'G1'\n"
+ )
+
+ def test_fail_when_after_does_not_exist(self):
+ self.assert_pcs_success(
+ "resource create R0 ocf:heartbeat:Dummy --group G1 "
+ )
+ self.assert_pcs_fail(
+ "resource create R2 ocf:heartbeat:Dummy --group G1 --after R1",
+ "Error: there is no resource 'R1' in the group 'G1'\n"
+ )
+
+ def test_fail_when_use_after_with_new_group(self):
+ self.assert_pcs_fail(
+ "resource create R2 ocf:heartbeat:Dummy --group G1 --after R1",
+ "Error: there is no resource 'R1' in the group 'G1'\n"
+ )
+
+ def test_fail_when_on_pacemaker_remote_attempt(self):
+ self.assert_pcs_fail(
+ "resource create R2 ocf:pacemaker:remote",
+ "Error: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'"
+ ", use --force to override\n"
+ )
+
+ def test_warn_when_on_pacemaker_remote_attempt(self):
+ self.assert_pcs_success(
+ "resource create R2 ocf:pacemaker:remote --force",
+ "Warning: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'\n"
+ )
+
+ def test_fail_when_on_pacemaker_remote_conflict_with_existing_node(self):
+ self.assert_pcs_success(
+ "resource create R ocf:pacemaker:remote --force",
+ "Warning: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'\n"
+ )
+
+ self.assert_pcs_fail(
+ "resource create R2 ocf:pacemaker:remote server=R --force",
+ "Error: 'R' already exists\n"
+ "Warning: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'\n"
+ )
+
+ def test_fail_when_on_pacemaker_remote_conflict_with_existing_id(self):
+ self.assert_pcs_success(
+ "resource create R ocf:pacemaker:remote server=R2 --force",
+ "Warning: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'\n"
+ )
+
+ self.assert_pcs_fail(
+ "resource create R2 ocf:pacemaker:remote --force",
+ "Error: 'R2' already exists\n"
+ "Warning: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'\n"
+ )
+
+ def test_fail_when_on_guest_conflict_with_existing_node(self):
+ self.assert_pcs_success(
+ "resource create R ocf:pacemaker:remote --force",
+ "Warning: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'\n"
+ )
+
+ self.assert_pcs_fail(
+ "resource create R2 ocf:heartbeat:Dummy meta remote-node=R --force",
+ "Error: 'R' already exists\n"
+ "Warning: this command is not sufficient for creating a guest node"
+ ", use 'pcs cluster node add-guest'\n"
+ )
+
+ def test_fail_when_on_guest_conflict_with_existing_node_host(self):
+ self.assert_pcs_success(
+ "resource create R ocf:pacemaker:remote server=HOST --force",
+ "Warning: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'\n"
+ )
+
+ self.assert_pcs_fail(
+ "resource create R2 ocf:heartbeat:Dummy meta remote-node=HOST"
+ " --force"
+ ,
+ "Error: 'HOST' already exists\n"
+ "Warning: this command is not sufficient for creating a guest node"
+ ", use 'pcs cluster node add-guest'\n"
+ )
+
+ def test_fail_when_on_guest_conflict_with_existing_node_host_addr(self):
+ self.assert_pcs_success(
+ "resource create R ocf:pacemaker:remote server=HOST --force",
+ "Warning: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'\n"
+ )
+
+ self.assert_pcs_fail(
+ "resource create R2 ocf:heartbeat:Dummy meta remote-node=A"
+ " remote-addr=HOST --force"
+ ,
+ "Error: 'HOST' already exists\n"
+ "Warning: this command is not sufficient for creating a guest node"
+ ", use 'pcs cluster node add-guest'\n"
+ )
+
+ def test_not_fail_when_on_guest_when_conflict_host_with_name(self):
+ self.assert_pcs_success(
+ "resource create R ocf:pacemaker:remote server=HOST --force",
+ "Warning: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'\n"
+ )
+
+ self.assert_pcs_success(
+ "resource create R2 ocf:heartbeat:Dummy meta remote-node=HOST"
+ " remote-addr=R --force"
+ ,
+ "Warning: this command is not sufficient for creating a guest node, use"
+ " 'pcs cluster node add-guest'\n"
+ )
+
+ def test_fail_when_on_pacemaker_remote_guest_attempt(self):
+ self.assert_pcs_fail(
+ "resource create R2 ocf:heartbeat:Dummy meta remote-node=HOST",
+ "Error: this command is not sufficient for creating a guest node,"
+ " use 'pcs cluster node add-guest', use --force to override\n"
+ )
+
+ def test_warn_when_on_pacemaker_remote_guest_attempt(self):
+ self.assert_pcs_success(
+ "resource create R2 ocf:heartbeat:Dummy meta remote-node=HOST"
+ " --force"
+ ,
+ "Warning: this command is not sufficient for creating a guest node,"
+ " use 'pcs cluster node add-guest'\n"
+ )
diff --git a/pcs/test/cib_resource/test_manage_unmanage.py b/pcs/test/cib_resource/test_manage_unmanage.py
new file mode 100644
index 0000000..5b78646
--- /dev/null
+++ b/pcs/test/cib_resource/test_manage_unmanage.py
@@ -0,0 +1,277 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+import shutil
+
+from pcs.test.tools.cib import get_assert_pcs_effect_mixin
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_runner import PcsRunner
+
+
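+# The mixin compares the <resources> element extracted from the test CIB
+# against the expected XML passed to the assert_* helpers.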
+class ManageUnmanage(
+ TestCase,
+ get_assert_pcs_effect_mixin(
+ lambda cib: etree.tostring(
+ etree.parse(cib).findall(".//resources")[0]
+ )
+ )
+):
+ empty_cib = rc("cib-empty.xml")
+ temp_cib = rc("temp-cib.xml")
+
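+    # Expected <resources> content when only resource A is unmanaged; shared
+    # by the unmanage/manage tests below.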
+ cib_unmanaged_a = """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="B-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """
+
+ def setUp(self):
+ shutil.copy(self.empty_cib, self.temp_cib)
+ self.pcs_runner = PcsRunner(self.temp_cib)
+
+ def fixture_resource(self, name, managed=True, with_monitors=False):
+ self.assert_pcs_success(
+ "resource create {0} ocf:heartbeat:Dummy --no-default-ops".format(
+ name
+ )
+ )
+ if not managed:
+ self.assert_pcs_success(
+ "resource unmanage {0} {1}".format(
+ name,
+ "--monitor" if with_monitors else ""
+ )
+ )
+
+ def test_unmanage_none(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource unmanage",
+ "Error: You must specify resource(s) to unmanage\n"
+ )
+
+ def test_manage_none(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "resource manage",
+ "Error: You must specify resource(s) to manage\n"
+ )
+
+ def test_unmanage_one(self):
+ self.fixture_resource("A")
+ self.fixture_resource("B")
+ self.assert_effect("resource unmanage A", self.cib_unmanaged_a)
+
+ def test_manage_one(self):
+ self.fixture_resource("A", managed=False)
+ self.fixture_resource("B", managed=False)
+ self.assert_effect("resource manage B", self.cib_unmanaged_a)
+
+ def test_unmanage_monitor(self):
+ self.fixture_resource("A")
+ self.assert_effect(
+ "resource unmanage A --monitor",
+ """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20" enabled="false"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """
+ )
+
+ def test_unmanage_monitor_enabled(self):
+ self.fixture_resource("A")
+ self.assert_effect(
+ "resource unmanage A",
+ """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """
+ )
+
+ def test_manage_monitor(self):
+ self.fixture_resource("A", managed=True, with_monitors=True)
+ self.assert_effect(
+ "resource manage A --monitor",
+ """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """
+ )
+
+ def test_manage_monitor_disabled(self):
+ self.fixture_resource("A", managed=False, with_monitors=True)
+ self.assert_effect(
+ "resource manage A",
+ """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20" enabled="false"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """,
+ "Warning: Resource 'A' has no enabled monitor operations."
+ " Re-run with '--monitor' to enable them.\n"
+ )
+
+ def test_unmanage_more(self):
+ self.fixture_resource("A")
+ self.fixture_resource("B")
+ self.assert_effect(
+ "resource unmanage A B",
+ """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ <meta_attributes id="B-meta_attributes">
+ <nvpair id="B-meta_attributes-is-managed"
+ name="is-managed" value="false"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="B-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """
+ )
+
+ def test_manage_more(self):
+ self.fixture_resource("A", managed=False)
+ self.fixture_resource("B", managed=False)
+ self.assert_effect(
+ "resource manage A B",
+ """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="B" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="B-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """
+ )
+
+ def test_unmanage_nonexistent(self):
+ self.fixture_resource("A")
+
+ self.assert_pcs_fail(
+ "resource unmanage A B",
+ "Error: resource/clone/master/group 'B' does not exist\n"
+ )
+ self.assert_resources_xml_in_cib(
+ """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """
+ )
+
+ def test_manage_nonexistent(self):
+ self.fixture_resource("A", managed=False)
+
+ self.assert_pcs_fail(
+ "resource manage A B",
+ "Error: resource/clone/master/group 'B' does not exist\n"
+ )
+ self.assert_resources_xml_in_cib(
+ """
+ <resources>
+ <primitive class="ocf" id="A" provider="heartbeat" type="Dummy">
+ <meta_attributes id="A-meta_attributes">
+ <nvpair id="A-meta_attributes-is-managed"
+ name="is-managed" value="false"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="A-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>
+ """
+ )
diff --git a/pcs/test/cib_resource/test_operation_add.py b/pcs/test/cib_resource/test_operation_add.py
new file mode 100644
index 0000000..a842643
--- /dev/null
+++ b/pcs/test/cib_resource/test_operation_add.py
@@ -0,0 +1,135 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+import shutil
+
+from pcs.test.cib_resource.common import get_cib_resources
+from pcs.test.tools.cib import get_assert_pcs_effect_mixin
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_runner import PcsRunner
+from pcs.test.tools.pcs_unittest import TestCase
+
+
+class Success(
+ TestCase,
+ get_assert_pcs_effect_mixin(get_cib_resources)
+):
+ temp_cib = rc("temp-cib.xml")
+ def setUp(self):
+ self.prepare_cib_file()
+ self.pcs_runner = PcsRunner(self.temp_cib)
+
+ def prepare_cib_file(self):
+ with open(self.temp_cib, "w") as temp_cib_file:
+ temp_cib_file.write(self.fixture_cib_cache())
+
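+    # The starting CIB (an empty CIB plus one dummy resource) is generated
+    # once and cached on the class so that every test can reuse it.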
+ def fixture_cib_cache(self):
+ if not hasattr(self.__class__, "cib_cache"):
+ self.__class__.cib_cache = self.fixture_cib()
+ return self.__class__.cib_cache
+
+ def fixture_cib(self):
+ shutil.copy(rc('cib-empty-1.2.xml'), self.temp_cib)
+ self.pcs_runner = PcsRunner(self.temp_cib)
+ self.assert_pcs_success(
+ "resource create --no-default-ops R ocf:heartbeat:Dummy"
+ )
+ #add to cib:
+ # <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ # <operations>
+ # <op id="R-monitor-interval-60s" interval="60s"
+ # name="monitor"
+ # />
+ # </operations>
+ # </primitive>
+ cib_content = open(self.temp_cib).read()
+
+ #clean
+ self.pcs_runner = None
+ shutil.copy(rc('cib-empty-1.2.xml'), self.temp_cib)
+
+ return cib_content
+
+ def test_base_add(self):
+ self.assert_effect(
+ "resource op add R start interval=20s",
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="R-start-interval-20s" interval="20s"
+ name="start"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_add_with_OCF_CHECK_LEVEL(self):
+ self.assert_effect(
+ "resource op add R start interval=20s OCF_CHECK_LEVEL=1"
+ " description=test-description"
+ ,
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op description="test-description" name="start"
+ id="R-start-interval-20s" interval="20s"
+ >
+ <instance_attributes
+ id="params-R-start-interval-20s"
+ >
+ <nvpair
+ id="R-start-interval-20s-OCF_CHECK_LEVEL-1"
+ name="OCF_CHECK_LEVEL" value="1"
+ />
+ </instance_attributes>
+ </op>
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_can_multiple_operation_add(self):
+ self.assert_effect(
+ "resource op add R start interval=20s",
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="R-start-interval-20s" interval="20s"
+ name="start"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+ self.assert_effect(
+ "resource op add R stop interval=30s",
+ """<resources>
+ <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="R-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ <op id="R-start-interval-20s" interval="20s"
+ name="start"
+ />
+ <op id="R-stop-interval-30s" interval="30s"
+ name="stop"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
diff --git a/pcs/test/cib_resource/test_stonith_create.py b/pcs/test/cib_resource/test_stonith_create.py
new file mode 100644
index 0000000..8993d8d
--- /dev/null
+++ b/pcs/test/cib_resource/test_stonith_create.py
@@ -0,0 +1,289 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs import utils
+from pcs.test.cib_resource.common import ResourceTest
+from pcs.test.tools import pcs_unittest as unittest
+from pcs.test.cib_resource.stonith_common import need_load_xvm_fence_agent
+
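+#the skip below assumes the fence_scsi agent provides unfencing on every
+#supported platform except RHEL 6 based clusters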
+need_fence_scsi_providing_unfencing = unittest.skipUnless(
+ not utils.is_rhel6(),
+ "test requires system where stonith agent 'fence_scsi' provides unfencing"
+)
+
+class PlainStonith(ResourceTest):
+ @need_load_xvm_fence_agent
+ def test_simplest(self):
+ self.assert_effect(
+ "stonith create S fence_xvm",
+ """<resources>
+ <primitive class="stonith" id="S" type="fence_xvm">
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ @need_fence_scsi_providing_unfencing
+ def test_base_with_agent_that_provides_unfencing(self):
+ self.assert_effect(
+ "stonith create S fence_scsi",
+ """<resources>
+ <primitive class="stonith" id="S" type="fence_scsi">
+ <meta_attributes id="S-meta_attributes">
+ <nvpair id="S-meta_attributes-provides" name="provides"
+ value="unfencing"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_error_when_not_valid_name(self):
+ self.assert_pcs_fail_regardless_of_force(
+ "stonith create S fence_xvm:invalid",
+ "Error: Invalid stonith agent name 'fence_xvm:invalid'. List of"
+ " agents can be obtained by using command 'pcs stonith list'."
+ " Do not use the 'stonith:' prefix. Agent name cannot contain"
+ " the ':' character.\n"
+ )
+
+ def test_error_when_not_valid_agent(self):
+ self.assert_pcs_fail(
+ "stonith create S absent",
+ "Error: Agent 'absent' is not installed or does not provide valid"
+ " metadata: Metadata query for stonith:absent failed: -5, use"
+ " --force to override\n"
+ )
+
+ def test_warning_when_not_valid_agent(self):
+ self.assert_effect(
+ "stonith create S absent --force",
+ """<resources>
+ <primitive class="stonith" id="S" type="absent">
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>""",
+ "Warning: Agent 'absent' is not installed or does not provide valid"
+ " metadata: Metadata query for stonith:absent failed: -5\n"
+ )
+
+ @need_load_xvm_fence_agent
+ def test_disabled_puts_target_role_stopped(self):
+ self.assert_effect(
+ "stonith create S fence_xvm --disabled",
+ """<resources>
+ <primitive class="stonith" id="S" type="fence_xvm">
+ <meta_attributes id="S-meta_attributes">
+ <nvpair id="S-meta_attributes-target-role"
+ name="target-role" value="Stopped"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+
+class WithMeta(ResourceTest):
+ @need_load_xvm_fence_agent
+ def test_simplest_with_meta_provides(self):
+ self.assert_effect(
+ "stonith create S fence_xvm meta provides=something",
+ """<resources>
+ <primitive class="stonith" id="S" type="fence_xvm">
+ <meta_attributes id="S-meta_attributes">
+ <nvpair id="S-meta_attributes-provides" name="provides"
+ value="something"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ @need_fence_scsi_providing_unfencing
+ def test_base_with_agent_that_provides_unfencing_with_meta_provides(self):
+ self.assert_effect(
+ "stonith create S fence_scsi meta provides=something",
+ """<resources>
+ <primitive class="stonith" id="S" type="fence_scsi">
+ <meta_attributes id="S-meta_attributes">
+ <nvpair id="S-meta_attributes-provides" name="provides"
+ value="unfencing"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+class InGroup(ResourceTest):
+ @need_load_xvm_fence_agent
+ def test_command_simply_puts_stonith_into_group(self):
+ self.assert_effect(
+ "stonith create S fence_xvm --group G",
+ """<resources>
+ <group id="G">
+ <primitive class="stonith" id="S" type="fence_xvm">
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </group>
+ </resources>"""
+ )
+
+ @need_load_xvm_fence_agent
+ def test_command_simply_puts_stonith_into_group_at_the_end(self):
+ self.assert_pcs_success("stonith create S1 fence_xvm --group G")
+ self.assert_effect(
+ "stonith create S2 fence_xvm --group G",
+ """<resources>
+ <group id="G">
+ <primitive class="stonith" id="S1" type="fence_xvm">
+ <operations>
+ <op id="S1-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ <primitive class="stonith" id="S2" type="fence_xvm">
+ <operations>
+ <op id="S2-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </group>
+ </resources>"""
+ )
+
+ @need_load_xvm_fence_agent
+ def test_command_simply_puts_stonith_into_group_before_another(self):
+ self.assert_pcs_success("stonith create S1 fence_xvm --group G")
+ self.assert_effect(
+ "stonith create S2 fence_xvm --group G --before S1",
+ """<resources>
+ <group id="G">
+ <primitive class="stonith" id="S2" type="fence_xvm">
+ <operations>
+ <op id="S2-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ <primitive class="stonith" id="S1" type="fence_xvm">
+ <operations>
+ <op id="S1-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </group>
+ </resources>"""
+ )
+
+ @need_load_xvm_fence_agent
+ def test_command_simply_puts_stonith_into_group_after_another(self):
+ self.assert_pcs_success_all([
+ "stonith create S1 fence_xvm --group G",
+ "stonith create S2 fence_xvm --group G",
+ ])
+ self.assert_effect(
+ "stonith create S3 fence_xvm --group G --after S1",
+ """<resources>
+ <group id="G">
+ <primitive class="stonith" id="S1" type="fence_xvm">
+ <operations>
+ <op id="S1-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ <primitive class="stonith" id="S3" type="fence_xvm">
+ <operations>
+ <op id="S3-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ <primitive class="stonith" id="S2" type="fence_xvm">
+ <operations>
+ <op id="S2-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </group>
+ </resources>"""
+ )
+
+ @need_load_xvm_fence_agent
+ def test_fail_when_intended_before_item_does_not_exist(self):
+ self.assert_pcs_fail(
+ "stonith create S2 fence_xvm --group G --before S1",
+ "Error: there is no resource 'S1' in the group 'G'\n"
+ )
+
+ @need_load_xvm_fence_agent
+ def test_fail_when_intended_after_item_does_not_exist(self):
+ self.assert_pcs_fail(
+ "stonith create S2 fence_xvm --group G --after S1",
+ "Error: there is no resource 'S1' in the group 'G'\n"
+ )
+
+ def test_fail_when_entered_both_after_and_before(self):
+ self.assert_pcs_fail(
+ "stonith create S fence_xvm --group G --after S1 --before S2",
+ "Error: you cannot specify both --before and --after\n"
+ )
+
+ def test_fail_when_after_is_used_without_group(self):
+ self.assert_pcs_fail(
+ "stonith create S fence_xvm --after S1",
+ "Error: you cannot use --after without --group\n"
+ )
+
+ def test_fail_when_before_is_used_without_group(self):
+ self.assert_pcs_fail(
+ "stonith create S fence_xvm --before S1",
+ "Error: you cannot use --before without --group\n"
+ )
+
+ def test_fail_when_before_after_conflicts_and_moreover_without_group(self):
+ self.assert_pcs_fail(
+ "stonith create S fence_xvm --after S1 --before S2",
+ "Error: you cannot specify both --before and --after"
+ " and you have to specify --group\n"
+ )
diff --git a/pcs/test/cib_resource/test_stonith_enable_disable.py b/pcs/test/cib_resource/test_stonith_enable_disable.py
new file mode 100644
index 0000000..956be0d
--- /dev/null
+++ b/pcs/test/cib_resource/test_stonith_enable_disable.py
@@ -0,0 +1,107 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.test.cib_resource.common import ResourceTest
+from pcs.test.cib_resource.stonith_common import need_load_xvm_fence_agent
+
+@need_load_xvm_fence_agent
+class Enable(ResourceTest):
+ def test_enable_disabled_stonith(self):
+ self.assert_effect(
+ "stonith create S fence_xvm --disabled",
+ """<resources>
+ <primitive class="stonith" id="S" type="fence_xvm">
+ <meta_attributes id="S-meta_attributes">
+ <nvpair id="S-meta_attributes-target-role"
+ name="target-role" value="Stopped"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+ self.assert_effect(
+ "stonith enable S",
+ """<resources>
+ <primitive class="stonith" id="S" type="fence_xvm">
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_keep_enabled_stonith(self):
+ result_xml = """<resources>
+ <primitive class="stonith" id="S" type="fence_xvm">
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+
+ self.assert_effect("stonith create S fence_xvm", result_xml)
+ self.assert_effect("stonith enable S", result_xml)
+
+@need_load_xvm_fence_agent
+class Disable(ResourceTest):
+ def test_disable_enabled_stonith(self):
+ self.assert_effect(
+ "stonith create S fence_xvm",
+ """<resources>
+ <primitive class="stonith" id="S" type="fence_xvm">
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+ self.assert_effect(
+ "stonith disable S",
+ """<resources>
+ <primitive class="stonith" id="S" type="fence_xvm">
+ <meta_attributes id="S-meta_attributes">
+ <nvpair id="S-meta_attributes-target-role"
+ name="target-role" value="Stopped"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ )
+
+ def test_keep_disabled_stonith(self):
+ result_xml = """<resources>
+ <primitive class="stonith" id="S" type="fence_xvm">
+ <meta_attributes id="S-meta_attributes">
+ <nvpair id="S-meta_attributes-target-role"
+ name="target-role" value="Stopped"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="S-monitor-interval-60s" interval="60s"
+ name="monitor"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ self.assert_effect("stonith create S fence_xvm --disabled", result_xml)
+ self.assert_effect("stonith disable S", result_xml)
diff --git a/pcs/test/resources/cib-empty-2.3-withnodes.xml b/pcs/test/resources/cib-empty-2.3-withnodes.xml
new file mode 100644
index 0000000..19e2a48
--- /dev/null
+++ b/pcs/test/resources/cib-empty-2.3-withnodes.xml
@@ -0,0 +1,12 @@
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.3" crm_feature_set="3.0.10" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node id="1" uname="rh7-1"/>
+ <node id="2" uname="rh7-2"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+ <status/>
+</cib>
diff --git a/pcs/test/resources/cib-empty-2.5-withnodes.xml b/pcs/test/resources/cib-empty-2.5-withnodes.xml
new file mode 100644
index 0000000..a53cb43
--- /dev/null
+++ b/pcs/test/resources/cib-empty-2.5-withnodes.xml
@@ -0,0 +1,12 @@
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.5" crm_feature_set="3.0.10" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node id="1" uname="rh7-1"/>
+ <node id="2" uname="rh7-2"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+ <status/>
+</cib>
diff --git a/pcs/test/resources/cib-empty-2.5.xml b/pcs/test/resources/cib-empty-2.5.xml
index 1b4fb0a..9312ee9 100644
--- a/pcs/test/resources/cib-empty-2.5.xml
+++ b/pcs/test/resources/cib-empty-2.5.xml
@@ -1,4 +1,4 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.5" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.5" crm_feature_set="3.0.10" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
<configuration>
<crm_config/>
<nodes>
diff --git a/pcs/test/resources/cib-empty-2.5.xml b/pcs/test/resources/cib-empty-2.6.xml
similarity index 55%
copy from pcs/test/resources/cib-empty-2.5.xml
copy to pcs/test/resources/cib-empty-2.6.xml
index 1b4fb0a..fc845f4 100644
--- a/pcs/test/resources/cib-empty-2.5.xml
+++ b/pcs/test/resources/cib-empty-2.6.xml
@@ -1,4 +1,4 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.5" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.6" crm_feature_set="3.0.10" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
<configuration>
<crm_config/>
<nodes>
diff --git a/pcs/test/resources/cib-empty-2.5.xml b/pcs/test/resources/cib-empty-2.8.xml
similarity index 55%
copy from pcs/test/resources/cib-empty-2.5.xml
copy to pcs/test/resources/cib-empty-2.8.xml
index 1b4fb0a..e965fb5 100644
--- a/pcs/test/resources/cib-empty-2.5.xml
+++ b/pcs/test/resources/cib-empty-2.8.xml
@@ -1,4 +1,4 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.5" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.8" crm_feature_set="3.0.10" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
<configuration>
<crm_config/>
<nodes>
diff --git a/pcs/test/resources/cib-empty-2.5.xml b/pcs/test/resources/cib-empty-with3nodes.xml
similarity index 63%
copy from pcs/test/resources/cib-empty-2.5.xml
copy to pcs/test/resources/cib-empty-with3nodes.xml
index 1b4fb0a..d24c75c 100644
--- a/pcs/test/resources/cib-empty-2.5.xml
+++ b/pcs/test/resources/cib-empty-with3nodes.xml
@@ -1,7 +1,10 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.5" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.6" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
<configuration>
<crm_config/>
<nodes>
+ <node id="1" uname="rh7-1"/>
+ <node id="2" uname="rh7-2"/>
+ <node id="3" uname="rh7-3"/>
</nodes>
<resources/>
<constraints/>
diff --git a/pcs/test/resources/resource_agent_ocf_heartbeat_dummy.xml b/pcs/test/resources/resource_agent_ocf_heartbeat_dummy.xml
new file mode 100644
index 0000000..cd9e60b
--- /dev/null
+++ b/pcs/test/resources/resource_agent_ocf_heartbeat_dummy.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="Dummy" version="0.9">
+<version>1.0</version>
+
+<longdesc lang="en">
+This is a Dummy Resource Agent. It does absolutely nothing except
+keep track of whether its running or not.
+Its purpose in life is for testing and to serve as a template for RA writers.
+
+NB: Please pay attention to the timeouts specified in the actions
+section below. They should be meaningful for the kind of resource
+the agent manages. They should be the minimum advised timeouts,
+but they shouldn't/cannot cover _all_ possible resource
+instances. So, try to be neither overly generous nor too stingy,
+but moderate. The minimum timeouts should never be below 10 seconds.
+</longdesc>
+<shortdesc lang="en">Example stateless resource agent</shortdesc>
+
+<parameters>
+<parameter name="state" unique="1">
+<longdesc lang="en">
+Location to store the resource state in.
+</longdesc>
+<shortdesc lang="en">State file</shortdesc>
+<content type="string" default="/var/run/resource-agents/Dummy-undef.state" />
+</parameter>
+
+<parameter name="fake" unique="0">
+<longdesc lang="en">
+Fake attribute that can be changed to cause a reload
+</longdesc>
+<shortdesc lang="en">Fake attribute that can be changed to cause a reload</shortdesc>
+<content type="string" default="dummy" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="20" />
+<action name="stop" timeout="20" />
+<action name="monitor" timeout="20" interval="10" depth="0" />
+<action name="reload" timeout="20" />
+<action name="migrate_to" timeout="20" />
+<action name="migrate_from" timeout="20" />
+<action name="meta-data" timeout="5" />
+<action name="validate-all" timeout="20" />
+</actions>
+</resource-agent>
diff --git a/pcs/test/suite.py b/pcs/test/suite.py
index fdab448..fcac586 100755
--- a/pcs/test/suite.py
+++ b/pcs/test/suite.py
@@ -8,6 +8,12 @@ from __future__ import (
import sys
import os.path
+is_2_7_or_higher = sys.version_info[0] > 2 or sys.version_info[1] > 6
+
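+#importlib (available since Python 2.7) is used by prepare_test_name() to
+#check whether a test name ending in ".py" is an importable module name or
+#just a file path with the extension left on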
+if is_2_7_or_higher:
+ import importlib
+
+
PACKAGE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)
)))
@@ -22,8 +28,24 @@ def prepare_test_name(test_name):
but loader need it in module path format like:
"pcs.test.test_node"
so is practical accept fs path format and prepare it for loader
+
+ Sometimes the name may include the .py extension:
+ "pcs/test/test_node.py"
+ in such a case the extension is removed
"""
- return test_name.replace("/", ".")
+ candidate = test_name.replace("/", ".")
+ if not is_2_7_or_higher:
+ return candidate
+
+ py_extension = ".py"
+ if not candidate.endswith(py_extension):
+ return candidate
+
+ try:
+ importlib.import_module(candidate)
+ return candidate
+ except ImportError:
+ return candidate[:-len(py_extension)]
def tests_from_suite(test_candidate):
if isinstance(test_candidate, unittest.TestCase):
@@ -60,15 +82,26 @@ def discover_tests(explicitly_enumerated_tests, exclude_enumerated_tests=False):
explicitly_enumerated_tests = [
prepare_test_name(arg) for arg in sys.argv[1:] if arg not in (
"-v",
- "--vanilla",
- "--no-color", #deprecated, use --vanilla instead
"--all-but",
+ "--fast-info", #show a traceback immediatelly after the test fails
"--last-slash",
- "--traditional-verbose",
+ "--list",
+ "--no-color", #deprecated, use --vanilla instead
"--traceback-highlight",
+ "--traditional-verbose",
+ "--vanilla",
)
]
+discovered_tests = discover_tests(
+ explicitly_enumerated_tests, "--all-but" in sys.argv
+)
+if "--list" in sys.argv:
+ test_list = tests_from_suite(discovered_tests)
+ print("\n".join(sorted(test_list)))
+ print("{0} tests found".format(len(test_list)))
+ sys.exit()
+
if "--no-color" in sys.argv:
print("DEPRECATED: --no-color is deprecated, use --vanilla instead")
@@ -90,15 +123,14 @@ if use_improved_result_class:
slash_last_fail_in_overview=("--last-slash" in sys.argv),
traditional_verbose=("--traditional-verbose" in sys.argv),
traceback_highlight=("--traceback-highlight" in sys.argv),
+ fast_info=("--fast-info" in sys.argv)
)
testRunner = unittest.TextTestRunner(
verbosity=2 if "-v" in sys.argv else 1,
resultclass=resultclass
)
-test_result = testRunner.run(
- discover_tests(explicitly_enumerated_tests, "--all-but" in sys.argv)
-)
+test_result = testRunner.run(discovered_tests)
if not test_result.wasSuccessful():
sys.exit(1)
diff --git a/pcs/test/test_acl.py b/pcs/test/test_acl.py
index 9f9f878..a7ac41a 100644
--- a/pcs/test/test_acl.py
+++ b/pcs/test/test_acl.py
@@ -405,7 +405,7 @@ Group: group2
o,r = pcs("acl group delete user1")
assert r == 1
- ac(o,"Error: group 'user1' does not exist\n")
+ ac(o,"Error: 'user1' is not an acl group\n")
o,r = pcs("acl")
ac(o, """\
@@ -835,7 +835,7 @@ Role: role4
self.assert_pcs_success("acl group create group1")
self.assert_pcs_fail(
"acl role assign role1 to user group1",
- "Error: user 'group1' does not exist\n"
+ "Error: 'group1' is not an acl user\n"
)
def test_assign_unassign_role_to_user_with_to(self):
@@ -871,7 +871,7 @@ Role: role4
self.assert_pcs_success("acl user create user1")
self.assert_pcs_fail(
"acl role assign role1 to group user1",
- "Error: group 'user1' does not exist\n"
+ "Error: 'user1' is not an acl group\n"
)
def test_assign_unassign_role_to_group_with_to(self):
diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py
index ccb53d7..5b4fc48 100644
--- a/pcs/test/test_alert.py
+++ b/pcs/test/test_alert.py
@@ -10,7 +10,7 @@ import shutil
from pcs.test.tools.misc import (
get_test_resource as rc,
- is_minimum_pacemaker_version,
+ skip_unless_pacemaker_version,
outdent,
)
from pcs.test.tools.assertions import AssertPcsMixin
@@ -22,11 +22,10 @@ old_cib = rc("cib-empty.xml")
empty_cib = rc("cib-empty-2.5.xml")
temp_cib = rc("temp-cib.xml")
-
-ALERTS_SUPPORTED = is_minimum_pacemaker_version(1, 1, 15)
-ALERTS_NOT_SUPPORTED_MSG = "Pacemaker version is too old (must be >= 1.1.15)" +\
- " to test alerts"
-
+skip_unless_alerts_supported = skip_unless_pacemaker_version(
+ (1, 1, 15),
+ "alerts"
+)
class PcsAlertTest(unittest.TestCase, AssertPcsMixin):
def setUp(self):
@@ -34,7 +33,7 @@ class PcsAlertTest(unittest.TestCase, AssertPcsMixin):
self.pcs_runner = PcsRunner(temp_cib)
-@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+@skip_unless_alerts_supported
class AlertCibUpgradeTest(unittest.TestCase, AssertPcsMixin):
def setUp(self):
shutil.copy(old_cib, temp_cib)
@@ -63,7 +62,7 @@ Alerts:
)
-@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+@skip_unless_alerts_supported
class CreateAlertTest(PcsAlertTest):
def test_create_multiple_without_id(self):
self.assert_pcs_success(
@@ -151,7 +150,7 @@ Alerts:
)
-@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+@skip_unless_alerts_supported
class UpdateAlertTest(PcsAlertTest):
def test_update_everything(self):
self.assert_pcs_success(
@@ -192,15 +191,15 @@ Alerts:
def test_not_existing_alert(self):
self.assert_pcs_fail(
- "alert update alert1", "Error: Alert 'alert1' not found.\n"
+ "alert update alert1", "Error: alert 'alert1' does not exist\n"
)
-@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+@skip_unless_alerts_supported
class RemoveAlertTest(PcsAlertTest):
def test_not_existing_alert(self):
self.assert_pcs_fail(
- "alert remove alert1", "Error: Alert 'alert1' not found.\n"
+ "alert remove alert1", "Error: alert 'alert1' does not exist\n"
)
def test_one(self):
@@ -260,7 +259,7 @@ class RemoveAlertTest(PcsAlertTest):
)
-@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+@skip_unless_alerts_supported
class AddRecipientTest(PcsAlertTest):
def test_success(self):
self.assert_pcs_success("alert create path=test")
@@ -356,7 +355,7 @@ Alerts:
-@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+@skip_unless_alerts_supported
class UpdateRecipientAlert(PcsAlertTest):
def test_success(self):
self.assert_pcs_success("alert create path=test")
@@ -470,7 +469,7 @@ Alerts:
def test_no_recipient(self):
self.assert_pcs_fail(
"alert recipient update rec description=desc",
- "Error: Recipient 'rec' does not exist\n"
+ "Error: recipient 'rec' does not exist\n"
)
def test_empty_value(self):
@@ -484,7 +483,7 @@ Alerts:
)
-@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
+@skip_unless_alerts_supported
class RemoveRecipientTest(PcsAlertTest):
def test_one(self):
self.assert_pcs_success("alert create path=test")
@@ -557,8 +556,8 @@ class RemoveRecipientTest(PcsAlertTest):
)
self.assert_pcs_fail(
"alert recipient remove rec1 rec2 rec3", outdent("""\
- Error: Recipient 'rec2' does not exist
- Error: Recipient 'rec3' does not exist
+ Error: recipient 'rec2' does not exist
+ Error: recipient 'rec3' does not exist
"""
)
)
diff --git a/pcs/test/test_booth.py b/pcs/test/test_booth.py
index 2a445e1..2bbfe96 100644
--- a/pcs/test/test_booth.py
+++ b/pcs/test/test_booth.py
@@ -26,7 +26,7 @@ BOOTH_RESOURCE_AGENT_INSTALLED = os.path.exists(
need_booth_resource_agent = unittest.skipUnless(
BOOTH_RESOURCE_AGENT_INSTALLED,
"test requires resource agent ocf:pacemaker:booth-site"
- " which is not istalled"
+ " which is not installed"
)
@@ -152,7 +152,8 @@ class DestroyTest(BoothMixin, unittest.TestCase):
def test_failed_when_using_mocked_booth_env(self):
self.assert_pcs_fail(
"booth destroy",
- "Error: This command does not support --booth-conf, --booth-key\n"
+ "Error: This command does not support '--booth-conf', '--booth-key'"
+ "\n"
)
@need_booth_resource_agent
@@ -276,9 +277,9 @@ class CreateTest(BoothTest):
self.assert_pcs_success("resource show booth-booth-ip", [
" Resource: booth-booth-ip (class=ocf provider=heartbeat type=IPaddr2)",
" Attributes: ip=192.168.122.120",
- " Operations: start interval=0s timeout=20s (booth-booth-ip-start-interval-0s)",
+ " Operations: monitor interval=10s timeout=20s (booth-booth-ip-monitor-interval-10s)",
+ " start interval=0s timeout=20s (booth-booth-ip-start-interval-0s)",
" stop interval=0s timeout=20s (booth-booth-ip-stop-interval-0s)",
- " monitor interval=10s timeout=20s (booth-booth-ip-monitor-interval-10s)",
])
def test_refuse_create_booth_when_config_is_already_in_use(self):
@@ -328,6 +329,20 @@ class RemoveTest(BoothTest):
])
self.assert_pcs_success("resource show", "NO resources configured\n")
+ def test_remove_when_group_disabled(self):
+ self.assert_pcs_success("resource show", "NO resources configured\n")
+ self.assert_pcs_success("booth create ip 192.168.122.120")
+ self.assert_pcs_success("resource disable booth-booth-group")
+ self.assert_pcs_success("resource show", [
+ " Resource Group: booth-booth-group",
+ " booth-booth-ip (ocf::heartbeat:IPaddr2): Stopped (disabled)",
+ " booth-booth-service (ocf::pacemaker:booth-site): Stopped (disabled)",
+ ])
+ self.assert_pcs_success("booth remove", [
+ "Deleting Resource - booth-booth-ip",
+ "Deleting Resource (and group) - booth-booth-service",
+ ])
+ self.assert_pcs_success("resource show", "NO resources configured\n")
def test_remove_multiple_booth_configuration(self):
self.assert_pcs_success("resource show", "NO resources configured\n")
@@ -349,7 +364,6 @@ class RemoveTest(BoothTest):
"Deleting Resource - some-id",
])
-
class TicketGrantTest(BoothTest):
def test_failed_when_implicit_site_but_not_correct_confgiuration_in_cib(
self
diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
index f442f34..2b7fd5a 100644
--- a/pcs/test/test_cluster.py
+++ b/pcs/test/test_cluster.py
@@ -14,7 +14,8 @@ from pcs.test.tools.assertions import AssertPcsMixin
from pcs.test.tools.misc import (
ac,
get_test_resource as rc,
- is_minimum_pacemaker_version,
+ skip_unless_pacemaker_version,
+ outdent,
)
from pcs.test.tools.pcs_runner import (
pcs,
@@ -53,7 +54,7 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin):
os.unlink(cluster_conf_tmp)
def testNodeStandby(self):
- # only basic test, standby subcommands were m oved to 'pcs node'
+ # only basic test, standby subcommands were moved to 'pcs node'
output, returnVal = pcs(temp_cib, "cluster standby rh7-1")
ac(output, "")
assert returnVal == 0
@@ -63,6 +64,7 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin):
assert returnVal == 0
def testRemoteNode(self):
+ #pylint: disable=trailing-whitespace
o,r = pcs(
temp_cib,
"resource create D1 ocf:heartbeat:Dummy --no-default-ops"
@@ -75,43 +77,104 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin):
)
assert r==0 and o==""
- o,r = pcs(temp_cib, "cluster remote-node rh7-2 D1")
+ o,r = pcs(temp_cib, "cluster remote-node rh7-2g D1")
assert r==1 and o.startswith("\nUsage: pcs cluster remote-node")
- o,r = pcs(temp_cib, "cluster remote-node add rh7-2 D1")
- assert r==0 and o==""
-
- o,r = pcs(temp_cib, "cluster remote-node add rh7-1 D2 remote-port=100 remote-addr=400 remote-connect-timeout=50")
- assert r==0 and o==""
+ o,r = pcs(temp_cib, "cluster remote-node add rh7-2g D1 --force")
+ assert r==0
+ self.assertEqual(
+ o,
+ "Warning: this command is deprecated, use 'pcs cluster node"
+ " add-guest'\n"
+ )
- o,r = pcs(temp_cib, "resource --full")
+ o,r = pcs(
+ temp_cib,
+ "cluster remote-node add rh7-1 D2 remote-port=100 remote-addr=400"
+ " remote-connect-timeout=50 --force"
+ )
assert r==0
- ac(o," Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Meta Attrs: remote-node=rh7-2 \n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Meta Attrs: remote-node=rh7-1 remote-port=100 remote-addr=400 remote-connect-timeout=50 \n Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
+ self.assertEqual(
+ o,
+ "Warning: this command is deprecated, use 'pcs cluster node"
+ " add-guest'\n"
+ )
+
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Meta Attrs: remote-node=rh7-2g
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+ Meta Attrs: remote-node=rh7-1 remote-port=100 remote-addr=400 remote-connect-timeout=50
+ Operations: monitor interval=10 timeout=20 (D2-monitor-interval-10)
+ """
+ ))
o,r = pcs(temp_cib, "cluster remote-node remove")
assert r==1 and o.startswith("\nUsage: pcs cluster remote-node")
- o,r = pcs(temp_cib, "cluster remote-node remove rh7-2")
- assert r==0 and o==""
+ self.assert_pcs_fail(
+ "cluster remote-node remove rh7-2g",
+ "Error: this command is deprecated, use 'pcs cluster node"
+ " remove-guest', use --force to override\n"
+ )
+ self.assert_pcs_success(
+ "cluster remote-node remove rh7-2g --force",
+ "Warning: this command is deprecated, use 'pcs cluster node"
+ " remove-guest'\n"
+ )
- o,r = pcs(temp_cib, "cluster remote-node add rh7-2 NOTARESOURCE")
- assert r==1
- ac(o,"Error: unable to find resource 'NOTARESOURCE'\n")
+ self.assert_pcs_fail(
+ "cluster remote-node add rh7-2g NOTARESOURCE --force",
+ "Error: unable to find resource 'NOTARESOURCE'\n"
+ "Warning: this command is deprecated, use"
+ " 'pcs cluster node add-guest'\n"
+ ,
+ )
- o,r = pcs(temp_cib, "cluster remote-node remove rh7-2")
- assert r==1
- ac(o,"Error: unable to remove: cannot find remote-node 'rh7-2'\n")
+ self.assert_pcs_fail(
+ "cluster remote-node remove rh7-2g",
+ "Error: this command is deprecated, use 'pcs cluster node"
+ " remove-guest', use --force to override\n"
+ )
+ self.assert_pcs_fail(
+ "cluster remote-node remove rh7-2g --force",
+ "Error: unable to remove: cannot find remote-node 'rh7-2g'\n"
+ "Warning: this command is deprecated, use 'pcs cluster node"
+ " remove-guest'\n"
+ )
- o,r = pcs(temp_cib, "resource --full")
- assert r==0
- ac(o," Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Meta Attrs: remote-node=rh7-1 remote-port=100 remote-addr=400 remote-connect-timeout=50 \n Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
- o,r = pcs(temp_cib, "cluster remote-node remove rh7-1")
- assert r==0 and o==""
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+ Meta Attrs: remote-node=rh7-1 remote-port=100 remote-addr=400 remote-connect-timeout=50
+ Operations: monitor interval=10 timeout=20 (D2-monitor-interval-10)
+ """
+ ))
- o,r = pcs(temp_cib, "resource --full")
- assert r==0
- ac(o," Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
+ self.assert_pcs_fail(
+ "cluster remote-node remove rh7-1",
+ "Error: this command is deprecated, use 'pcs cluster node"
+ " remove-guest', use --force to override\n"
+ )
+ self.assert_pcs_success(
+ "cluster remote-node remove rh7-1 --force",
+ "Warning: this command is deprecated, use 'pcs cluster node"
+ " remove-guest'\n"
+ )
+
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D2-monitor-interval-10)
+ """
+ ))
def test_cluster_setup_bad_args(self):
output, returnVal = pcs(temp_cib, "cluster setup")
@@ -169,7 +232,6 @@ Warning: Unable to resolve hostname: nonexistant-address.invalid
corosync_conf = """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
}
@@ -228,7 +290,6 @@ Error: {0} already exists, use --force to overwrite
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
}
@@ -375,7 +436,6 @@ Error: {0} already exists, use --force to overwrite
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
}
@@ -416,7 +476,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
}
@@ -461,7 +520,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
}
@@ -502,7 +560,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
}
@@ -548,7 +605,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
}
@@ -590,7 +646,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
}
@@ -632,7 +687,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
}
@@ -673,7 +727,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
}
@@ -719,7 +772,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
}
@@ -765,7 +817,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
}
@@ -815,7 +866,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udp
}
@@ -1216,7 +1266,6 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
ip_version: ipv6
@@ -1310,7 +1359,7 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
assert r == 1
ac(
o,
- "Error: 'blah' is not a valid RRP mode value, use passive, active, use --force to override\n"
+ "Error: 'blah' is not a valid RRP mode value, use active, passive, use --force to override\n"
)
o,r = pcs(
@@ -1324,7 +1373,6 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udp
rrp_mode: passive
@@ -1383,7 +1431,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udp
rrp_mode: passive
@@ -1442,7 +1489,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udp
rrp_mode: passive
@@ -1501,7 +1547,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udp
rrp_mode: passive
@@ -1569,7 +1614,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udp
rrp_mode: active
@@ -1635,7 +1679,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udp
rrp_mode: active
@@ -1711,7 +1754,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: udpu
rrp_mode: passive
@@ -1800,7 +1842,6 @@ logging {
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: test99
transport: udpu
}
@@ -1850,7 +1891,7 @@ logging {
.format(cluster_conf_tmp)
)
ac(output, """\
-Error: 'blah' is not a valid RRP mode value, use passive, active, use --force to override
+Error: 'blah' is not a valid RRP mode value, use active, passive, use --force to override
Warning: Enabling broadcast for all rings as CMAN does not support broadcast in only one ring
""")
self.assertEqual(returnVal, 1)
@@ -2385,7 +2426,6 @@ Warning: --last_man_standing_window ignored as it is not supported on CMAN clust
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: test99
transport: udpu
token: 20000
@@ -2591,10 +2631,8 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
assert r == 0
ac(o, "No uidgids configured in cluster.conf\n")
+ @skip_unless_pacemaker_version((1, 1, 11), "CIB schema upgrade")
def testClusterUpgrade(self):
- if not is_minimum_pacemaker_version(1, 1, 11):
- print("WARNING: Unable to test cluster upgrade because pacemaker is older than 1.1.11")
- return
with open(temp_cib) as myfile:
data = myfile.read()
assert data.find("pacemaker-1.2") != -1
@@ -2631,7 +2669,6 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
ac(data, """\
totem {
version: 2
- secauth: off
cluster_name: cname
transport: unknown
}
@@ -2823,3 +2860,43 @@ logging {
</cluster>
""")
+
+class ClusterStartStop(unittest.TestCase, AssertPcsMixin):
+ def setUp(self):
+ self.pcs_runner = PcsRunner()
+
+ def test_all_and_nodelist(self):
+ self.assert_pcs_fail(
+ "cluster stop rh7-1 rh7-2 --all",
+ stdout_full="Error: Cannot specify both --all and a list of nodes.\n"
+ )
+ self.assert_pcs_fail(
+ "cluster start rh7-1 rh7-2 --all",
+ stdout_full="Error: Cannot specify both --all and a list of nodes.\n"
+ )
+
+
+class ClusterEnableDisable(unittest.TestCase, AssertPcsMixin):
+ def setUp(self):
+ self.pcs_runner = PcsRunner()
+
+ def test_all_and_nodelist(self):
+ self.assert_pcs_fail(
+ "cluster enable rh7-1 rh7-2 --all",
+ stdout_full="Error: Cannot specify both --all and a list of nodes.\n"
+ )
+ self.assert_pcs_fail(
+ "cluster disable rh7-1 rh7-2 --all",
+ stdout_full="Error: Cannot specify both --all and a list of nodes.\n"
+ )
+
+class NodeRemove(unittest.TestCase, AssertPcsMixin):
+ def setUp(self):
+ self.pcs_runner = PcsRunner()
+
+ def test_fail_when_node_does_not_exist(self):
+ self.assert_pcs_fail(
+ "cluster node remove not-existent --force", #
+ "Error: node 'not-existent' does not appear to exist in"
+ " configuration\n"
+ )
diff --git a/pcs/test/test_cluster_pcmk_remote.py b/pcs/test/test_cluster_pcmk_remote.py
new file mode 100644
index 0000000..5dc1633
--- /dev/null
+++ b/pcs/test/test_cluster_pcmk_remote.py
@@ -0,0 +1,504 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.test.cib_resource.common import ResourceTest
+from pcs.test.tools.misc import outdent
+
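+#expected informational messages printed when node add/remove commands run
+#against a CIB file (-f) instead of a live cluster: the steps touching the
+#remote host are skipped and left to the user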
+fixture_nolive_add_report = outdent(
+ """\
+ the distribution of 'pacemaker authkey' to 'node-host' was skipped because command does not run on live cluster (e.g. -f was used). You will have to do it manually.
+ running 'pacemaker_remote start' on 'node-host' was skipped because command does not run on live cluster (e.g. -f was used). You will have to run it manually.
+ running 'pacemaker_remote enable' on 'node-host' was skipped because command does not run on live cluster (e.g. -f was used). You will have to run it manually.
+ """
+)
+
+def fixture_nolive_remove_report(host_list):
+ return outdent(
+ """\
+ running 'pacemaker_remote stop' on {hosts} was skipped because command does not run on live cluster (e.g. -f was used). You will have to run it manually.
+ running 'pacemaker_remote disable' on {hosts} was skipped because command does not run on live cluster (e.g. -f was used). You will have to run it manually.
+ 'pacemaker authkey' remove from {hosts} was skipped because command does not run on live cluster (e.g. -f was used). You will have to do it manually.
+ """
+ ).format(hosts=", ".join("'{0}'".format(host) for host in host_list))
+
+
+
+class NodeAddRemote(ResourceTest):
+ def test_fail_on_duplicate_host_specification(self):
+ self.assert_pcs_fail(
+ "cluster node add-remote HOST remote-node server=DIFFERENT",
+ "Error: invalid resource option 'server', allowed options"
+ " are: port, reconnect_interval, trace_file, trace_ra\n"
+ )
+
+ def test_fail_on_duplicate_host_specification_without_name(self):
+ self.assert_pcs_fail(
+ "cluster node add-remote HOST server=DIFFERENT",
+ "Error: invalid resource option 'server', allowed options"
+ " are: port, reconnect_interval, trace_file, trace_ra\n"
+ )
+
+ def test_fail_on_unknown_instance_attribute_not_offer_server(self):
+ self.assert_pcs_fail(
+ "cluster node add-remote HOST remote-node abcd=efgh",
+ "Error: invalid resource option 'abcd', allowed options"
+ " are: port, reconnect_interval, trace_file, trace_ra, use"
+ " --force to override\n"
+ )
+
+ def test_fail_on_bad_commandline_usage(self):
+ self.assert_pcs_fail(
+ "cluster node add-remote",
+ stdout_start="\nUsage: pcs cluster node add-remote..."
+ )
+
+ def test_success(self):
+ self.assert_effect(
+ "cluster node add-remote node-host node-name",
+ """<resources>
+ <primitive class="ocf" id="node-name" provider="pacemaker"
+ type="remote"
+ >
+ <instance_attributes id="node-name-instance_attributes">
+ <nvpair id="node-name-instance_attributes-server"
+ name="server" value="node-host"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="node-name-monitor-interval-60s" interval="60s"
+ name="monitor" timeout="30"
+ />
+ <op id="node-name-start-interval-0s" interval="0s"
+ name="start" timeout="60"
+ />
+ <op id="node-name-stop-interval-0s" interval="0s"
+ name="stop" timeout="60"
+ />
+ </operations>
+ </primitive>
+ </resources>""",
+ output=fixture_nolive_add_report
+ )
+
+ def test_success_no_default_ops(self):
+ self.assert_effect(
+ "cluster node add-remote node-host node-name --no-default-ops",
+ """<resources>
+ <primitive class="ocf" id="node-name" provider="pacemaker"
+ type="remote"
+ >
+ <instance_attributes id="node-name-instance_attributes">
+ <nvpair id="node-name-instance_attributes-server"
+ name="server" value="node-host"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="node-name-monitor-interval-60s" interval="60s"
+ name="monitor" timeout="30"
+ />
+ </operations>
+ </primitive>
+ </resources>""",
+ output=fixture_nolive_add_report
+ )
+
+ def test_fail_when_server_already_used(self):
+ self.assert_effect(
+ "cluster node add-remote node-host A --no-default-ops",
+ """<resources>
+ <primitive class="ocf" id="A" provider="pacemaker"
+ type="remote"
+ >
+ <instance_attributes id="A-instance_attributes">
+ <nvpair id="A-instance_attributes-server" name="server"
+ value="node-host"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="A-monitor-interval-60s" interval="60s"
+ name="monitor" timeout="30"
+ />
+ </operations>
+ </primitive>
+ </resources>""",
+ output=fixture_nolive_add_report
+ )
+ self.assert_pcs_fail(
+ "cluster node add-remote node-host B",
+ "Error: 'node-host' already exists\n"
+ )
+
+ def test_fail_when_server_already_used_as_guest(self):
+ self.assert_pcs_success(
+ "resource create G ocf:heartbeat:Dummy --no-default-ops",
+ )
+ self.assert_pcs_success(
+ "cluster node add-guest node-host G",
+ fixture_nolive_add_report
+ )
+ self.assert_pcs_fail(
+ "cluster node add-remote node-host B",
+ "Error: 'node-host' already exists\n"
+ )
+
+class NodeAddGuest(ResourceTest):
+ def create_resource(self):
+ self.assert_effect(
+ "resource create G ocf:heartbeat:Dummy --no-default-ops",
+ """<resources>
+ <primitive class="ocf" id="G" provider="heartbeat" type="Dummy">
+ <operations>
+ <op id="G-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>""",
+ )
+
+ def test_fail_on_bad_commandline_usage(self):
+ self.assert_pcs_fail(
+ "cluster node add-guest",
+ stdout_start="\nUsage: pcs cluster node add-guest..."
+ )
+
+ def test_fail_when_resource_does_not_exist(self):
+ self.assert_pcs_fail(
+ "cluster node add-guest some-host non-existent",
+ "Error: primitive 'non-existent' does not exist\n"
+ )
+
+ def test_fail_when_option_remote_node_specified(self):
+ self.create_resource()
+ self.assert_pcs_fail(
+ "cluster node add-guest some-host G remote-node=node-name",
+ stdout_start="Error: invalid guest option 'remote-node',"
+ " allowed options are: remote-addr, remote-connect-timeout,"
+ " remote-port\n"
+ )
+
+ def test_fail_when_resource_has_already_remote_node_meta(self):
+ self.assert_pcs_success(
+ "resource create already-guest-node ocf:heartbeat:Dummy"
+ " meta remote-node=some --force"
+ ,
+ "Warning: this command is not sufficient for creating a guest node, use"
+ " 'pcs cluster node add-guest'\n"
+ )
+ self.assert_pcs_fail(
+ "cluster node add-guest some-host already-guest-node",
+ "Error: the resource 'already-guest-node' is already a guest node\n"
+ )
+
+ def test_fail_on_combined_reasons(self):
+ self.assert_pcs_fail(
+ "cluster node add-guest node-host G a=b",
+ "Error: invalid guest option 'a', allowed options are:"
+ " remote-addr, remote-connect-timeout, remote-port\n"
+ "Error: primitive 'G' does not exist\n"
+ )
+
+ def test_fail_when_disallowed_option_appear(self):
+ self.create_resource()
+ self.assert_pcs_fail(
+ "cluster node add-guest node-host G a=b",
+ "Error: invalid guest option 'a', allowed options are:"
+ " remote-addr, remote-connect-timeout, remote-port\n"
+ )
+
+ def test_fail_when_invalid_interval_appear(self):
+ self.create_resource()
+ self.assert_pcs_fail(
+ "cluster node add-guest node-host G remote-connect-timeout=A",
+ "Error: 'A' is not a valid remote-connect-timeout value, use time"
+ " interval (e.g. 1, 2s, 3m, 4h, ...)\n"
+ )
+
+ def test_fail_when_invalid_port_appear(self):
+ self.create_resource()
+ self.assert_pcs_fail(
+ "cluster node add-guest node-host G remote-port=70000",
+ "Error: '70000' is not a valid remote-port value, use a port number"
+ " (1-65535)\n"
+ )
+
+ def test_fail_when_guest_node_conflicts_with_existing_id(self):
+ self.create_resource()
+ self.assert_pcs_success("resource create CONFLICT ocf:heartbeat:Dummy")
+ self.assert_pcs_fail(
+ "cluster node add-guest CONFLICT G",
+ "Error: 'CONFLICT' already exists\n"
+ )
+
+ def test_fail_when_guest_node_conflicts_with_existing_guest(self):
+ self.create_resource()
+ self.assert_pcs_success("resource create H ocf:heartbeat:Dummy")
+ self.assert_pcs_success(
+ "cluster node add-guest node-host G",
+ fixture_nolive_add_report
+ )
+ self.assert_pcs_fail(
+ "cluster node add-guest node-host H",
+ "Error: 'node-host' already exists\n"
+ )
+
+ def test_fail_when_guest_node_conflicts_with_existing_remote(self):
+ self.create_resource()
+ self.assert_pcs_success(
+ "resource create R ocf:pacemaker:remote server=node-host --force",
+ "Warning: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'\n"
+ )
+ self.assert_pcs_fail(
+ "cluster node add-guest node-host G",
+ "Error: 'node-host' already exists\n"
+ )
+
+ def test_fail_when_guest_node_name_conflicts_with_existing_remote(self):
+ self.create_resource()
+ self.assert_pcs_success(
+ "resource create R ocf:pacemaker:remote server=node-host --force",
+ "Warning: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'\n"
+ )
+ self.assert_pcs_fail(
+ "cluster node add-guest R G",
+ "Error: 'R' already exists\n"
+ )
+
+ def test_success(self):
+ self.create_resource()
+ self.assert_effect(
+ "cluster node add-guest node-host G",
+ """<resources>
+ <primitive class="ocf" id="G" provider="heartbeat" type="Dummy">
+ <meta_attributes id="G-meta_attributes">
+ <nvpair id="G-meta_attributes-remote-node"
+ name="remote-node" value="node-host"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="G-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>""",
+ output=fixture_nolive_add_report
+ )
+
+ def test_success_with_options(self):
+ self.create_resource()
+ self.assert_effect(
+ "cluster node add-guest node-name G remote-port=3121"
+ " remote-addr=node-host remote-connect-timeout=80s"
+ ,
+ """<resources>
+ <primitive class="ocf" id="G" provider="heartbeat" type="Dummy">
+ <meta_attributes id="G-meta_attributes">
+ <nvpair id="G-meta_attributes-remote-addr"
+ name="remote-addr" value="node-host"
+ />
+ <nvpair id="G-meta_attributes-remote-connect-timeout"
+ name="remote-connect-timeout" value="80s"
+ />
+ <nvpair id="G-meta_attributes-remote-node"
+ name="remote-node" value="node-name"
+ />
+ <nvpair id="G-meta_attributes-remote-port"
+ name="remote-port" value="3121"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="G-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>""",
+ output=fixture_nolive_add_report
+ )
+
+class NodeRemoveRemote(ResourceTest):
+ def test_fail_when_node_does_not_exist(self):
+ self.assert_pcs_fail(
+ "cluster node remove-remote not-existent",
+ "Error: remote node 'not-existent' does not appear to exist in"
+ " configuration\n"
+ )
+
+ def fixture_remote_node(self):
+ self.assert_effect(
+ "resource create NODE-NAME ocf:pacemaker:remote server=NODE-HOST"
+ " --no-default-ops --force"
+ ,
+ """<resources>
+ <primitive class="ocf" id="NODE-NAME" provider="pacemaker"
+ type="remote"
+ >
+ <instance_attributes id="NODE-NAME-instance_attributes">
+ <nvpair id="NODE-NAME-instance_attributes-server"
+ name="server" value="NODE-HOST"
+ />
+ </instance_attributes>
+ <operations>
+ <op id="NODE-NAME-monitor-interval-60s" interval="60s"
+ name="monitor" timeout="30"
+ />
+ </operations>
+ </primitive>
+ </resources>"""
+ ,
+ "Warning: this command is not sufficient for creating a remote"
+ " connection, use 'pcs cluster node add-remote'\n"
+ )
+
+ def fixture_multiple_remote_nodes(self):
+ #bypass pcs validation mechanisms (including expected future validation)
+ temp_cib = open(self.temp_cib, "w")
+ temp_cib.write("""
+ <cib epoch="557" num_updates="122" admin_epoch="0"
+ validate-with="pacemaker-1.2" crm_feature_set="3.0.6"
+ update-origin="rh7-3" update-client="crmd"
+ cib-last-written="Thu Aug 23 16:49:17 2012"
+ have-quorum="0" dc-uuid="2"
+ >
+ <configuration>
+ <crm_config/>
+ <nodes>
+ </nodes>
+ <resources>
+ <primitive class="ocf" id="NODE-NAME"
+ provider="pacemaker" type="remote"
+ >
+ <instance_attributes id="ia1">
+ <nvpair id="nvp1" name="server" value="HOST-A"/>
+ </instance_attributes>
+ </primitive>
+ <primitive class="ocf" id="HOST-A"
+ provider="pacemaker" type="remote"
+ >
+ <instance_attributes id="ia2">
+ <nvpair id="nvp2" name="server" value="HOST-B"/>
+ </instance_attributes>
+ </primitive>
+ </resources>
+ <constraints/>
+ </configuration>
+ <status/>
+ </cib>
+ """)
+ temp_cib.close()
+
+ def test_success_remove_by_host(self):
+ self.fixture_remote_node()
+ self.assert_effect(
+ "cluster node remove-remote NODE-HOST",
+ "<resources/>",
+ outdent(
+ """\
+ Deleting Resource - NODE-NAME
+ """
+ ) + fixture_nolive_remove_report(["NODE-HOST"])
+ )
+
+ def test_success_remove_by_node_name(self):
+ self.fixture_remote_node()
+ self.assert_effect(
+ "cluster node remove-remote NODE-NAME",
+ "<resources/>",
+ outdent(
+ """\
+ Deleting Resource - NODE-NAME
+ """
+ ) + fixture_nolive_remove_report(["NODE-HOST"])
+ )
+
+ def test_refuse_on_duplicate(self):
+ self.fixture_multiple_remote_nodes()
+ self.assert_pcs_fail(
+ "cluster node remove-remote HOST-A", #
+ "Error: multiple resource for 'HOST-A' found: "
+ "'HOST-A', 'NODE-NAME', use --force to override\n"
+ )
+
+ def test_success_remove_multiple_nodes(self):
+ self.fixture_multiple_remote_nodes()
+ self.assert_effect(
+ "cluster node remove-remote HOST-A --force",
+ "<resources/>",
+ outdent(
+ """\
+ Warning: multiple resource for 'HOST-A' found: 'HOST-A', 'NODE-NAME'
+ Deleting Resource - NODE-NAME
+ Deleting Resource - HOST-A
+ """
+ ) + fixture_nolive_remove_report(["HOST-A", "HOST-B"])
+ )
+
+class NodeRemoveGuest(ResourceTest):
+ def fixture_guest_node(self):
+ self.assert_effect(
+ "resource create NODE-ID ocf:heartbeat:Dummy --no-default-ops"
+ " meta remote-node=NODE-NAME remote-addr=NODE-HOST --force"
+ ,
+ """<resources>
+ <primitive class="ocf" id="NODE-ID" provider="heartbeat"
+ type="Dummy"
+ >
+ <meta_attributes id="NODE-ID-meta_attributes">
+ <nvpair id="NODE-ID-meta_attributes-remote-addr"
+ name="remote-addr" value="NODE-HOST"
+ />
+ <nvpair id="NODE-ID-meta_attributes-remote-node"
+ name="remote-node" value="NODE-NAME"
+ />
+ </meta_attributes>
+ <operations>
+ <op id="NODE-ID-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>""",
+ "Warning: this command is not sufficient for creating a guest node, use"
+ " 'pcs cluster node add-guest'\n"
+ )
+
+ def test_fail_when_node_does_not_exist(self):
+ self.assert_pcs_fail(
+ "cluster node remove-guest not-existent --force",
+ "Error: guest node 'not-existent' does not appear to exist in"
+ " configuration\n"
+ )
+
+ def assert_remove_by_identifier(self, identifier):
+ self.fixture_guest_node()
+ self.assert_effect(
+ "cluster node remove-guest {0}".format(identifier),
+ """<resources>
+ <primitive class="ocf" id="NODE-ID" provider="heartbeat"
+ type="Dummy"
+ >
+ <operations>
+ <op id="NODE-ID-monitor-interval-10" interval="10"
+ name="monitor" timeout="20"
+ />
+ </operations>
+ </primitive>
+ </resources>""",
+ fixture_nolive_remove_report(["NODE-HOST"])
+ )
+
+ def test_success_remove_by_node_name(self):
+ self.assert_remove_by_identifier("NODE-NAME")
+
+ def test_success_remove_by_resource_id(self):
+ self.assert_remove_by_identifier("NODE-ID")
+
+ def test_success_remove_by_resource_host(self):
+ self.assert_remove_by_identifier("NODE-HOST")
diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
index 5a14e59..69d955d 100644
--- a/pcs/test/test_constraints.py
+++ b/pcs/test/test_constraints.py
@@ -5,15 +5,19 @@ from __future__ import (
unicode_literals,
)
+from lxml import etree
import os
import shutil
from pcs.test.tools import pcs_unittest as unittest
from pcs.test.tools.assertions import AssertPcsMixin, console_report
+from pcs.test.tools.cib import get_assert_pcs_effect_mixin
from pcs.test.tools.misc import (
ac,
get_test_resource as rc,
- is_minimum_pacemaker_version,
+ skip_unless_pacemaker_supports_bundle,
+ skip_unless_pacemaker_version,
+ outdent,
)
from pcs.test.tools.pcs_runner import pcs, PcsRunner
@@ -23,12 +27,29 @@ empty_cib_1_2 = rc("cib-empty-1.2.xml")
temp_cib = rc("temp-cib.xml")
large_cib = rc("cib-large.xml")
+skip_unless_location_rsc_pattern = skip_unless_pacemaker_version(
+ (1, 1, 16),
+ "location constraints with resource patterns"
+)
+
class ConstraintTest(unittest.TestCase):
def setUp(self):
+ with open(temp_cib, "w") as temp_cib_file:
+ temp_cib_file.write(self.fixture_cib_cache())
+
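+ # The configured CIB is built once by setupClusterA and cached on the
+ # class; setUp restores each test from the cached copy instead of
+ # rebuilding the whole cluster configuration every time.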
+ def fixture_cib_cache(self):
+ if not hasattr(self.__class__, "cib_cache"):
+ self.__class__.cib_cache = self.fixture_cib()
+ return self.__class__.cib_cache
+
+ def fixture_cib(self):
shutil.copy(empty_cib, temp_cib)
self.setupClusterA(temp_cib)
+ cib_content = open(temp_cib).read()
+ shutil.copy(empty_cib, temp_cib)
+ return cib_content
- # Setups up a cluster with Resources, groups, master/slave resource and clones
+ # Sets up a cluster with Resources, groups, master/slave resource and clones
def setupClusterA(self,temp_cib):
line = "resource create D1 ocf:heartbeat:Dummy"
output, returnVal = pcs(temp_cib, line)
@@ -233,11 +254,11 @@ Ticket Constraints:
ac(o,"Location Constraints:\nOrdering Constraints:\n stop D1 then stop D2 (kind:Mandatory) (id:order-D1-D2-mandatory)\n start D1 then start D2 (kind:Mandatory) (id:order-D1-D2-mandatory-1)\nColocation Constraints:\nTicket Constraints:\n")
assert r == 0
+ @skip_unless_pacemaker_version(
+ (1, 1, 12),
+ "constraints with the require-all option"
+ )
def testOrderConstraintRequireAll(self):
- if not is_minimum_pacemaker_version(1, 1, 12):
- print("WARNING: Pacemaker version is too old (must be >= 1.1.12) to test require-all")
- return
-
o,r = pcs("cluster cib-upgrade")
ac(o,"Cluster CIB has been upgraded to latest version\n")
assert r == 0
@@ -275,6 +296,7 @@ Ticket Constraints:
ac(output,"Location Constraints:\n Resource: D5\n Enabled on: node1 (score:INFINITY) (id:location-D5-node1-INFINITY)\nOrdering Constraints:\n start Master then start D5 (kind:Mandatory) (id:order-Master-D5-mandatory)\nColocation Constraints:\n Master with D5 (score:INFINITY) (id:colocation-Master-D5-INFINITY)\nTicket Constraints:\n")
def testLocationConstraints(self):
+ # see also BundleLocation
output, returnVal = pcs(temp_cib, "constraint location D5 prefers node1")
assert returnVal == 0 and output == "", output
@@ -318,6 +340,7 @@ Ticket Constraints:
assert returnVal == 0
def testColocationConstraints(self):
+ # see also BundleColocation
line = "resource create M1 ocf:heartbeat:Dummy --master"
output, returnVal = pcs(temp_cib, line)
assert returnVal == 0 and output == ""
@@ -390,6 +413,7 @@ Ticket Constraints:
ac(o,'Location Constraints:\nOrdering Constraints:\nColocation Constraints:\n D1 with D3-clone (score:INFINITY)\n D1 with D2 (score:100)\n D1 with D2 (score:-100)\n Master with D5 (score:100)\n M1-master with M2-master (score:INFINITY) (rsc-role:Master) (with-rsc-role:Master)\n M3-master with M4-master (score:INFINITY)\n M5-master with M6-master (score:500) (rsc-role:Slave) (with-rsc-role:Started)\n M7-master with M8-master (score:INFINITY) (rsc-role:Started) (with-rsc-ro [...]
def testColocationSets(self):
+ # see also BundleColocation
line = "resource create D7 ocf:heartbeat:Dummy"
output, returnVal = pcs(temp_cib, line)
assert returnVal == 0 and output == ""
@@ -454,11 +478,24 @@ Colocation Constraints:
assert r == 0
o, r = pcs(temp_cib, "resource delete D5")
- ac(o,"Removing D5 from set pcs_rsc_set_D5_D6_D7\nRemoving D5 from set pcs_rsc_set_D5_D6-1\nDeleting Resource - D5\n")
+ ac(o, outdent(
+ """\
+ Removing D5 from set pcs_rsc_set_D5_D6_D7
+ Removing D5 from set pcs_rsc_set_D5_D6-1
+ Deleting Resource - D5
+ """
+ ))
assert r == 0
o, r = pcs(temp_cib, "resource delete D6")
- ac(o,"Removing D6 from set pcs_rsc_set_D5_D6_D7\nRemoving D6 from set pcs_rsc_set_D5_D6-1\nRemoving set pcs_rsc_set_D5_D6-1\nDeleting Resource - D6\n")
+ ac(o, outdent(
+ """\
+ Removing D6 from set pcs_rsc_set_D5_D6_D7
+ Removing D6 from set pcs_rsc_set_D5_D6-1
+ Removing set pcs_rsc_set_D5_D6-1
+ Deleting Resource - D6
+ """
+ ))
assert r == 0
o, r = pcs(temp_cib, "constraint ref D7")
@@ -470,19 +507,19 @@ Colocation Constraints:
assert r == 0
output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 sequential=foo")
- ac(output, "Error: 'foo' is not a valid sequential value, use true, false\n")
+ ac(output, "Error: 'foo' is not a valid sequential value, use false, true\n")
self.assertEqual(1, retValue)
output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 require-all=foo")
- ac(output, "Error: 'foo' is not a valid require-all value, use true, false\n")
+ ac(output, "Error: 'foo' is not a valid require-all value, use false, true\n")
self.assertEqual(1, retValue)
output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 role=foo")
- ac(output, "Error: 'foo' is not a valid role value, use Stopped, Started, Master, Slave\n")
+ ac(output, "Error: 'foo' is not a valid role value, use Master, Slave, Started, Stopped\n")
self.assertEqual(1, retValue)
output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 action=foo")
- ac(output, "Error: 'foo' is not a valid action value, use start, promote, demote, stop\n")
+ ac(output, "Error: 'foo' is not a valid action value, use demote, promote, start, stop\n")
self.assertEqual(1, retValue)
output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 foo=bar")
@@ -505,11 +542,11 @@ Colocation Constraints:
ac(output, "")
self.assertEqual(0, retValue)
+ @skip_unless_pacemaker_version(
+ (1, 1, 12),
+ "constraints with the resource-discovery option"
+ )
def testConstraintResourceDiscovery(self):
- if not is_minimum_pacemaker_version(1, 1, 12):
- print("WARNING: Pacemaker version is too old (must be >= 1.1.12) to test resource-discovery")
- return
-
o,r = pcs("resource create crd ocf:heartbeat:Dummy")
ac(o,"")
assert r == 0
@@ -644,6 +681,7 @@ Colocation Constraints:
assert r == 0
def testOrderSets(self):
+ # see also BundleOrder
line = "resource create D7 ocf:heartbeat:Dummy"
output, returnVal = pcs(temp_cib, line)
assert returnVal == 0 and output == ""
@@ -708,27 +746,40 @@ Ordering Constraints:
""")
o, r = pcs(temp_cib, "resource delete D5")
- ac(o,"Removing D5 from set pcs_rsc_set_D5_D6_D7\nRemoving D5 from set pcs_rsc_set_D5_D6-1\nDeleting Resource - D5\n")
+ ac(o, outdent(
+ """\
+ Removing D5 from set pcs_rsc_set_D5_D6_D7
+ Removing D5 from set pcs_rsc_set_D5_D6-1
+ Deleting Resource - D5
+ """
+ ))
assert r == 0
o, r = pcs(temp_cib, "resource delete D6")
- ac(o,"Removing D6 from set pcs_rsc_set_D5_D6_D7\nRemoving D6 from set pcs_rsc_set_D5_D6-1\nRemoving set pcs_rsc_set_D5_D6-1\nDeleting Resource - D6\n")
+ ac(o, outdent(
+ """\
+ Removing D6 from set pcs_rsc_set_D5_D6_D7
+ Removing D6 from set pcs_rsc_set_D5_D6-1
+ Removing set pcs_rsc_set_D5_D6-1
+ Deleting Resource - D6
+ """
+ ))
assert r == 0
output, retValue = pcs(temp_cib, "constraint order set D1 D2 sequential=foo")
- ac(output, "Error: 'foo' is not a valid sequential value, use true, false\n")
+ ac(output, "Error: 'foo' is not a valid sequential value, use false, true\n")
self.assertEqual(1, retValue)
output, retValue = pcs(temp_cib, "constraint order set D1 D2 require-all=foo")
- ac(output, "Error: 'foo' is not a valid require-all value, use true, false\n")
+ ac(output, "Error: 'foo' is not a valid require-all value, use false, true\n")
self.assertEqual(1, retValue)
output, retValue = pcs(temp_cib, "constraint order set D1 D2 role=foo")
- ac(output, "Error: 'foo' is not a valid role value, use Stopped, Started, Master, Slave\n")
+ ac(output, "Error: 'foo' is not a valid role value, use Master, Slave, Started, Stopped\n")
self.assertEqual(1, retValue)
output, retValue = pcs(temp_cib, "constraint order set D1 D2 action=foo")
- ac(output, "Error: 'foo' is not a valid action value, use start, promote, demote, stop\n")
+ ac(output, "Error: 'foo' is not a valid action value, use demote, promote, start, stop\n")
self.assertEqual(1, retValue)
output, retValue = pcs(temp_cib, "constraint order set D1 D2 foo=bar")
@@ -748,14 +799,14 @@ Error: invalid option 'foo', allowed options are: id, kind, symmetrical
temp_cib,
"constraint order set D1 D2 setoptions kind=foo"
)
- ac(output, "Error: 'foo' is not a valid kind value, use Optional, Mandatory, Serialize\n")
+ ac(output, "Error: 'foo' is not a valid kind value, use Mandatory, Optional, Serialize\n")
self.assertEqual(1, retValue)
output, retValue = pcs(
temp_cib,
"constraint order set D1 D2 setoptions symmetrical=foo"
)
- ac(output, "Error: 'foo' is not a valid symmetrical value, use true, false\n")
+ ac(output, "Error: 'foo' is not a valid symmetrical value, use false, true\n")
self.assertEqual(1, retValue)
output, retValue = pcs(
@@ -1892,9 +1943,15 @@ Ticket Constraints:
# deleting the remote node resource
output, returnVal = pcs(
temp_cib,
- 'resource create vm-guest1 ocf:heartbeat:VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1'
+ 'resource create vm-guest1 ocf:heartbeat:VirtualDomain'
+ ' hypervisor="qemu:///system" config="/root/guest1.xml" meta'
+ ' remote-node=guest1 --force'
+ )
+ ac(
+ output,
+ "Warning: this command is not sufficient for creating a guest node, use"
+ " 'pcs cluster node add-guest'\n"
)
- ac(output, "")
self.assertEqual(0, returnVal)
output, returnVal = pcs(
@@ -1937,11 +1994,13 @@ Ticket Constraints:
self.assertEqual(0, returnVal)
output, returnVal = pcs(temp_cib, "resource delete vm-guest1")
- ac(output, """\
-Removing Constraint - location-D1-guest1-200
-Removing Constraint - location-D2-guest1--400
-Deleting Resource - vm-guest1
-""")
+ ac(output, outdent(
+ """\
+ Removing Constraint - location-D1-guest1-200
+ Removing Constraint - location-D2-guest1--400
+ Deleting Resource - vm-guest1
+ """
+ ))
self.assertEqual(0, returnVal)
output, returnVal = pcs(temp_cib, "constraint --full")
@@ -1961,9 +2020,15 @@ Ticket Constraints:
# removing the remote node
output, returnVal = pcs(
temp_cib,
- 'resource create vm-guest1 ocf:heartbeat:VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1'
+ 'resource create vm-guest1 ocf:heartbeat:VirtualDomain'
+ ' hypervisor="qemu:///system" config="/root/guest1.xml"'
+ ' meta remote-node=guest1 --force'
+ )
+ ac(
+ output,
+ "Warning: this command is not sufficient for creating a guest node, use"
+ " 'pcs cluster node add-guest'\n"
)
- ac(output, "")
self.assertEqual(0, returnVal)
output, returnVal = pcs(
@@ -1994,9 +2059,13 @@ Ticket Constraints:
self.assertEqual(0, returnVal)
output, returnVal = pcs(
- temp_cib, "cluster remote-node remove guest1"
+ temp_cib, "cluster remote-node remove guest1 --force"
+ )
+ ac(
+ output,
+ "Warning: this command is deprecated, use 'pcs cluster node"
+ " remove-guest'\n"
)
- ac(output, "")
self.assertEqual(0, returnVal)
output, returnVal = pcs(temp_cib, "constraint --full")
@@ -2013,18 +2082,20 @@ Ticket Constraints:
self.assertEqual(0, returnVal)
output, returnVal = pcs(temp_cib, "resource delete vm-guest1")
- ac(output, """\
-Deleting Resource - vm-guest1
-""")
+ ac(output, "Deleting Resource - vm-guest1\n")
self.assertEqual(0, returnVal)
# constraints referencing the remote node resource
# deleting the remote node resource
output, returnVal = pcs(
temp_cib,
- 'resource create vm-guest1 ocf:heartbeat:VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1'
+ 'resource create vm-guest1 ocf:heartbeat:VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1 --force'
+ )
+ ac(
+ output,
+ "Warning: this command is not sufficient for creating a guest node, use"
+ " 'pcs cluster node add-guest'\n"
)
- ac(output, "")
self.assertEqual(0, returnVal)
output, returnVal = pcs(
@@ -2034,10 +2105,12 @@ Deleting Resource - vm-guest1
self.assertEqual(0, returnVal)
output, returnVal = pcs(temp_cib, "resource delete vm-guest1")
- ac(output, """\
-Removing Constraint - location-vm-guest1-node1-INFINITY
-Deleting Resource - vm-guest1
-""")
+ ac(output, outdent(
+ """\
+ Removing Constraint - location-vm-guest1-node1-INFINITY
+ Deleting Resource - vm-guest1
+ """
+ ))
self.assertEqual(0, returnVal)
def testDuplicateOrder(self):
@@ -2597,9 +2670,12 @@ Ticket Constraints:
self.assertEqual(0, returnVal)
class ConstraintBaseTest(unittest.TestCase, AssertPcsMixin):
+ temp_cib = rc("temp-cib.xml")
+ empty_cib = rc("cib-empty.xml")
+
def setUp(self):
- shutil.copy(empty_cib, temp_cib)
- self.pcs_runner = PcsRunner(temp_cib)
+ shutil.copy(self.empty_cib, self.temp_cib)
+ self.pcs_runner = PcsRunner(self.temp_cib)
self.assert_pcs_success('resource create A ocf:heartbeat:Dummy')
self.assert_pcs_success('resource create B ocf:heartbeat:Dummy')
@@ -2608,7 +2684,7 @@ class CommonCreateWithSet(ConstraintBaseTest):
def test_refuse_when_resource_does_not_exist(self):
self.assert_pcs_fail(
'constraint ticket set A C setoptions ticket=T',
- ["Error: Resource 'C' does not exist"]
+ ["Error: resource 'C' does not exist"]
)
class TicketCreateWithSet(ConstraintBaseTest):
@@ -2628,7 +2704,7 @@ class TicketCreateWithSet(ConstraintBaseTest):
def test_refuse_bad_loss_policy(self):
self.assert_pcs_fail(
'constraint ticket set A B setoptions ticket=T loss-policy=none',
- ["Error: 'none' is not a valid loss-policy value, use fence, stop, freeze, demote"]
+ ["Error: 'none' is not a valid loss-policy value, use demote, fence, freeze, stop"]
)
def test_refuse_when_ticket_option_is_missing(self):
@@ -2656,13 +2732,13 @@ class TicketAdd(ConstraintBaseTest):
def test_refuse_noexistent_resource_id(self):
self.assert_pcs_fail(
'constraint ticket add T master AA loss-policy=fence',
- ["Error: Resource 'AA' does not exist"]
+ ["Error: resource 'AA' does not exist"]
)
def test_refuse_invalid_role(self):
self.assert_pcs_fail(
'constraint ticket add T bad-role A loss-policy=fence',
- ["Error: 'bad-role' is not a valid rsc-role value, use Stopped, Started, Master, Slave"]
+ ["Error: 'bad-role' is not a valid rsc-role value, use Master, Slave, Started, Stopped"]
)
def test_refuse_duplicate_ticket(self):
@@ -2752,3 +2828,766 @@ class TicketShow(ConstraintBaseTest):
" set A B setoptions ticket=T",
]
)
+
+
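+# The classes below use assert_effect to compare the <constraints> element of
+# the resulting CIB against an expected XML snippet.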
+class ConstraintEffect(
+ unittest.TestCase,
+ get_assert_pcs_effect_mixin(
+ lambda cib: etree.tostring(
+ # pylint:disable=undefined-variable
+ etree.parse(cib).findall(".//constraints")[0]
+ )
+ )
+):
+ temp_cib = rc("temp-cib.xml")
+ empty_cib = rc("cib-empty.xml")
+
+ def setUp(self):
+ shutil.copy(self.empty_cib, self.temp_cib)
+ self.pcs_runner = PcsRunner(self.temp_cib)
+
+ def fixture_primitive(self, name):
+ self.assert_pcs_success(
+ "resource create {0} ocf:heartbeat:Dummy".format(name)
+ )
+
+
+class LocationTypeId(ConstraintEffect):
+ # This was written while implementing rsc-pattern for location constraints.
+ # Thus it focuses only on the new feature (rsc-pattern) and is NOT a
+ # complete test of location constraints. Instead it relies on legacy tests
+ # to test location constraints with a plain resource name.
+ def test_prefers(self):
+ self.fixture_primitive("A")
+ self.assert_effect(
+ [
+ "constraint location A prefers node1",
+ "constraint location %A prefers node1",
+ "constraint location resource%A prefers node1",
+ ],
+ """<constraints>
+ <rsc_location id="location-A-node1-INFINITY" node="node1"
+ rsc="A" score="INFINITY"
+ />
+ </constraints>"""
+ )
+
+ def test_avoids(self):
+ self.fixture_primitive("A")
+ self.assert_effect(
+ [
+ "constraint location A avoids node1",
+ "constraint location %A avoids node1",
+ "constraint location resource%A avoids node1",
+ ],
+ """<constraints>
+ <rsc_location id="location-A-node1--INFINITY" node="node1"
+ rsc="A" score="-INFINITY"
+ />
+ </constraints>"""
+ )
+
+ def test_add(self):
+ self.fixture_primitive("A")
+ self.assert_effect(
+ [
+ "constraint location add my-id A node1 INFINITY",
+ "constraint location add my-id %A node1 INFINITY",
+ "constraint location add my-id resource%A node1 INFINITY",
+ ],
+ """<constraints>
+ <rsc_location id="my-id" node="node1" rsc="A" score="INFINITY"/>
+ </constraints>"""
+ )
+
+ def test_rule(self):
+ self.fixture_primitive("A")
+ self.assert_effect(
+ [
+ "constraint location A rule '#uname' eq node1",
+ "constraint location %A rule '#uname' eq node1",
+ "constraint location resource%A rule '#uname' eq node1",
+ ],
+ """<constraints>
+ <rsc_location id="location-A" rsc="A">
+ <rule id="location-A-rule" score="INFINITY">
+ <expression id="location-A-rule-expr"
+ operation="eq" attribute="#uname" value="node1"
+ />
+ </rule>
+ </rsc_location>
+ </constraints>"""
+ )
+
+
+@skip_unless_location_rsc_pattern
+class LocationTypePattern(ConstraintEffect):
+ # This was written while implementing rsc-pattern for location constraints.
+ # Thus it focuses only on the new feature (rsc-pattern) and is NOT a
+ # complete test of location constraints. Instead it relies on legacy tests
+ # to test location constraints with a plain resource name.
+ empty_cib = rc("cib-empty-2.6.xml")
+
+ def stdout(self):
+ return ""
+
+ def test_prefers(self):
+ self.assert_effect(
+ "constraint location regexp%res_[0-9] prefers node1",
+ """<constraints>
+ <rsc_location id="location-res_0-9-node1-INFINITY" node="node1"
+ rsc-pattern="res_[0-9]" score="INFINITY"
+ />
+ </constraints>""",
+ self.stdout()
+ )
+
+ def test_avoids(self):
+ self.assert_effect(
+ "constraint location regexp%res_[0-9] avoids node1",
+ """<constraints>
+ <rsc_location id="location-res_0-9-node1--INFINITY" node="node1"
+ rsc-pattern="res_[0-9]" score="-INFINITY"
+ />
+ </constraints>""",
+ self.stdout()
+ )
+
+ def test_add(self):
+ self.assert_effect(
+ "constraint location add my-id regexp%res_[0-9] node1 INFINITY",
+ """<constraints>
+ <rsc_location id="my-id" node="node1" rsc-pattern="res_[0-9]"
+ score="INFINITY"
+ />
+ </constraints>""",
+ self.stdout()
+ )
+
+ def test_rule(self):
+ self.assert_effect(
+ "constraint location regexp%res_[0-9] rule '#uname' eq node1",
+ """<constraints>
+ <rsc_location id="location-res_0-9" rsc-pattern="res_[0-9]">
+ <rule id="location-res_0-9-rule" score="INFINITY">
+ <expression id="location-res_0-9-rule-expr"
+ operation="eq" attribute="#uname" value="node1"
+ />
+ </rule>
+ </rsc_location>
+ </constraints>""",
+ self.stdout()
+ )
+
+
+@skip_unless_location_rsc_pattern
+class LocationTypePatternWithCibUpgrade(LocationTypePattern):
+ empty_cib = rc("cib-empty.xml")
+
+ def stdout(self):
+ return "Cluster CIB has been upgraded to latest version\n"
+
+
+@skip_unless_location_rsc_pattern
+class LocationShowWithPattern(ConstraintBaseTest):
+ # This was written while implementing rsc-pattern for location constraints.
+ # Thus it focuses only on the new feature (rsc-pattern) and is NOT a
+ # complete test of location constraints. Instead it relies on legacy tests
+ # to test location constraints with a plain resource name.
+ empty_cib = rc("cib-empty-2.6.xml")
+
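+ # The fixture mixes plain-resource constraints with regexp% (resource
+ # pattern) constraints so the show variants below exercise both forms.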
+ def fixture(self):
+ self.assert_pcs_success_all([
+ "resource create R1 ocf:heartbeat:Dummy",
+ "resource create R2 ocf:heartbeat:Dummy",
+ "resource create R3 ocf:heartbeat:Dummy",
+
+ "constraint location R1 prefers node1 node2=20",
+ "constraint location R1 avoids node3=30 node4",
+ "constraint location R2 prefers node3 node4=20",
+ "constraint location R2 avoids node1=30 node2",
+ "constraint location regexp%R_[0-9]+ prefers node1 node2=20",
+ "constraint location regexp%R_[0-9]+ avoids node3=30",
+ "constraint location regexp%R_[a-z]+ avoids node3=30",
+
+ "constraint location add my-id1 R3 node1 -INFINITY resource-discovery=never",
+ "constraint location add my-id2 R3 node2 -INFINITY resource-discovery=never",
+ "constraint location add my-id3 regexp%R_[0-9]+ node4 -INFINITY resource-discovery=never",
+
+ "constraint location R1 rule score=-INFINITY date-spec operation=date_spec years=2005",
+ "constraint location R1 rule score=-INFINITY date-spec operation=date_spec years=2007",
+ "constraint location regexp%R_[0-9]+ rule score=-INFINITY date-spec operation=date_spec years=2006",
+ "constraint location regexp%R_[0-9]+ rule score=20 defined pingd",
+ ])
+
+ def test_show(self):
+ #pylint: disable=trailing-whitespace
+ self.fixture()
+ self.assert_pcs_success(
+ "constraint location show --full",
+ outdent(
+ """\
+ Location Constraints:
+ Resource pattern: R_[0-9]+
+ Enabled on: node1 (score:INFINITY) (id:location-R_0-9-node1-INFINITY)
+ Enabled on: node2 (score:20) (id:location-R_0-9-node2-20)
+ Disabled on: node3 (score:-30) (id:location-R_0-9-node3--30)
+ Disabled on: node4 (score:-INFINITY) (resource-discovery=never) (id:my-id3)
+ Constraint: location-R_0-9
+ Rule: score=-INFINITY (id:location-R_0-9-rule)
+ Expression: (id:location-R_0-9-rule-expr)
+ Date Spec: years=2006 (id:location-R_0-9-rule-expr-datespec)
+ Constraint: location-R_0-9-1
+ Rule: score=20 (id:location-R_0-9-1-rule)
+ Expression: defined pingd (id:location-R_0-9-1-rule-expr)
+ Resource pattern: R_[a-z]+
+ Disabled on: node3 (score:-30) (id:location-R_a-z-node3--30)
+ Resource: R1
+ Enabled on: node1 (score:INFINITY) (id:location-R1-node1-INFINITY)
+ Enabled on: node2 (score:20) (id:location-R1-node2-20)
+ Disabled on: node3 (score:-30) (id:location-R1-node3--30)
+ Disabled on: node4 (score:-INFINITY) (id:location-R1-node4--INFINITY)
+ Constraint: location-R1
+ Rule: score=-INFINITY (id:location-R1-rule)
+ Expression: (id:location-R1-rule-expr)
+ Date Spec: years=2005 (id:location-R1-rule-expr-datespec)
+ Constraint: location-R1-1
+ Rule: score=-INFINITY (id:location-R1-1-rule)
+ Expression: (id:location-R1-1-rule-expr)
+ Date Spec: years=2007 (id:location-R1-1-rule-expr-datespec)
+ Resource: R2
+ Enabled on: node3 (score:INFINITY) (id:location-R2-node3-INFINITY)
+ Enabled on: node4 (score:20) (id:location-R2-node4-20)
+ Disabled on: node1 (score:-30) (id:location-R2-node1--30)
+ Disabled on: node2 (score:-INFINITY) (id:location-R2-node2--INFINITY)
+ Resource: R3
+ Disabled on: node1 (score:-INFINITY) (resource-discovery=never) (id:my-id1)
+ Disabled on: node2 (score:-INFINITY) (resource-discovery=never) (id:my-id2)
+ """
+ )
+ )
+
+ self.assert_pcs_success(
+ "constraint location show",
+ outdent(
+ """\
+ Location Constraints:
+ Resource pattern: R_[0-9]+
+ Enabled on: node1 (score:INFINITY)
+ Enabled on: node2 (score:20)
+ Disabled on: node3 (score:-30)
+ Disabled on: node4 (score:-INFINITY) (resource-discovery=never)
+ Constraint: location-R_0-9
+ Rule: score=-INFINITY
+ Expression:
+ Date Spec: years=2006
+ Constraint: location-R_0-9-1
+ Rule: score=20
+ Expression: defined pingd
+ Resource pattern: R_[a-z]+
+ Disabled on: node3 (score:-30)
+ Resource: R1
+ Enabled on: node1 (score:INFINITY)
+ Enabled on: node2 (score:20)
+ Disabled on: node3 (score:-30)
+ Disabled on: node4 (score:-INFINITY)
+ Constraint: location-R1
+ Rule: score=-INFINITY
+ Expression:
+ Date Spec: years=2005
+ Constraint: location-R1-1
+ Rule: score=-INFINITY
+ Expression:
+ Date Spec: years=2007
+ Resource: R2
+ Enabled on: node3 (score:INFINITY)
+ Enabled on: node4 (score:20)
+ Disabled on: node1 (score:-30)
+ Disabled on: node2 (score:-INFINITY)
+ Resource: R3
+ Disabled on: node1 (score:-INFINITY) (resource-discovery=never)
+ Disabled on: node2 (score:-INFINITY) (resource-discovery=never)
+ """
+ )
+ )
+
+ self.assert_pcs_success(
+ "constraint location show nodes --full",
+ outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Location Constraints:
+ Node:
+ Allowed to run:
+ Resource: R1 (location-R1) Score: 0
+ Resource: R1 (location-R1-1) Score: 0
+ Resource pattern: R_[0-9]+ (location-R_0-9) Score: 0
+ Resource pattern: R_[0-9]+ (location-R_0-9-1) Score: 0
+ Node: node1
+ Allowed to run:
+ Resource: R1 (location-R1-node1-INFINITY) Score: INFINITY
+ Resource pattern: R_[0-9]+ (location-R_0-9-node1-INFINITY) Score: INFINITY
+ Not allowed to run:
+ Resource: R2 (location-R2-node1--30) Score: -30
+ Resource: R3 (my-id1) (resource-discovery=never) Score: -INFINITY
+ Node: node2
+ Allowed to run:
+ Resource: R1 (location-R1-node2-20) Score: 20
+ Resource pattern: R_[0-9]+ (location-R_0-9-node2-20) Score: 20
+ Not allowed to run:
+ Resource: R2 (location-R2-node2--INFINITY) Score: -INFINITY
+ Resource: R3 (my-id2) (resource-discovery=never) Score: -INFINITY
+ Node: node3
+ Allowed to run:
+ Resource: R2 (location-R2-node3-INFINITY) Score: INFINITY
+ Not allowed to run:
+ Resource: R1 (location-R1-node3--30) Score: -30
+ Resource pattern: R_[0-9]+ (location-R_0-9-node3--30) Score: -30
+ Resource pattern: R_[a-z]+ (location-R_a-z-node3--30) Score: -30
+ Node: node4
+ Allowed to run:
+ Resource: R2 (location-R2-node4-20) Score: 20
+ Not allowed to run:
+ Resource: R1 (location-R1-node4--INFINITY) Score: -INFINITY
+ Resource pattern: R_[0-9]+ (my-id3) (resource-discovery=never) Score: -INFINITY
+ Resource pattern: R_[0-9]+
+ Constraint: location-R_0-9
+ Rule: score=-INFINITY
+ Expression:
+ Date Spec: years=2006
+ Constraint: location-R_0-9-1
+ Rule: score=20
+ Expression: defined pingd
+ Resource: R1
+ Constraint: location-R1
+ Rule: score=-INFINITY
+ Expression:
+ Date Spec: years=2005
+ Constraint: location-R1-1
+ Rule: score=-INFINITY
+ Expression:
+ Date Spec: years=2007
+ """
+ )
+ )
+
+ self.assert_pcs_success(
+ "constraint location show nodes node2",
+ outdent(
+ """\
+ Location Constraints:
+ Node: node2
+ Allowed to run:
+ Resource: R1 (location-R1-node2-20) Score: 20
+ Resource pattern: R_[0-9]+ (location-R_0-9-node2-20) Score: 20
+ Not allowed to run:
+ Resource: R2 (location-R2-node2--INFINITY) Score: -INFINITY
+ Resource: R3 (my-id2) (resource-discovery=never) Score: -INFINITY
+ Resource pattern: R_[0-9]+
+ Constraint: location-R_0-9
+ Rule: score=-INFINITY
+ Expression:
+ Date Spec: years=2006
+ Constraint: location-R_0-9-1
+ Rule: score=20
+ Expression: defined pingd
+ Resource: R1
+ Constraint: location-R1
+ Rule: score=-INFINITY
+ Expression:
+ Date Spec: years=2005
+ Constraint: location-R1-1
+ Rule: score=-INFINITY
+ Expression:
+ Date Spec: years=2007
+ """
+ )
+ )
+
+ self.assert_pcs_success(
+ "constraint location show resources regexp%R_[0-9]+",
+ outdent(
+ """\
+ Location Constraints:
+ Resource pattern: R_[0-9]+
+ Enabled on: node1 (score:INFINITY)
+ Enabled on: node2 (score:20)
+ Disabled on: node3 (score:-30)
+ Disabled on: node4 (score:-INFINITY) (resource-discovery=never)
+ Constraint: location-R_0-9
+ Rule: score=-INFINITY
+ Expression:
+ Date Spec: years=2006
+ Constraint: location-R_0-9-1
+ Rule: score=20
+ Expression: defined pingd
+ """
+ )
+ )
+
+
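+# Shared fixtures for the Bundle* classes below: a bundle "B" created in
+# setUp and primitives that can optionally be placed inside it.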
+class Bundle(ConstraintEffect):
+ empty_cib = rc("cib-empty-2.8.xml")
+
+ def setUp(self):
+ super(Bundle, self).setUp()
+ self.fixture_bundle("B")
+
+ def fixture_primitive(self, name, bundle=None):
+ #pylint:disable=arguments-differ
+ if not bundle:
+ super(Bundle, self).fixture_primitive(name)
+ return
+ self.assert_pcs_success(
+ "resource create {0} ocf:heartbeat:Dummy bundle {1}".format(
+ name, bundle
+ )
+ )
+
+ def fixture_bundle(self, name):
+ self.assert_pcs_success(
+ "resource bundle create {0} container image=pcs:test".format(
+ name
+ )
+ )
+
+
+@skip_unless_pacemaker_supports_bundle
+class BundleLocation(Bundle):
+ def test_bundle_prefers(self):
+ self.assert_effect(
+ "constraint location B prefers node1",
+ """
+ <constraints>
+ <rsc_location id="location-B-node1-INFINITY" node="node1"
+ rsc="B" score="INFINITY"
+ />
+ </constraints>
+ """
+ )
+
+ def test_bundle_avoids(self):
+ self.assert_effect(
+ "constraint location B avoids node1",
+ """
+ <constraints>
+ <rsc_location id="location-B-node1--INFINITY" node="node1"
+ rsc="B" score="-INFINITY"
+ />
+ </constraints>
+ """
+ )
+
+ def test_bundle_location(self):
+ self.assert_effect(
+ "constraint location add id B node1 100",
+ """
+ <constraints>
+ <rsc_location id="id" node="node1" rsc="B" score="100" />
+ </constraints>
+ """
+ )
+
+ def test_primitive_prefers(self):
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail(
+ "constraint location R prefers node1",
+ "Error: R is a bundle resource, you should use the bundle id: B "
+ "when adding constraints. Use --force to override.\n"
+ )
+
+ def test_primitive_prefers_force(self):
+ self.fixture_primitive("R", "B")
+ self.assert_effect(
+ "constraint location R prefers node1 --force",
+ """
+ <constraints>
+ <rsc_location id="location-R-node1-INFINITY" node="node1"
+ rsc="R" score="INFINITY"
+ />
+ </constraints>
+ """
+ )
+
+ def test_primitive_avoids(self):
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail(
+ "constraint location R avoids node1",
+ "Error: R is a bundle resource, you should use the bundle id: B "
+ "when adding constraints. Use --force to override.\n"
+ )
+
+ def test_primitive_avoids_force(self):
+ self.fixture_primitive("R", "B")
+ self.assert_effect(
+ "constraint location R avoids node1 --force",
+ """
+ <constraints>
+ <rsc_location id="location-R-node1--INFINITY" node="node1"
+ rsc="R" score="-INFINITY"
+ />
+ </constraints>
+ """
+ )
+
+ def test_primitive_location(self):
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail(
+ "constraint location add id R node1 100",
+ "Error: R is a bundle resource, you should use the bundle id: B "
+ "when adding constraints. Use --force to override.\n"
+ )
+
+ def test_primitive_location_force(self):
+ self.fixture_primitive("R", "B")
+ self.assert_effect(
+ "constraint location add id R node1 100 --force",
+ """
+ <constraints>
+ <rsc_location id="id" node="node1" rsc="R" score="100" />
+ </constraints>
+ """
+ )
+
+
+@skip_unless_pacemaker_supports_bundle
+class BundleColocation(Bundle):
+ def setUp(self):
+ super(BundleColocation, self).setUp()
+ self.fixture_primitive("X")
+
+ def test_bundle(self):
+ self.assert_effect(
+ "constraint colocation add B with X",
+ """
+ <constraints>
+ <rsc_colocation id="colocation-B-X-INFINITY"
+ rsc="B" with-rsc="X" score="INFINITY" />
+ </constraints>
+ """
+ )
+
+ def test_primitive(self):
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail(
+ "constraint colocation add R with X",
+ "Error: R is a bundle resource, you should use the bundle id: B "
+ "when adding constraints. Use --force to override.\n"
+ )
+
+ def test_primitive_force(self):
+ self.fixture_primitive("R", "B")
+ self.assert_effect(
+ "constraint colocation add R with X --force",
+ """
+ <constraints>
+ <rsc_colocation id="colocation-R-X-INFINITY"
+ rsc="R" with-rsc="X" score="INFINITY" />
+ </constraints>
+ """
+ )
+
+ def test_bundle_set(self):
+ self.assert_effect(
+ "constraint colocation set B X",
+ """
+ <constraints>
+ <rsc_colocation id="pcs_rsc_colocation_set_B_X"
+ score="INFINITY"
+ >
+ <resource_set id="pcs_rsc_set_B_X">
+ <resource_ref id="B" />
+ <resource_ref id="X" />
+ </resource_set>
+ </rsc_colocation>
+ </constraints>
+ """
+ )
+
+ def test_primitive_set(self):
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail(
+ "constraint colocation set R X",
+ "Error: R is a bundle resource, you should use the bundle id: B "
+ "when adding constraints, use --force to override\n"
+ )
+
+ def test_primitive_set_force(self):
+ self.fixture_primitive("R", "B")
+ self.assert_effect(
+ "constraint colocation set R X --force",
+ """
+ <constraints>
+ <rsc_colocation id="pcs_rsc_colocation_set_R_X"
+ score="INFINITY"
+ >
+ <resource_set id="pcs_rsc_set_R_X">
+ <resource_ref id="R" />
+ <resource_ref id="X" />
+ </resource_set>
+ </rsc_colocation>
+ </constraints>
+ """,
+ "Warning: R is a bundle resource, you should use the bundle id: B when adding constraints\n"
+ )
+
+
+@skip_unless_pacemaker_supports_bundle
+class BundleOrder(Bundle):
+ def setUp(self):
+ super(BundleOrder, self).setUp()
+ self.fixture_primitive("X")
+
+ def test_bundle(self):
+ self.assert_effect(
+ "constraint order B then X",
+ """
+ <constraints>
+ <rsc_order id="order-B-X-mandatory"
+ first="B" first-action="start"
+ then="X" then-action="start" />
+ </constraints>
+ """,
+ "Adding B X (kind: Mandatory) (Options: first-action=start "
+ "then-action=start)\n"
+ )
+
+ def test_primitive(self):
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail(
+ "constraint order R then X",
+ "Error: R is a bundle resource, you should use the bundle id: B "
+ "when adding constraints. Use --force to override.\n"
+ )
+
+ def test_primitive_force(self):
+ self.fixture_primitive("R", "B")
+ self.assert_effect(
+ "constraint order R then X --force",
+ """
+ <constraints>
+ <rsc_order id="order-R-X-mandatory"
+ first="R" first-action="start"
+ then="X" then-action="start" />
+ </constraints>
+ """,
+ "Adding R X (kind: Mandatory) (Options: first-action=start "
+ "then-action=start)\n"
+ )
+
+ def test_bundle_set(self):
+ self.assert_effect(
+ "constraint order set B X",
+ """
+ <constraints>
+ <rsc_order id="pcs_rsc_order_set_B_X">
+ <resource_set id="pcs_rsc_set_B_X">
+ <resource_ref id="B" />
+ <resource_ref id="X" />
+ </resource_set>
+ </rsc_order>
+ </constraints>
+ """
+ )
+
+ def test_primitive_set(self):
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail(
+ "constraint order set R X",
+ "Error: R is a bundle resource, you should use the bundle id: B "
+ "when adding constraints, use --force to override\n"
+ )
+
+ def test_primitive_set_force(self):
+ self.fixture_primitive("R", "B")
+ self.assert_effect(
+ "constraint order set R X --force",
+ """
+ <constraints>
+ <rsc_order id="pcs_rsc_order_set_R_X">
+ <resource_set id="pcs_rsc_set_R_X">
+ <resource_ref id="R" />
+ <resource_ref id="X" />
+ </resource_set>
+ </rsc_order>
+ </constraints>
+ """,
+ "Warning: R is a bundle resource, you should use the bundle id: B "
+ "when adding constraints\n"
+ )
+
+
+@skip_unless_pacemaker_supports_bundle
+class BundleTicket(Bundle):
+ def setUp(self):
+ super(BundleTicket, self).setUp()
+
+ def test_bundle(self):
+ self.assert_effect(
+ "constraint ticket add T B",
+ """
+ <constraints>
+ <rsc_ticket id="ticket-T-B" rsc="B" ticket="T" />
+ </constraints>
+ """
+ )
+
+ def test_primitive(self):
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail(
+ "constraint ticket add T R",
+ "Error: R is a bundle resource, you should use the bundle id: B "
+ "when adding constraints, use --force to override\n"
+ )
+
+ def test_primitive_force(self):
+ self.fixture_primitive("R", "B")
+ self.assert_effect(
+ "constraint ticket add T R --force",
+ """
+ <constraints>
+ <rsc_ticket id="ticket-T-R" rsc="R" ticket="T" />
+ </constraints>
+ """,
+ "Warning: R is a bundle resource, you should use the bundle id: B "
+ "when adding constraints\n"
+ )
+
+ def test_bundle_set(self):
+ self.assert_effect(
+ "constraint ticket set B setoptions ticket=T",
+ """
+ <constraints>
+ <rsc_ticket id="pcs_rsc_ticket_set_B" ticket="T">
+ <resource_set id="pcs_rsc_set_B">
+ <resource_ref id="B" />
+ </resource_set>
+ </rsc_ticket>
+ </constraints>
+ """
+ )
+
+ def test_primitive_set(self):
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail(
+ "constraint ticket set R setoptions ticket=T",
+ "Error: R is a bundle resource, you should use the bundle id: B "
+ "when adding constraints, use --force to override\n"
+ )
+
+ def test_primitive_set_force(self):
+ self.fixture_primitive("R", "B")
+ self.assert_effect(
+ "constraint ticket set R setoptions ticket=T --force",
+ """
+ <constraints>
+ <rsc_ticket id="pcs_rsc_ticket_set_R" ticket="T">
+ <resource_set id="pcs_rsc_set_R">
+ <resource_ref id="R" />
+ </resource_set>
+ </rsc_ticket>
+ </constraints>
+ """,
+ "Warning: R is a bundle resource, you should use the bundle id: B "
+ "when adding constraints\n"
+ )
diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
index 1487eb4..70c76f0 100644
--- a/pcs/test/test_lib_commands_quorum.py
+++ b/pcs/test/test_lib_commands_quorum.py
@@ -406,7 +406,7 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
severity.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "invalid",
+ "option_names": ["invalid"],
"option_type": "quorum",
"allowed": [
"auto_tie_breaker",
@@ -692,7 +692,7 @@ class AddDeviceTest(TestCase, CmanMixin):
severity.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "bad_option",
+ "option_names": ["bad_option"],
"option_type": "quorum device",
"allowed": ["sync_timeout", "timeout"],
},
@@ -730,7 +730,7 @@ class AddDeviceTest(TestCase, CmanMixin):
severity.WARNING,
report_codes.INVALID_OPTION,
{
- "option_name": "bad_option",
+ "option_names": ["bad_option"],
"option_type": "quorum device",
"allowed": ["sync_timeout", "timeout"],
}
@@ -1581,6 +1581,7 @@ class RemoveDeviceTest(TestCase, CmanMixin):
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
@mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: True)
@mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: True)
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
def test_success_3nodes_sbd(
self, mock_remote_stop, mock_remote_disable, mock_remove_net,
mock_get_corosync, mock_push_corosync
@@ -1625,6 +1626,7 @@ class RemoveDeviceTest(TestCase, CmanMixin):
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
@mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: False)
@mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: False)
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
def test_success_2nodes_no_sbd(
self, mock_remote_stop, mock_remote_disable, mock_remove_net,
mock_get_corosync, mock_push_corosync
@@ -1668,6 +1670,7 @@ class RemoveDeviceTest(TestCase, CmanMixin):
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
@mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: True)
@mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: True)
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: [])
def test_success_2nodes_sbd(
self, mock_remote_stop, mock_remote_disable, mock_remove_net,
mock_get_corosync, mock_push_corosync
@@ -1717,6 +1720,51 @@ class RemoveDeviceTest(TestCase, CmanMixin):
self.assertEqual(2, len(mock_remote_disable.mock_calls))
self.assertEqual(2, len(mock_remote_stop.mock_calls))
+ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: True)
+ @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: True)
+ @mock.patch("pcs.lib.sbd.get_local_sbd_device_list", lambda: ["/dev"])
+ def test_success_2nodes_sbd_with_device(
+ self, mock_remote_stop, mock_remote_disable, mock_remove_net,
+ mock_get_corosync, mock_push_corosync
+ ):
+ # cluster consists of two nodes, but SBD with shared storage is in use
+ # auto tie breaker doesn't need to be enabled
+ original_conf = open(rc("corosync-qdevice.conf")).read()
+ no_device_conf = open(rc("corosync.conf")).read()
+ mock_get_corosync.return_value = original_conf
+ lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+ lib.remove_device(lib_env)
+
+ self.assertEqual(1, len(mock_push_corosync.mock_calls))
+ ac(
+ mock_push_corosync.mock_calls[0][1][0].config.export(),
+ no_device_conf
+ )
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ [
+ (
+ severity.INFO,
+ report_codes.SERVICE_DISABLE_STARTED,
+ {
+ "service": "corosync-qdevice",
+ }
+ ),
+ (
+ severity.INFO,
+ report_codes.SERVICE_STOP_STARTED,
+ {
+ "service": "corosync-qdevice",
+ }
+ ),
+ ]
+ )
+ self.assertEqual(1, len(mock_remove_net.mock_calls))
+ self.assertEqual(2, len(mock_remote_disable.mock_calls))
+ self.assertEqual(2, len(mock_remote_stop.mock_calls))
+
@mock.patch("pcs.lib.sbd.atb_has_to_be_enabled")
@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
def test_success_file(
@@ -2008,7 +2056,7 @@ class UpdateDeviceTest(TestCase, CmanMixin):
severity.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "bad_option",
+ "option_names": ["bad_option"],
"option_type": "quorum device",
"allowed": ["sync_timeout", "timeout"],
},
@@ -2039,7 +2087,7 @@ class UpdateDeviceTest(TestCase, CmanMixin):
severity.WARNING,
report_codes.INVALID_OPTION,
{
- "option_name": "bad_option",
+ "option_names": ["bad_option"],
"option_type": "quorum device",
"allowed": ["sync_timeout", "timeout"],
}
diff --git a/pcs/test/test_lib_commands_sbd.py b/pcs/test/test_lib_commands_sbd.py
index d323252..6017371 100644
--- a/pcs/test/test_lib_commands_sbd.py
+++ b/pcs/test/test_lib_commands_sbd.py
@@ -15,7 +15,9 @@ from pcs.test.tools.assertions import (
assert_report_item_list_equal,
)
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.integration_lib import Runner, Call
+from pcs import settings
from pcs.common import report_codes
from pcs.lib.errors import (
ReportItemSeverity as Severities,
@@ -35,6 +37,9 @@ from pcs.lib.external import (
import pcs.lib.commands.sbd as cmd_sbd
+runner = Runner()
+
+
def _assert_equal_list_of_dictionaries_without_order(expected, actual):
for item in actual:
if item not in expected:
@@ -90,7 +95,7 @@ class ValidateSbdOptionsTest(TestCase):
Severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "SBD_UNKNOWN",
+ "option_names": ["SBD_UNKNOWN"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -100,7 +105,7 @@ class ValidateSbdOptionsTest(TestCase):
Severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "another_unknown_option",
+ "option_names": ["another_unknown_option"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -125,7 +130,7 @@ class ValidateSbdOptionsTest(TestCase):
Severities.WARNING,
report_codes.INVALID_OPTION,
{
- "option_name": "SBD_UNKNOWN",
+ "option_names": ["SBD_UNKNOWN"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -135,7 +140,7 @@ class ValidateSbdOptionsTest(TestCase):
Severities.WARNING,
report_codes.INVALID_OPTION,
{
- "option_name": "another_unknown_option",
+ "option_names": ["another_unknown_option"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -151,7 +156,8 @@ class ValidateSbdOptionsTest(TestCase):
"SBD_WATCHDOG_TIMEOUT": "5",
"SBD_STARTMODE": "clean",
"SBD_WATCHDOG_DEV": "/dev/watchdog",
- "SBD_OPTS": " "
+ "SBD_OPTS": " ",
+ "SBD_DEVICE": "/dev/vda",
}
assert_report_item_list_equal(
@@ -161,7 +167,7 @@ class ValidateSbdOptionsTest(TestCase):
Severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "SBD_WATCHDOG_DEV",
+ "option_names": ["SBD_WATCHDOG_DEV"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -171,7 +177,17 @@ class ValidateSbdOptionsTest(TestCase):
Severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "SBD_OPTS",
+ "option_names": ["SBD_OPTS"],
+ "option_type": None,
+ "allowed": self.allowed_sbd_options,
+ },
+ None
+ ),
+ (
+ Severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": ["SBD_DEVICE"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -197,7 +213,7 @@ class ValidateSbdOptionsTest(TestCase):
Severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "SBD_WATCHDOG_DEV",
+ "option_names": ["SBD_WATCHDOG_DEV"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -207,7 +223,7 @@ class ValidateSbdOptionsTest(TestCase):
Severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "SBD_OPTS",
+ "option_names": ["SBD_OPTS"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -217,7 +233,7 @@ class ValidateSbdOptionsTest(TestCase):
Severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "SBD_UNKNOWN",
+ "option_names": ["SBD_UNKNOWN"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -244,7 +260,7 @@ class ValidateSbdOptionsTest(TestCase):
Severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "SBD_WATCHDOG_DEV",
+ "option_names": ["SBD_WATCHDOG_DEV"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -254,7 +270,7 @@ class ValidateSbdOptionsTest(TestCase):
Severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "SBD_OPTS",
+ "option_names": ["SBD_OPTS"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -264,7 +280,7 @@ class ValidateSbdOptionsTest(TestCase):
Severities.WARNING,
report_codes.INVALID_OPTION,
{
- "option_name": "SBD_UNKNOWN",
+ "option_names": ["SBD_UNKNOWN"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -274,7 +290,7 @@ class ValidateSbdOptionsTest(TestCase):
Severities.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "SBD_PACEMAKER",
+ "option_names": ["SBD_PACEMAKER"],
"option_type": None,
"allowed": self.allowed_sbd_options,
},
@@ -304,7 +320,7 @@ class ValidateSbdOptionsTest(TestCase):
{
"option_name": "SBD_WATCHDOG_TIMEOUT",
"option_value": "-1",
- "allowed_values": "nonnegative integer",
+ "allowed_values": "a non-negative integer",
},
None
)
@@ -325,7 +341,7 @@ class ValidateSbdOptionsTest(TestCase):
{
"option_name": "SBD_WATCHDOG_TIMEOUT",
"option_value": "not int",
- "allowed_values": "nonnegative integer",
+ "allowed_values": "a non-negative integer",
},
None
)
@@ -346,7 +362,7 @@ class ValidateSbdOptionsTest(TestCase):
{
"option_name": "SBD_WATCHDOG_TIMEOUT",
"option_value": None,
- "allowed_values": "nonnegative integer",
+ "allowed_values": "a non-negative integer",
},
None
)
@@ -354,81 +370,183 @@ class ValidateSbdOptionsTest(TestCase):
)
-class GetFullWatchdogListTest(TestCase):
- def setUp(self):
- self.node_list = NodeAddressesList(
- [NodeAddresses("node" + str(i)) for i in range(5)]
- )
+class ValidateWatchdogDictTest(TestCase):
+ def test_all_ok(self):
+ watchdog_dict = {
+ NodeAddresses("node1"): "/dev/watchdog1",
+ NodeAddresses("node2"): "/dev/watchdog2",
+ }
+ self.assertEqual([], cmd_sbd._validate_watchdog_dict(watchdog_dict))
- def test_full(self):
+ def test_some_not_ok(self):
watchdog_dict = {
- self.node_list[1].label: "/dev/watchdog1",
- self.node_list[2].label: "/dev/watchdog2"
+ NodeAddresses("node1"): "",
+ NodeAddresses("node2"): None,
+ NodeAddresses("node3"): "/dev/watchdog",
+ NodeAddresses("node4"): "../dev/watchdog",
}
- expected = {
- self.node_list[0]: "/dev/default",
- self.node_list[1]: "/dev/watchdog1",
- self.node_list[2]: "/dev/watchdog2",
- self.node_list[3]: "/dev/default",
- self.node_list[4]: "/dev/default",
+ assert_report_item_list_equal(
+ cmd_sbd._validate_watchdog_dict(watchdog_dict),
+ [
+ (
+ Severities.ERROR,
+ report_codes.WATCHDOG_INVALID,
+ {"watchdog": watchdog}
+ ) for watchdog in ["", None, "../dev/watchdog"]
+ ]
+ )
+
+
+class ValidateDeviceDictTest(TestCase):
+ def test_all_ok(self):
+ device_dict = {
+ NodeAddresses("node1"): ["/dev1", "/dev2"],
+ NodeAddresses("node2"): ["/dev1"],
}
+ self.assertEqual([], cmd_sbd._validate_device_dict(device_dict))
+
+ def test_some_not_ok(self):
+ too_many_devices = [
+ "dev" + str(i) for i in range(settings.sbd_max_device_num + 1)
+ ]
+ device_dict = {
+ NodeAddresses("node1"): [],
+ NodeAddresses("node2"): too_many_devices,
+ NodeAddresses("node3"): ["/dev/vda"],
+ NodeAddresses("node4"): ["/dev/vda1", "../dev/sda2"],
+ }
+ assert_report_item_list_equal(
+ cmd_sbd._validate_device_dict(device_dict),
+ [
+ (
+ Severities.ERROR,
+ report_codes.SBD_NO_DEVICE_FOR_NODE,
+ {
+ "node": "node1",
+ }
+ ),
+ (
+ Severities.ERROR,
+ report_codes.SBD_TOO_MANY_DEVICES_FOR_NODE,
+ {
+ "node": "node2",
+ "device_list": too_many_devices,
+ "max_devices": settings.sbd_max_device_num,
+ }
+ ),
+ (
+ Severities.ERROR,
+ report_codes.SBD_DEVICE_PATH_NOT_ABSOLUTE,
+ {
+ "node": "node4",
+ "device": "../dev/sda2",
+ }
+ ),
+ ]
+ )
+
+
+class CheckNodeNamesInClusterTest(TestCase):
+ def setUp(self):
+ self.node_list = NodeAddressesList([
+ NodeAddresses("node1"),
+ NodeAddresses("node2"),
+ NodeAddresses("node3"),
+ ])
+
+ def test_all_ok(self):
+ node_name_list = ["node1", "node3"]
self.assertEqual(
- cmd_sbd._get_full_watchdog_list(
- self.node_list, "/dev/default", watchdog_dict
- ),
- expected
+ [],
+ cmd_sbd._check_node_names_in_cluster(self.node_list, node_name_list)
)
- def test_unknown_nodes(self):
- watchdog_dict = {
- self.node_list[1].label: "/dev/watchdog1",
- self.node_list[2].label: "/dev/watchdog2",
- "unknown_node": "/dev/watchdog0",
- "another_unknown_node": "/dev/watchdog"
- }
- assert_raise_library_error(
- lambda: cmd_sbd._get_full_watchdog_list(
- self.node_list, "/dev/dog", watchdog_dict
+ def test_repeating_nodes(self):
+ node_name_list = ["node1", "node3", "node1"]
+ self.assertEqual(
+ [],
+ cmd_sbd._check_node_names_in_cluster(self.node_list, node_name_list)
+ )
+
+ def test_one_not_found(self):
+ node_name_list = ["node0", "node3"]
+ assert_report_item_list_equal(
+ cmd_sbd._check_node_names_in_cluster(
+ self.node_list, node_name_list
),
- (
+ [(
Severities.ERROR,
report_codes.NODE_NOT_FOUND,
- {"node": "unknown_node"}
+ {"node": "node0"}
+ )]
+ )
+
+ def test_multiple_not_found(self):
+ node_name_list = ["node0", "node3", "node4"]
+ assert_report_item_list_equal(
+ cmd_sbd._check_node_names_in_cluster(
+ self.node_list, node_name_list
),
- (
- Severities.ERROR,
- report_codes.NODE_NOT_FOUND,
- {"node": "another_unknown_node"}
- )
+ [
+ (
+ Severities.ERROR,
+ report_codes.NODE_NOT_FOUND,
+ {"node": node}
+ ) for node in ["node0", "node4"]
+ ]
+ )
+
+ def test_multiple_not_found_repeating(self):
+ node_name_list = ["node0", "node3", "node4", "node0", "node1", "node3"]
+ assert_report_item_list_equal(
+ cmd_sbd._check_node_names_in_cluster(
+ self.node_list, node_name_list
+ ),
+ [
+ (
+ Severities.ERROR,
+ report_codes.NODE_NOT_FOUND,
+ {"node": node}
+ ) for node in ["node0", "node4"]
+ ]
)
- def test_invalid_watchdogs(self):
- watchdog_dict = {
- self.node_list[1].label: "",
- self.node_list[2].label: None,
- self.node_list[3].label: "not/abs/path",
- self.node_list[4].label: "/dev/watchdog"
+class GetFullNodeDictTest(TestCase):
+ def setUp(self):
+ self.node_list = NodeAddressesList([
+ NodeAddresses("node1"),
+ NodeAddresses("node2"),
+ NodeAddresses("node3"),
+ ])
+
+ def test_not_using_default(self):
+ node_dict = dict([
+ ("node" + str(i), "val" + str(i)) for i in range(4)
+ ])
+ expected = {
+ self.node_list[0]: "val1",
+ self.node_list[1]: "val2",
+ self.node_list[2]: "val3",
}
- assert_raise_library_error(
- lambda: cmd_sbd._get_full_watchdog_list(
- self.node_list, "/dev/dog", watchdog_dict
- ),
- (
- Severities.ERROR,
- report_codes.WATCHDOG_INVALID,
- {"watchdog": ""}
- ),
- (
- Severities.ERROR,
- report_codes.WATCHDOG_INVALID,
- {"watchdog": None}
- ),
- (
- Severities.ERROR,
- report_codes.WATCHDOG_INVALID,
- {"watchdog": "not/abs/path"}
- )
+ self.assertEqual(
+ expected,
+ cmd_sbd._get_full_node_dict(self.node_list, node_dict, None)
+ )
+
+ def test_using_default(self):
+ node_dict = dict([
+ ("node" + str(i), "val" + str(i)) for i in range(3)
+ ])
+ default = "default"
+ expected = {
+ self.node_list[0]: "val1",
+ self.node_list[1]: "val2",
+ self.node_list[2]: default,
+ }
+ self.assertEqual(
+ expected,
+ cmd_sbd._get_full_node_dict(self.node_list, node_dict, default)
)
@@ -436,7 +554,7 @@ class GetFullWatchdogListTest(TestCase):
@mock.patch("pcs.lib.sbd.check_sbd")
class GetClusterSbdStatusTest(CommandSbdTest):
def test_success(self, mock_check_sbd, mock_get_nodes):
- def ret_val(communicator, node, empty_str):
+ def ret_val(communicator, node, empty_str, empty_list):
self.assertEqual(communicator, self.mock_com)
self.assertEqual(empty_str, "")
if node.label == "node0":
@@ -475,7 +593,7 @@ class GetClusterSbdStatusTest(CommandSbdTest):
mock_get_nodes.return_value = self.node_list
expected = [
{
- "node": self.node_list.find_by_label("node0"),
+ "node": "node0",
"status": {
"installed": True,
"enabled": True,
@@ -483,7 +601,7 @@ class GetClusterSbdStatusTest(CommandSbdTest):
}
},
{
- "node": self.node_list.find_by_label("node1"),
+ "node": "node1",
"status": {
"installed": False,
"enabled": False,
@@ -491,7 +609,7 @@ class GetClusterSbdStatusTest(CommandSbdTest):
}
},
{
- "node": self.node_list.find_by_label("node2"),
+ "node": "node2",
"status": {
"installed": True,
"enabled": False,
@@ -507,7 +625,7 @@ class GetClusterSbdStatusTest(CommandSbdTest):
self.assertEqual(self.mock_log.warning.call_count, 0)
def test_failures(self, mock_check_sbd, mock_get_nodes):
- def ret_val(communicator, node, empty_str):
+ def ret_val(communicator, node, empty_str, empty_list):
self.assertEqual(communicator, self.mock_com)
self.assertEqual(empty_str, "")
if node.label == "node0":
@@ -539,15 +657,15 @@ class GetClusterSbdStatusTest(CommandSbdTest):
}
expected = [
{
- "node": self.node_list.find_by_label("node0"),
+ "node": "node0",
"status": all_none
},
{
- "node": self.node_list.find_by_label("node1"),
+ "node": "node1",
"status": all_none
},
{
- "node": self.node_list.find_by_label("node2"),
+ "node": "node2",
"status": all_none
}
]
@@ -624,18 +742,18 @@ OPTION= value
mock_get_nodes.return_value = self.node_list
expected = [
{
- "node": self.node_list.find_by_label("node0"),
+ "node": "node0",
"config": {
"SBD_TEST": "true",
"ANOTHER_OPT": "1"
}
},
{
- "node": self.node_list.find_by_label("node1"),
+ "node": "node1",
"config": {"OPTION": "value"}
},
{
- "node": self.node_list.find_by_label("node2"),
+ "node": "node2",
"config": {}
}
]
@@ -675,18 +793,18 @@ invalid value
mock_get_nodes.return_value = self.node_list
expected = [
{
- "node": self.node_list.find_by_label("node0"),
+ "node": "node0",
"config": {
"SBD_TEST": "true",
"ANOTHER_OPT": "1"
}
},
{
- "node": self.node_list.find_by_label("node1"),
+ "node": "node1",
"config": {}
},
{
- "node": self.node_list.find_by_label("node2"),
+ "node": "node2",
"config": None
}
]
@@ -754,3 +872,359 @@ SBD_WATCHDOG_TIMEOUT=0
{}
)
)
+
+
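+# The tests below replace LibraryEnvironment.cmd_runner with the module-level
+# fake Runner so expected external commands can be queued and verified.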
+class CommonTest(TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.patcher = mock.patch.object(
+ LibraryEnvironment,
+ "cmd_runner",
+ lambda self: runner
+ )
+ cls.patcher.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.patcher.stop()
+
+ def setUp(self):
+ self.env = LibraryEnvironment(
+ mock.MagicMock(logging.Logger),
+ MockLibraryReportProcessor()
+ )
+
+
+class InitializeBlockDevicesTest(CommonTest):
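+ # fixture_sbd_init builds the expected runner call:
+ # "sbd -d <dev> ... <option flags> create".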
+ def fixture_sbd_init(
+ self, device_list, options, stdout="", stderr="", return_code=0
+ ):
+ cmd = ["sbd"]
+ for device in device_list:
+ cmd += ["-d", device]
+
+ for opt, val in options:
+ cmd += [opt, val]
+
+ cmd.append("create")
+ return [Call(" ".join(cmd), stdout, stderr, return_code)]
+
+ def fixture_invalid_value(self, option, value):
+ return (
+ Severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": option,
+ "option_value": value,
+ "allowed_values": "a non-negative integer",
+ }
+ )
+
+ def test_all_options(self):
+ device_list = ["dev1", "dev2"]
+ option_dict = {
+ "watchdog-timeout": "1",
+ "loop-timeout": "10",
+ "allocate-timeout": "3",
+ "msgwait-timeout": "2",
+
+ }
+ runner.set_runs(self.fixture_sbd_init(
+ device_list,
+ [
+ ("-2", "3"),
+ ("-3", "10"),
+ ("-4", "2"),
+ ("-1", "1"),
+ ]
+ ))
+ cmd_sbd.initialize_block_devices(self.env, device_list, option_dict)
+ runner.assert_everything_launched()
+ self.env.report_processor.assert_reports([
+ (
+ Severities.INFO,
+ report_codes.SBD_DEVICE_INITIALIZATION_STARTED,
+ {"device_list": device_list}
+ ),
+ (
+ Severities.INFO,
+ report_codes.SBD_DEVICE_INITIALIZATION_SUCCESS,
+ {"device_list": device_list}
+ ),
+ ])
+
+ def test_no_options(self):
+ device_list = ["dev1", "dev2"]
+ runner.set_runs(self.fixture_sbd_init(device_list, {}))
+ cmd_sbd.initialize_block_devices(self.env, device_list, {})
+ runner.assert_everything_launched()
+ self.env.report_processor.assert_reports([
+ (
+ Severities.INFO,
+ report_codes.SBD_DEVICE_INITIALIZATION_STARTED,
+ {"device_list": device_list}
+ ),
+ (
+ Severities.INFO,
+ report_codes.SBD_DEVICE_INITIALIZATION_SUCCESS,
+ {"device_list": device_list}
+ ),
+ ])
+
+ def test_validation_failed(self):
+ option_dict = {
+ "unknown_option": "val",
+ "watchdog-timeout": "-1",
+ "another_one": "-1",
+ "loop-timeout": "-3",
+ "allocate-timeout": "-3",
+ "msgwait-timeout": "-2",
+ }
+ allowed_options = [
+ "watchdog-timeout", "loop-timeout", "allocate-timeout",
+ "msgwait-timeout",
+ ]
+ assert_raise_library_error(
+ lambda: cmd_sbd.initialize_block_devices(self.env, [], option_dict),
+ (
+ Severities.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_names": ["device"],
+ "option_type": None,
+ }
+ ),
+ (
+ Severities.ERROR,
+ report_codes.INVALID_OPTION,
+ {
+ "option_names": sorted(["another_one", "unknown_option"]),
+ "option_type": "option",
+ "allowed": sorted(allowed_options),
+ }
+ ),
+ *[
+ self.fixture_invalid_value(opt, option_dict[opt])
+ for opt in allowed_options
+ ]
+ )
+
+
+ at mock.patch("os.path.exists")
+ at mock.patch("pcs.lib.sbd.get_local_sbd_config")
+ at mock.patch("pcs.lib.external.is_systemctl", lambda: True)
+class GetLocalDevicesInfoTest(CommonTest):
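+ # Fixtures return the expected runner calls: systemctl "is-enabled
+ # sbd.service" and "sbd -d <device> list"/"dump" for each device listed
+ # in the SBD config.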
+ def fixture_sbd_enabled(self, enabled):
+ cmd = [settings.systemctl_binary, "is-enabled", "sbd.service"]
+ return [Call(" ".join(cmd), returncode=0 if enabled else 1)]
+
+ def fixture_sbd_info(self, device, stdout="", return_code=0):
+ cmd = ["sbd", "-d", device, "list"]
+ return [Call(" ".join(cmd), stdout, returncode=return_code)]
+
+ def fixture_sbd_dump(self, device, stdout="", return_code=0):
+ cmd = ["sbd", "-d", device, "dump"]
+ return [Call(" ".join(cmd), stdout, returncode=return_code)]
+
+ def test_success(self, mock_config, mock_config_exists):
+ mock_config_exists.return_value = True
+ mock_config.return_value = """
+SBD_DEVICE="/dev1;/dev2"
+ """
+ runner.set_runs(
+ self.fixture_sbd_enabled(True) +
+ self.fixture_sbd_info("/dev1", "1") +
+ self.fixture_sbd_info("/dev2", "2")
+ )
+ expected_output = [
+ {
+ "device": "/dev1",
+ "list": "1",
+ "dump": None,
+ },
+ {
+ "device": "/dev2",
+ "list": "2",
+ "dump": None,
+ },
+ ]
+ self.assertEqual(
+ expected_output, cmd_sbd.get_local_devices_info(self.env)
+ )
+ runner.assert_everything_launched()
+ mock_config.assert_called_once_with()
+ mock_config_exists.assert_called_once_with(settings.sbd_config)
+
+ def test_with_dump(self, mock_config, mock_config_exists):
+ mock_config_exists.return_value = True
+ mock_config.return_value = """
+SBD_DEVICE="/dev1;/dev2"
+ """
+ runner.set_runs(
+ self.fixture_sbd_enabled(True) +
+ self.fixture_sbd_info("/dev1", "1") +
+ self.fixture_sbd_dump("/dev1", "3") +
+ self.fixture_sbd_info("/dev2", "2") +
+ self.fixture_sbd_dump("/dev2", "4")
+ )
+ expected_output = [
+ {
+ "device": "/dev1",
+ "list": "1",
+ "dump": "3",
+ },
+ {
+ "device": "/dev2",
+ "list": "2",
+ "dump": "4",
+ },
+ ]
+ self.assertEqual(
+ expected_output, cmd_sbd.get_local_devices_info(self.env, dump=True)
+ )
+ runner.assert_everything_launched()
+ mock_config.assert_called_once_with()
+ mock_config_exists.assert_called_once_with(settings.sbd_config)
+
+ def test_no_config(self, mock_config, mock_config_exists):
+ mock_config_exists.return_value = False
+ runner.set_runs(self.fixture_sbd_enabled(True))
+ self.assertEqual([], cmd_sbd.get_local_devices_info(self.env))
+ runner.assert_everything_launched()
+ self.assertEqual(0, mock_config.call_count)
+ mock_config_exists.assert_called_once_with(settings.sbd_config)
+
+ def test_sbd_disabled(self, mock_config, mock_config_exists):
+ mock_config_exists.return_value = True
+ runner.set_runs(self.fixture_sbd_enabled(False))
+ self.assertEqual([], cmd_sbd.get_local_devices_info(self.env))
+ runner.assert_everything_launched()
+ self.assertEqual(0, mock_config.call_count)
+ self.assertEqual(0, mock_config_exists.call_count)
+
+ def test_with_failures(self, mock_config, mock_config_exists):
+ mock_config_exists.return_value = True
+ mock_config.return_value = """
+SBD_DEVICE="/dev1;/dev2;/dev3"
+ """
+ runner.set_runs(
+ self.fixture_sbd_enabled(True) +
+ self.fixture_sbd_info("/dev1", "1", 1) +
+ self.fixture_sbd_info("/dev2", "2") +
+ self.fixture_sbd_dump("/dev2", "4", 1) +
+ self.fixture_sbd_info("/dev3", "5") +
+ self.fixture_sbd_dump("/dev3", "6")
+ )
+ expected_output = [
+ {
+ "device": "/dev1",
+ "list": None,
+ "dump": None,
+ },
+ {
+ "device": "/dev2",
+ "list": "2",
+ "dump": None,
+ },
+ {
+ "device": "/dev3",
+ "list": "5",
+ "dump": "6",
+ },
+ ]
+ self.assertEqual(
+ expected_output, cmd_sbd.get_local_devices_info(self.env, dump=True)
+ )
+ self.env.report_processor.assert_reports([
+ (
+ Severities.WARNING,
+ report_codes.SBD_DEVICE_LIST_ERROR,
+ {
+ "device": "/dev1",
+ "reason": "1"
+ }
+ ),
+ (
+ Severities.WARNING,
+ report_codes.SBD_DEVICE_DUMP_ERROR,
+ {
+ "device": "/dev2",
+ "reason": "4"
+ }
+ ),
+ ])
+ runner.assert_everything_launched()
+ mock_config.assert_called_once_with()
+ mock_config_exists.assert_called_once_with(settings.sbd_config)
+
+
+class SetMessageTest(CommonTest):
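+ # expected "sbd -d <device> message <node> <message>" runner call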
+ def fixture_call_sbd_message(
+ self, device, node, message, stderr="", return_code=0
+ ):
+ return [Call(
+ "sbd -d {0} message {1} {2}".format(device, node, message),
+ stderr=stderr,
+ returncode=return_code
+ )]
+
+ def test_empty_options(self):
+ assert_raise_library_error(
+ lambda: cmd_sbd.set_message(self.env, "", "", ""),
+ (
+ Severities.ERROR,
+ report_codes.REQUIRED_OPTION_IS_MISSING,
+ {
+ "option_names": ["device", "node"],
+ "option_type": None,
+ }
+ ),
+ (
+ Severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "message",
+ "option_value": "",
+ "allowed_values": settings.sbd_message_types,
+ }
+ )
+ )
+
+ def test_invalid_message_type(self):
+ assert_raise_library_error(
+ lambda: cmd_sbd.set_message(self.env, "device", "node1", "message"),
+ (
+ Severities.ERROR,
+ report_codes.INVALID_OPTION_VALUE,
+ {
+ "option_name": "message",
+ "option_value": "message",
+ "allowed_values": settings.sbd_message_types,
+ }
+ )
+ )
+
+ def test_success(self):
+ runner.set_runs(self.fixture_call_sbd_message("device", "node", "test"))
+ cmd_sbd.set_message(self.env, "device", "node", "test")
+ runner.assert_everything_launched()
+
+ def test_failure(self):
+ runner.set_runs(
+ self.fixture_call_sbd_message("device", "node", "test", "error", 1)
+ )
+ assert_raise_library_error(
+ lambda: cmd_sbd.set_message(self.env, "device", "node", "test"),
+ (
+ Severities.ERROR,
+ report_codes.SBD_DEVICE_MESSAGE_ERROR,
+ {
+ "device": "device",
+ "node": "node",
+ "message": "test",
+ "reason": "error",
+ }
+ )
+ )
+ runner.assert_everything_launched()
+
diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py
index 4373d65..1bf4716 100644
--- a/pcs/test/test_lib_corosync_config_facade.py
+++ b/pcs/test/test_lib_corosync_config_facade.py
@@ -582,7 +582,7 @@ quorum {
severity.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "nonsense1",
+ "option_names": ["nonsense1"],
"option_type": "quorum",
"allowed": [
"auto_tie_breaker",
@@ -596,7 +596,7 @@ quorum {
severity.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "nonsense2",
+ "option_names": ["nonsense2"],
"option_type": "quorum",
"allowed": [
"auto_tie_breaker",
@@ -1207,12 +1207,8 @@ quorum {
(
severity.ERROR,
report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "host"}
- ),
- (
- severity.ERROR,
- report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "algorithm"}
+ {"option_names": ["algorithm", "host"]},
+ None
)
)
self.assertFalse(facade.need_stopped_cluster)
@@ -1257,7 +1253,7 @@ quorum {
severity.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "bad_model_option",
+ "option_names": ["bad_model_option"],
"option_type": "quorum device model",
"allowed": [
"algorithm",
@@ -1293,7 +1289,7 @@ quorum {
(
severity.ERROR,
report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "host"}
+ {"option_names": ["host"]}
),
(
severity.ERROR,
@@ -1319,7 +1315,7 @@ quorum {
severity.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "bad_generic_option",
+ "option_names": ["bad_generic_option"],
"option_type": "quorum device",
"allowed": ["sync_timeout", "timeout"],
},
@@ -1329,7 +1325,7 @@ quorum {
severity.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "model",
+ "option_names": ["model"],
"option_type": "quorum device",
"allowed": ["sync_timeout", "timeout"],
}
@@ -1371,12 +1367,8 @@ quorum {
(
severity.ERROR,
report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "host"}
- ),
- (
- severity.ERROR,
- report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "algorithm"}
+ {"option_names": ["algorithm", "host"]},
+ None
)
)
self.assertFalse(facade.need_stopped_cluster)
@@ -1395,12 +1387,8 @@ quorum {
(
severity.ERROR,
report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "host"}
- ),
- (
- severity.ERROR,
- report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "algorithm"}
+ {"option_names": ["algorithm", "host"]},
+ None
)
)
self.assertFalse(facade.need_stopped_cluster)
@@ -1473,7 +1461,7 @@ quorum {
severity.WARNING,
report_codes.INVALID_OPTION,
{
- "option_name": "bad_model_option",
+ "option_names": ["bad_model_option"],
"option_type": "quorum device model",
"allowed": [
"algorithm",
@@ -1525,7 +1513,7 @@ quorum {
severity.WARNING,
report_codes.INVALID_OPTION,
{
- "option_name": "bad_generic_option",
+ "option_names": ["bad_generic_option"],
"option_type": "quorum device",
"allowed": ["sync_timeout", "timeout"],
}
@@ -1574,6 +1562,7 @@ quorum {
)
)
+ reporter = MockLibraryReportProcessor()
assert_raise_library_error(
lambda: facade.add_quorum_device(
reporter,
@@ -1697,12 +1686,8 @@ quorum {
(
severity.ERROR,
report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "host"},
- ),
- (
- severity.ERROR,
- report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "algorithm"}
+ {"option_names": ["algorithm", "host"]},
+ None
),
(
severity.ERROR,
@@ -1735,12 +1720,8 @@ quorum {
(
severity.ERROR,
report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "host"},
- ),
- (
- severity.ERROR,
- report_codes.REQUIRED_OPTION_IS_MISSING,
- {"option_name": "algorithm"}
+ {"option_names": ["algorithm", "host"]},
+ None
)
)
self.assertFalse(facade.need_stopped_cluster)
@@ -1780,7 +1761,7 @@ quorum {
severity.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "bad_model_option",
+ "option_names": ["bad_model_option"],
"option_type": "quorum device model",
"allowed": [
"algorithm",
@@ -1889,7 +1870,7 @@ quorum {
severity.WARNING,
report_codes.INVALID_OPTION,
{
- "option_name": "bad_model_option",
+ "option_names": ["bad_model_option"],
"option_type": "quorum device model",
"allowed": [
"algorithm",
@@ -2005,7 +1986,7 @@ quorum {
severity.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "bad_generic_option",
+ "option_names": ["bad_generic_option"],
"option_type": "quorum device",
"allowed": ["sync_timeout", "timeout"],
},
@@ -2015,7 +1996,7 @@ quorum {
severity.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "model",
+ "option_names": ["model"],
"option_type": "quorum device",
"allowed": ["sync_timeout", "timeout"],
}
@@ -2062,7 +2043,7 @@ quorum {
severity.ERROR,
report_codes.INVALID_OPTION,
{
- "option_name": "model",
+ "option_names": ["model"],
"option_type": "quorum device",
"allowed": ["sync_timeout", "timeout"],
}
@@ -2108,7 +2089,7 @@ quorum {
severity.WARNING,
report_codes.INVALID_OPTION,
{
- "option_name": "bad_generic_option",
+ "option_names": ["bad_generic_option"],
"option_type": "quorum device",
"allowed": ["sync_timeout", "timeout"],
},
diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
index d900ced..3868b0f 100644
--- a/pcs/test/test_lib_external.py
+++ b/pcs/test/test_lib_external.py
@@ -8,29 +8,24 @@ from __future__ import (
from pcs.test.tools.pcs_unittest import TestCase
import os.path
import logging
-try:
- # python2
- from urllib2 import (
- HTTPError as urllib_HTTPError,
- URLError as urllib_URLError
- )
-except ImportError:
- # python3
- from urllib.error import (
- HTTPError as urllib_HTTPError,
- URLError as urllib_URLError
- )
from pcs.test.tools.assertions import (
assert_raise_library_error,
assert_report_item_equal,
assert_report_item_list_equal,
)
-from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.custom_mock import (
+ MockCurl,
+ MockLibraryReportProcessor,
+)
from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.misc import outdent
from pcs import settings
-from pcs.common import report_codes
+from pcs.common import (
+ pcs_pycurl as pycurl,
+ report_codes,
+)
from pcs.lib import reports
from pcs.lib.errors import (
LibraryError,
@@ -87,18 +82,25 @@ class CommandRunnerTest(TestCase):
{"env": {}, "stdin": None,}
)
logger_calls = [
- mock.call("Running: {0}".format(command_str)),
- mock.call("""\
-Finished running: {0}
-Return value: {1}
---Debug Stdout Start--
-{2}
---Debug Stdout End--
---Debug Stderr Start--
-{3}
---Debug Stderr End--""".format(
- command_str, expected_retval, expected_stdout, expected_stderr
- ))
+ mock.call("Running: {0}\nEnvironment:".format(command_str)),
+ mock.call(
+ outdent(
+ """\
+ Finished running: {0}
+ Return value: {1}
+ --Debug Stdout Start--
+ {2}
+ --Debug Stdout End--
+ --Debug Stderr Start--
+ {3}
+ --Debug Stderr End--"""
+ ).format(
+ command_str,
+ expected_retval,
+ expected_stdout,
+ expected_stderr,
+ )
+ )
]
self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
self.mock_logger.debug.assert_has_calls(logger_calls)
@@ -111,6 +113,7 @@ Return value: {1}
{
"command": command_str,
"stdin": None,
+ "environment": dict(),
}
),
(
@@ -139,15 +142,19 @@ Return value: {1}
mock_process.returncode = expected_retval
mock_popen.return_value = mock_process
+ global_env = {"a": "a", "b": "b"}
runner = lib.CommandRunner(
self.mock_logger,
self.mock_reporter,
- {"a": "a", "b": "b"}
+ global_env.copy()
)
+ # "{C}" is used to check that no python format-string conflict appears
real_stdout, real_stderr, real_retval = runner.run(
command,
- env_extend={"b": "B", "c": "C"}
+ env_extend={"b": "B", "c": "{C}"}
)
+ # check that env_extend did not affect the initial env of the runner
+ self.assertEqual(runner._env_vars, global_env)
self.assertEqual(real_stdout, expected_stdout)
self.assertEqual(real_stderr, expected_stderr)
@@ -156,21 +163,37 @@ Return value: {1}
self.assert_popen_called_with(
mock_popen,
command,
- {"env": {"a": "a", "b": "B", "c": "C"}, "stdin": None,}
+ {"env": {"a": "a", "b": "B", "c": "{C}"}, "stdin": None,}
)
logger_calls = [
- mock.call("Running: {0}".format(command_str)),
- mock.call("""\
-Finished running: {0}
-Return value: {1}
---Debug Stdout Start--
-{2}
---Debug Stdout End--
---Debug Stderr Start--
-{3}
---Debug Stderr End--""".format(
- command_str, expected_retval, expected_stdout, expected_stderr
- ))
+ mock.call(
+ outdent(
+ """\
+ Running: {0}
+ Environment:
+ a=a
+ b=B
+ c={1}"""
+ ).format(command_str, "{C}")
+ ),
+ mock.call(
+ outdent(
+ """\
+ Finished running: {0}
+ Return value: {1}
+ --Debug Stdout Start--
+ {2}
+ --Debug Stdout End--
+ --Debug Stderr Start--
+ {3}
+ --Debug Stderr End--"""
+ ).format(
+ command_str,
+ expected_retval,
+ expected_stdout,
+ expected_stderr,
+ )
+ )
]
self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
self.mock_logger.debug.assert_has_calls(logger_calls)
@@ -183,6 +206,7 @@ Return value: {1}
{
"command": command_str,
"stdin": None,
+ "environment": {"a": "a", "b": "B", "c": "{C}"},
}
),
(
@@ -227,21 +251,32 @@ Return value: {1}
{"env": {}, "stdin": -1}
)
logger_calls = [
- mock.call("""\
-Running: {0}
---Debug Input Start--
-{1}
---Debug Input End--""".format(command_str, stdin)),
- mock.call("""\
-Finished running: {0}
-Return value: {1}
---Debug Stdout Start--
-{2}
---Debug Stdout End--
---Debug Stderr Start--
-{3}
---Debug Stderr End--""".format(
- command_str, expected_retval, expected_stdout, expected_stderr
+ mock.call(
+ outdent(
+ """\
+ Running: {0}
+ Environment:
+ --Debug Input Start--
+ {1}
+ --Debug Input End--"""
+ ).format(command_str, stdin)
+ ),
+ mock.call(
+ outdent(
+ """\
+ Finished running: {0}
+ Return value: {1}
+ --Debug Stdout Start--
+ {2}
+ --Debug Stdout End--
+ --Debug Stderr Start--
+ {3}
+ --Debug Stderr End--"""
+ ).format(
+ command_str,
+ expected_retval,
+ expected_stdout,
+ expected_stderr,
))
]
self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
@@ -255,6 +290,7 @@ Return value: {1}
{
"command": command_str,
"stdin": stdin,
+ "environment": dict(),
}
),
(
@@ -299,7 +335,7 @@ Return value: {1}
{"env": {}, "stdin": None,}
)
logger_calls = [
- mock.call("Running: {0}".format(command_str)),
+ mock.call("Running: {0}\nEnvironment:".format(command_str)),
]
self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
self.mock_logger.debug.assert_has_calls(logger_calls)
@@ -312,6 +348,7 @@ Return value: {1}
{
"command": command_str,
"stdin": None,
+ "environment": dict(),
}
)
]
@@ -347,7 +384,7 @@ Return value: {1}
{"env": {}, "stdin": None,}
)
logger_calls = [
- mock.call("Running: {0}".format(command_str)),
+ mock.call("Running: {0}\nEnvironment:".format(command_str)),
]
self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
self.mock_logger.debug.assert_has_calls(logger_calls)
@@ -360,6 +397,7 @@ Return value: {1}
{
"command": command_str,
"stdin": None,
+ "environment": dict(),
}
)
]
@@ -367,7 +405,7 @@ Return value: {1}
@mock.patch(
- "pcs.lib.external.NodeCommunicator._NodeCommunicator__get_opener",
+ "pcs.lib.external.pycurl.Curl",
autospec=True
)
class NodeCommunicatorTest(TestCase):
@@ -375,26 +413,24 @@ class NodeCommunicatorTest(TestCase):
self.mock_logger = mock.MagicMock(logging.Logger)
self.mock_reporter = MockLibraryReportProcessor()
- def fixture_response(self, response_code, response_data):
- response = mock.MagicMock(["getcode", "read"])
- response.getcode.return_value = response_code
- response.read.return_value = response_data.encode("utf-8")
- return response
-
- def fixture_http_exception(self, response_code, response_data):
- response = urllib_HTTPError("url", response_code, "msg", [], None)
- response.read = mock.MagicMock(
- return_value=response_data.encode("utf-8")
- )
- return response
-
def fixture_logger_call_send(self, url, data):
send_msg = "Sending HTTP Request to: {url}"
if data:
send_msg += "\n--Debug Input Start--\n{data}\n--Debug Input End--"
return mock.call(send_msg.format(url=url, data=data))
- def fixture_logger_calls(self, url, data, response_code, response_data):
+ def fixture_logger_call_debug_data(self, url, data):
+ send_msg = outdent("""\
+ Communication debug info for calling: {url}
+ --Debug Communication Info Start--
+ {data}
+ --Debug Communication Info End--"""
+ )
+ return mock.call(send_msg.format(url=url, data=data))
+
+ def fixture_logger_calls(
+ self, url, data, response_code, response_data, debug_data
+ ):
result_msg = (
"Finished calling: {url}\nResponse Code: {code}"
+ "\n--Debug Response Start--\n{response}\n--Debug Response End--"
@@ -403,7 +439,8 @@ class NodeCommunicatorTest(TestCase):
self.fixture_logger_call_send(url, data),
mock.call(result_msg.format(
url=url, code=response_code, response=response_data
- ))
+ )),
+ self.fixture_logger_call_debug_data(url, debug_data)
]
def fixture_report_item_list_send(self, url, data):
@@ -418,7 +455,21 @@ class NodeCommunicatorTest(TestCase):
)
]
- def fixture_report_item_list(self, url, data, response_code, response_data):
+ def fixture_report_item_list_debug(self, url, data):
+ return [
+ (
+ severity.DEBUG,
+ report_codes.NODE_COMMUNICATION_DEBUG_INFO,
+ {
+ "target": url,
+ "data": data,
+ }
+ )
+ ]
+
+ def fixture_report_item_list(
+ self, url, data, response_code, response_data, debug_data
+ ):
return (
self.fixture_report_item_list_send(url, data)
+
@@ -433,6 +484,8 @@ class NodeCommunicatorTest(TestCase):
}
)
]
+ +
+ self.fixture_report_item_list_debug(url, debug_data)
)
def fixture_url(self, host, request):
@@ -440,32 +493,104 @@ class NodeCommunicatorTest(TestCase):
host=host, request=request
)
- def test_success(self, mock_get_opener):
+ def test_success(self, mock_pycurl_init):
host = "test_host"
request = "test_request"
data = '{"key1": "value1", "key2": ["value2a", "value2b"]}'
- expected_response_code = 200
expected_response_data = "expected response data"
- mock_opener = mock.MagicMock()
- mock_get_opener.return_value = mock_opener
- mock_opener.open.return_value = self.fixture_response(
- expected_response_code, expected_response_data
+ expected_response_code = 200
+ expected_debug_data = "* text\n>> data out\n"
+ mock_pycurl_obj = MockCurl(
+ {
+ pycurl.RESPONSE_CODE: expected_response_code,
+ },
+ expected_response_data.encode("utf-8"),
+ [
+ (pycurl.DEBUG_TEXT, b"text"),
+ (pycurl.DEBUG_DATA_OUT, b"data out")
+ ]
)
+ mock_pycurl_init.return_value = mock_pycurl_obj
comm = lib.NodeCommunicator(self.mock_logger, self.mock_reporter, {})
real_response = comm.call_host(host, request, data)
self.assertEqual(expected_response_data, real_response)
- mock_opener.addheaders.append.assert_not_called()
- mock_opener.open.assert_called_once_with(
+ expected_opts = {
+ pycurl.URL: self.fixture_url(host, request).encode("utf-8"),
+ pycurl.SSL_VERIFYHOST: 0,
+ pycurl.SSL_VERIFYPEER: 0,
+ pycurl.COPYPOSTFIELDS: data.encode("utf-8"),
+ pycurl.TIMEOUT_MS: settings.default_request_timeout * 1000,
+ }
+
+ self.assertLessEqual(
+ set(expected_opts.items()), set(mock_pycurl_obj.opts.items())
+ )
+
+ logger_calls = self.fixture_logger_calls(
self.fixture_url(host, request),
- data.encode("utf-8")
+ data,
+ expected_response_code,
+ expected_response_data,
+ expected_debug_data
)
+ self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
+ self.mock_logger.debug.assert_has_calls(logger_calls)
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ self.fixture_report_item_list(
+ self.fixture_url(host, request),
+ data,
+ expected_response_code,
+ expected_response_data,
+ expected_debug_data
+ )
+ )
+
+ @mock.patch("pcs.lib.external.os")
+ def test_success_proxy_set(self, mock_os, mock_pycurl_init):
+ host = "test_host"
+ request = "test_request"
+ data = '{"key1": "value1", "key2": ["value2a", "value2b"]}'
+ expected_response_data = "expected response data"
+ expected_response_code = 200
+ mock_os.environ = {
+ "all_proxy": "proxy1",
+ "https_proxy": "proxy2",
+ "HTTPS_PROXY": "proxy3",
+ }
+ mock_pycurl_obj = MockCurl(
+ {
+ pycurl.RESPONSE_CODE: expected_response_code,
+ },
+ expected_response_data.encode("utf-8"),
+ []
+ )
+ mock_pycurl_init.return_value = mock_pycurl_obj
+
+ comm = lib.NodeCommunicator(self.mock_logger, self.mock_reporter, {})
+ real_response = comm.call_host(host, request, data)
+ self.assertEqual(expected_response_data, real_response)
+
+ expected_opts = {
+ pycurl.URL: self.fixture_url(host, request).encode("utf-8"),
+ pycurl.SSL_VERIFYHOST: 0,
+ pycurl.SSL_VERIFYPEER: 0,
+ pycurl.COPYPOSTFIELDS: data.encode("utf-8"),
+ pycurl.TIMEOUT_MS: settings.default_request_timeout * 1000,
+ }
+
+ self.assertLessEqual(
+ set(expected_opts.items()), set(mock_pycurl_obj.opts.items())
+ )
+
logger_calls = self.fixture_logger_calls(
self.fixture_url(host, request),
data,
expected_response_code,
- expected_response_data
+ expected_response_data,
+ ""
)
self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
self.mock_logger.debug.assert_has_calls(logger_calls)
@@ -475,22 +600,27 @@ class NodeCommunicatorTest(TestCase):
self.fixture_url(host, request),
data,
expected_response_code,
- expected_response_data
+ expected_response_data,
+ ""
)
)
- def test_ipv6(self, mock_get_opener):
+ def test_ipv6(self, mock_pycurl_init):
host = "cafe::1"
request = "test_request"
data = None
token = "test_token"
expected_response_code = 200
expected_response_data = "expected response data"
- mock_opener = mock.MagicMock()
- mock_get_opener.return_value = mock_opener
- mock_opener.open.return_value = self.fixture_response(
- expected_response_code, expected_response_data
+ expected_debug_data = ""
+ mock_pycurl_obj = MockCurl(
+ {
+ pycurl.RESPONSE_CODE: expected_response_code,
+ },
+ expected_response_data.encode("utf-8"),
+ []
)
+ mock_pycurl_init.return_value = mock_pycurl_obj
comm = lib.NodeCommunicator(
self.mock_logger,
@@ -499,19 +629,26 @@ class NodeCommunicatorTest(TestCase):
)
real_response = comm.call_host(host, request, data)
self.assertEqual(expected_response_data, real_response)
-
- mock_opener.addheaders.append.assert_called_once_with(
- ("Cookie", "token={0}".format(token))
- )
- mock_opener.open.assert_called_once_with(
- self.fixture_url("[{0}]".format(host), request),
- data
+ expected_opts = {
+ pycurl.URL: self.fixture_url(
+ "[{0}]".format(host), request
+ ).encode("utf-8"),
+ pycurl.COOKIE: "token={0}".format(token).encode("utf-8"),
+ pycurl.SSL_VERIFYHOST: 0,
+ pycurl.SSL_VERIFYPEER: 0,
+ }
+ self.assertLessEqual(
+ set(expected_opts.items()), set(mock_pycurl_obj.opts.items())
)
+
+ self.assertTrue(pycurl.COPYPOSTFIELDS not in mock_pycurl_obj.opts)
+
logger_calls = self.fixture_logger_calls(
self.fixture_url("[{0}]".format(host), request),
data,
expected_response_code,
- expected_response_data
+ expected_response_data,
+ expected_debug_data
)
self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
self.mock_logger.debug.assert_has_calls(logger_calls)
@@ -521,15 +658,48 @@ class NodeCommunicatorTest(TestCase):
self.fixture_url("[{0}]".format(host), request),
data,
expected_response_code,
- expected_response_data
+ expected_response_data,
+ expected_debug_data
)
)
- def test_auth_token(self, mock_get_opener):
+ def test_communicator_timeout(self, mock_pycurl_init):
+ host = "test_host"
+ timeout = 10
+ mock_pycurl_obj = MockCurl({pycurl.RESPONSE_CODE: 200}, b"", [])
+ mock_pycurl_init.return_value = mock_pycurl_obj
+
+ comm = lib.NodeCommunicator(
+ self.mock_logger, self.mock_reporter, {}, request_timeout=timeout
+ )
+ dummy_response = comm.call_host(host, "test_request", None)
+
+ self.assertLessEqual(
+ set([(pycurl.TIMEOUT_MS, timeout * 1000)]),
+ set(mock_pycurl_obj.opts.items())
+ )
+
+ def test_call_host_timeout(self, mock_pycurl_init):
+ host = "test_host"
+ timeout = 10
+ mock_pycurl_obj = MockCurl({pycurl.RESPONSE_CODE: 200}, b"", [])
+ mock_pycurl_init.return_value = mock_pycurl_obj
+
+ comm = lib.NodeCommunicator(
+ self.mock_logger, self.mock_reporter, {}, request_timeout=15
+ )
+ dummy_response = comm.call_host(host, "test_request", None, timeout)
+
+ self.assertLessEqual(
+ set([(pycurl.TIMEOUT_MS, timeout * 1000)]),
+ set(mock_pycurl_obj.opts.items())
+ )
+
+ def test_auth_token(self, mock_pycurl_init):
host = "test_host"
token = "test_token"
- mock_opener = mock.MagicMock()
- mock_get_opener.return_value = mock_opener
+ mock_pycurl_obj = MockCurl({pycurl.RESPONSE_CODE: 200}, b"", [])
+ mock_pycurl_init.return_value = mock_pycurl_obj
comm = lib.NodeCommunicator(
self.mock_logger,
@@ -542,15 +712,16 @@ class NodeCommunicatorTest(TestCase):
)
dummy_response = comm.call_host(host, "test_request", None)
- mock_opener.addheaders.append.assert_called_once_with(
- ("Cookie", "token={0}".format(token))
+ self.assertLessEqual(
+ set([(pycurl.COOKIE, "token={0}".format(token).encode("utf-8"))]),
+ set(mock_pycurl_obj.opts.items())
)
- def test_user(self, mock_get_opener):
+ def test_user(self, mock_pycurl_init):
host = "test_host"
user = "test_user"
- mock_opener = mock.MagicMock()
- mock_get_opener.return_value = mock_opener
+ mock_pycurl_obj = MockCurl({pycurl.RESPONSE_CODE: 200}, b"", [])
+ mock_pycurl_init.return_value = mock_pycurl_obj
comm = lib.NodeCommunicator(
self.mock_logger,
@@ -560,15 +731,16 @@ class NodeCommunicatorTest(TestCase):
)
dummy_response = comm.call_host(host, "test_request", None)
- mock_opener.addheaders.append.assert_called_once_with(
- ("Cookie", "CIB_user={0}".format(user))
+ self.assertLessEqual(
+ set([(pycurl.COOKIE, "CIB_user={0}".format(user).encode("utf-8"))]),
+ set(mock_pycurl_obj.opts.items())
)
- def test_one_group(self, mock_get_opener):
+ def test_one_group(self, mock_pycurl_init):
host = "test_host"
groups = ["group1"]
- mock_opener = mock.MagicMock()
- mock_get_opener.return_value = mock_opener
+ mock_pycurl_obj = MockCurl({pycurl.RESPONSE_CODE: 200}, b"", [])
+ mock_pycurl_init.return_value = mock_pycurl_obj
comm = lib.NodeCommunicator(
self.mock_logger,
@@ -578,53 +750,57 @@ class NodeCommunicatorTest(TestCase):
)
dummy_response = comm.call_host(host, "test_request", None)
- mock_opener.addheaders.append.assert_called_once_with(
- (
- "Cookie",
- "CIB_user_groups={0}".format("Z3JvdXAx".encode("utf8"))
- )
+ self.assertLessEqual(
+ set([(
+ pycurl.COOKIE,
+ "CIB_user_groups={0}".format("Z3JvdXAx").encode("utf-8")
+ )]),
+ set(mock_pycurl_obj.opts.items())
)
- def test_all_options(self, mock_get_opener):
+ def test_all_options(self, mock_pycurl_init):
host = "test_host"
token = "test_token"
user = "test_user"
groups = ["group1", "group2"]
- mock_opener = mock.MagicMock()
- mock_get_opener.return_value = mock_opener
+ mock_pycurl_obj = MockCurl({pycurl.RESPONSE_CODE: 200}, b"", [])
+ mock_pycurl_init.return_value = mock_pycurl_obj
comm = lib.NodeCommunicator(
self.mock_logger,
self.mock_reporter,
{host: token},
- user, groups
+ user,
+ groups
)
dummy_response = comm.call_host(host, "test_request", None)
- mock_opener.addheaders.append.assert_called_once_with(
- (
- "Cookie",
- "token={token};CIB_user={user};CIB_user_groups={groups}".format(
- token=token,
- user=user,
- groups="Z3JvdXAxIGdyb3VwMg==".encode("utf-8")
- )
- )
+ cookie_str = (
+ "token={token};CIB_user={user};CIB_user_groups={groups}".format(
+ token=token,
+ user=user,
+ groups="Z3JvdXAxIGdyb3VwMg=="
+ ).encode("utf-8")
+ )
+ self.assertLessEqual(
+ set([(pycurl.COOKIE, cookie_str)]),
+ set(mock_pycurl_obj.opts.items())
)
- mock_opener = mock.MagicMock()
- mock_get_opener.return_value = mock_opener
- def base_test_http_error(self, mock_get_opener, code, exception):
+ def base_test_http_error(self, mock_pycurl_init, code, exception):
host = "test_host"
request = "test_request"
data = None
expected_response_code = code
expected_response_data = "expected response data"
- mock_opener = mock.MagicMock()
- mock_get_opener.return_value = mock_opener
- mock_opener.open.side_effect = self.fixture_http_exception(
- expected_response_code, expected_response_data
+ mock_pycurl_obj = MockCurl(
+ {
+ pycurl.RESPONSE_CODE: expected_response_code,
+ },
+ expected_response_data.encode("utf-8"),
+ []
)
+ mock_pycurl_init.return_value = mock_pycurl_obj
comm = lib.NodeCommunicator(self.mock_logger, self.mock_reporter, {})
self.assertRaises(
@@ -632,16 +808,22 @@ class NodeCommunicatorTest(TestCase):
lambda: comm.call_host(host, request, data)
)
- mock_opener.addheaders.append.assert_not_called()
- mock_opener.open.assert_called_once_with(
- self.fixture_url(host, request),
- data
+ self.assertTrue(pycurl.COOKIE not in mock_pycurl_obj.opts)
+ self.assertTrue(pycurl.COPYPOSTFIELDS not in mock_pycurl_obj.opts)
+ expected_opts = {
+ pycurl.URL: self.fixture_url(host, request).encode("utf-8"),
+ pycurl.SSL_VERIFYHOST: 0,
+ pycurl.SSL_VERIFYPEER: 0,
+ }
+ self.assertLessEqual(
+ set(expected_opts.items()), set(mock_pycurl_obj.opts.items())
)
logger_calls = self.fixture_logger_calls(
self.fixture_url(host, request),
data,
expected_response_code,
- expected_response_data
+ expected_response_data,
+ ""
)
self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
self.mock_logger.debug.assert_has_calls(logger_calls)
@@ -651,8 +833,9 @@ class NodeCommunicatorTest(TestCase):
self.fixture_url(host, request),
data,
expected_response_code,
- expected_response_data
- )
+ expected_response_data,
+ ""
+ ),
)
def test_no_authenticated(self, mock_get_opener):
@@ -690,14 +873,16 @@ class NodeCommunicatorTest(TestCase):
lib.NodeCommunicationException
)
- def test_connection_error(self, mock_get_opener):
+ def test_connection_error(self, mock_pycurl_init):
host = "test_host"
request = "test_request"
data = None
expected_reason = "expected reason"
- mock_opener = mock.MagicMock()
- mock_get_opener.return_value = mock_opener
- mock_opener.open.side_effect = urllib_URLError(expected_reason)
+ expected_url = self.fixture_url(host, request)
+ mock_pycurl_obj = MockCurl(
+ {}, b"", [], pycurl.error(pycurl.E_SEND_ERROR, expected_reason)
+ )
+ mock_pycurl_init.return_value = mock_pycurl_obj
comm = lib.NodeCommunicator(self.mock_logger, self.mock_reporter, {})
self.assertRaises(
@@ -705,19 +890,22 @@ class NodeCommunicatorTest(TestCase):
lambda: comm.call_host(host, request, data)
)
- mock_opener.addheaders.append.assert_not_called()
- mock_opener.open.assert_called_once_with(
- self.fixture_url(host, request),
- data
+ self.assertTrue(pycurl.COOKIE not in mock_pycurl_obj.opts)
+ self.assertTrue(pycurl.COPYPOSTFIELDS not in mock_pycurl_obj.opts)
+ expected_opts = {
+ pycurl.URL: expected_url.encode("utf-8"),
+ pycurl.SSL_VERIFYHOST: 0,
+ pycurl.SSL_VERIFYPEER: 0,
+ }
+ self.assertLessEqual(
+ set(expected_opts.items()), set(mock_pycurl_obj.opts.items())
)
logger_calls = [
- self.fixture_logger_call_send(
- self.fixture_url(host, request),
- data
- ),
+ self.fixture_logger_call_send(expected_url, data),
mock.call(
"Unable to connect to {0} ({1})".format(host, expected_reason)
- )
+ ),
+ self.fixture_logger_call_debug_data(expected_url, "")
]
self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
self.mock_logger.debug.assert_has_calls(logger_calls)
@@ -736,6 +924,69 @@ class NodeCommunicatorTest(TestCase):
"reason": expected_reason,
}
)]
+ +
+ self.fixture_report_item_list_debug(expected_url, "")
+ )
+
+ @mock.patch("pcs.lib.external.os")
+ def test_connection_error_proxy_set(self, mock_os, mock_pycurl_init):
+ host = "test_host"
+ request = "test_request"
+ data = None
+ expected_reason = "expected reason"
+ expected_url = self.fixture_url(host, request)
+ mock_os.environ = {
+ "all_proxy": "proxy1",
+ "https_proxy": "proxy2",
+ "HTTPS_PROXY": "proxy3",
+ }
+ mock_pycurl_obj = MockCurl(
+ {}, b"", [], pycurl.error(pycurl.E_SEND_ERROR, expected_reason)
+ )
+ mock_pycurl_init.return_value = mock_pycurl_obj
+
+ comm = lib.NodeCommunicator(self.mock_logger, self.mock_reporter, {})
+ self.assertRaises(
+ lib.NodeConnectionException,
+ lambda: comm.call_host(host, request, data)
+ )
+
+ self.assertTrue(pycurl.COOKIE not in mock_pycurl_obj.opts)
+ self.assertTrue(pycurl.COPYPOSTFIELDS not in mock_pycurl_obj.opts)
+ logger_calls = [
+ self.fixture_logger_call_send(expected_url, data),
+ mock.call(
+ "Unable to connect to {0} ({1})".format(host, expected_reason)
+ ),
+ self.fixture_logger_call_debug_data(expected_url, "")
+ ]
+ self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
+ self.mock_logger.debug.assert_has_calls(logger_calls)
+ self.mock_logger.warning.assert_has_calls([mock.call("Proxy is set")])
+ assert_report_item_list_equal(
+ self.mock_reporter.report_item_list,
+ self.fixture_report_item_list_send(
+ self.fixture_url(host, request),
+ data
+ )
+ +
+ [
+ (
+ severity.DEBUG,
+ report_codes.NODE_COMMUNICATION_NOT_CONNECTED,
+ {
+ "node": host,
+ "reason": expected_reason,
+ }
+ ),
+ (
+ severity.WARNING,
+ report_codes.NODE_COMMUNICATION_PROXY_IS_SET,
+ {}
+ )
+ ]
+ +
+ self.fixture_report_item_list_debug(expected_url, "")
)
@@ -992,9 +1243,15 @@ class ParallelCommunicationHelperTest(TestCase):
)
class IsCmanClusterTest(TestCase):
- def template_test(self, is_cman, corosync_output, corosync_retval=0):
+ def template_test(self, version_description, is_cman, corosync_retval=0):
mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
- mock_runner.run.return_value = (corosync_output, "", corosync_retval)
+ mock_runner.run.return_value = (
+ "Corosync Cluster Engine{0}\nCopyright (c) 2006-2009 Red Hat, Inc."
+ .format(version_description)
+ ,
+ "",
+ corosync_retval
+ )
self.assertEqual(is_cman, lib.is_cman_cluster(mock_runner))
mock_runner.run.assert_called_once_with([
os.path.join(settings.corosync_binaries, "corosync"),
@@ -1002,52 +1259,24 @@ class IsCmanClusterTest(TestCase):
])
def test_is_not_cman(self):
- self.template_test(
- False,
- """\
-Corosync Cluster Engine, version '2.3.4'
-Copyright (c) 2006-2009 Red Hat, Inc.
-"""
- )
+ self.template_test(", version '2.3.4'", is_cman=False)
def test_is_cman(self):
- self.template_test(
- True,
- """\
-Corosync Cluster Engine, version '1.4.7'
-Copyright (c) 2006-2009 Red Hat, Inc.
-"""
- )
+ self.template_test(", version '1.4.7'", is_cman=True)
def test_bad_version_format(self):
- self.template_test(
- False,
- """\
-Corosync Cluster Engine, nonsense '2.3.4'
-Copyright (c) 2006-2009 Red Hat, Inc.
-"""
- )
+ self.template_test(", nonsense '2.3.4'", is_cman=False)
def test_no_version(self):
- self.template_test(
- False,
- """\
-Corosync Cluster Engine
-Copyright (c) 2006-2009 Red Hat, Inc.
-"""
- )
+ self.template_test("", is_cman=False)
def test_corosync_error(self):
self.template_test(
- False,
- """\
-Corosync Cluster Engine, version '1.4.7'
-Copyright (c) 2006-2009 Red Hat, Inc.
-""",
- 1
+ ", version '1.4.7'",
+ is_cman=False,
+ corosync_retval=1
)
-
@mock.patch("pcs.lib.external.is_systemctl")
@mock.patch("pcs.lib.external.is_service_installed")
class DisableServiceTest(TestCase):
@@ -1060,6 +1289,9 @@ class DisableServiceTest(TestCase):
mock_systemctl.return_value = True
self.mock_runner.run.return_value = ("", "Removed symlink", 0)
lib.disable_service(self.mock_runner, self.service)
+ mock_is_installed.assert_called_once_with(
+ self.mock_runner, self.service, None
+ )
self.mock_runner.run.assert_called_once_with(
[_systemctl, "disable", self.service + ".service"]
)
@@ -1072,6 +1304,9 @@ class DisableServiceTest(TestCase):
lib.DisableServiceError,
lambda: lib.disable_service(self.mock_runner, self.service)
)
+ mock_is_installed.assert_called_once_with(
+ self.mock_runner, self.service, None
+ )
self.mock_runner.run.assert_called_once_with(
[_systemctl, "disable", self.service + ".service"]
)
@@ -1081,6 +1316,9 @@ class DisableServiceTest(TestCase):
mock_systemctl.return_value = False
self.mock_runner.run.return_value = ("", "", 0)
lib.disable_service(self.mock_runner, self.service)
+ mock_is_installed.assert_called_once_with(
+ self.mock_runner, self.service, None
+ )
self.mock_runner.run.assert_called_once_with(
[_chkconfig, self.service, "off"]
)
@@ -1093,6 +1331,9 @@ class DisableServiceTest(TestCase):
lib.DisableServiceError,
lambda: lib.disable_service(self.mock_runner, self.service)
)
+ mock_is_installed.assert_called_once_with(
+ self.mock_runner, self.service, None
+ )
self.mock_runner.run.assert_called_once_with(
[_chkconfig, self.service, "off"]
)
@@ -1104,6 +1345,9 @@ class DisableServiceTest(TestCase):
mock_systemctl.return_value = True
lib.disable_service(self.mock_runner, self.service)
self.assertEqual(self.mock_runner.run.call_count, 0)
+ mock_is_installed.assert_called_once_with(
+ self.mock_runner, self.service, None
+ )
def test_not_systemctl_not_installed(
self, mock_is_installed, mock_systemctl
@@ -1112,12 +1356,19 @@ class DisableServiceTest(TestCase):
mock_systemctl.return_value = False
lib.disable_service(self.mock_runner, self.service)
self.assertEqual(self.mock_runner.run.call_count, 0)
+ mock_is_installed.assert_called_once_with(
+ self.mock_runner, self.service, None
+ )
def test_instance_systemctl(self, mock_is_installed, mock_systemctl):
+ instance = "test"
mock_is_installed.return_value = True
mock_systemctl.return_value = True
self.mock_runner.run.return_value = ("", "Removed symlink", 0)
- lib.disable_service(self.mock_runner, self.service, instance="test")
+ lib.disable_service(self.mock_runner, self.service, instance=instance)
+ mock_is_installed.assert_called_once_with(
+ self.mock_runner, self.service, instance
+ )
self.mock_runner.run.assert_called_once_with([
_systemctl,
"disable",
@@ -1125,10 +1376,14 @@ class DisableServiceTest(TestCase):
])
def test_instance_not_systemctl(self, mock_is_installed, mock_systemctl):
+ instance = "test"
mock_is_installed.return_value = True
mock_systemctl.return_value = False
self.mock_runner.run.return_value = ("", "", 0)
- lib.disable_service(self.mock_runner, self.service, instance="test")
+ lib.disable_service(self.mock_runner, self.service, instance=instance)
+ mock_is_installed.assert_called_once_with(
+ self.mock_runner, self.service, instance
+ )
self.mock_runner.run.assert_called_once_with(
[_chkconfig, self.service, "off"]
)
@@ -1507,6 +1762,45 @@ class IsServiceInstalledTest(TestCase):
mock_non_systemd.assert_called_once_with(self.mock_runner)
self.assertEqual(mock_systemd.call_count, 0)
+ def test_installed_systemd_instance(
+ self, mock_non_systemd, mock_systemd, mock_is_systemctl
+ ):
+ mock_is_systemctl.return_value = True
+ mock_systemd.return_value = ["service1", "service2@"]
+ mock_non_systemd.return_value = []
+ self.assertTrue(
+ lib.is_service_installed(self.mock_runner, "service2", "instance")
+ )
+ self.assertEqual(mock_is_systemctl.call_count, 1)
+ mock_systemd.assert_called_once_with(self.mock_runner)
+ self.assertEqual(mock_non_systemd.call_count, 0)
+
+ def test_not_installed_systemd_instance(
+ self, mock_non_systemd, mock_systemd, mock_is_systemctl
+ ):
+ mock_is_systemctl.return_value = True
+ mock_systemd.return_value = ["service1", "service2"]
+ mock_non_systemd.return_value = []
+ self.assertFalse(
+ lib.is_service_installed(self.mock_runner, "service2", "instance")
+ )
+ self.assertEqual(mock_is_systemctl.call_count, 1)
+ mock_systemd.assert_called_once_with(self.mock_runner)
+ self.assertEqual(mock_non_systemd.call_count, 0)
+
+ def test_installed_not_systemd_instance(
+ self, mock_non_systemd, mock_systemd, mock_is_systemctl
+ ):
+ mock_is_systemctl.return_value = False
+ mock_systemd.return_value = []
+ mock_non_systemd.return_value = ["service1", "service2"]
+ self.assertTrue(
+ lib.is_service_installed(self.mock_runner, "service2", "instance")
+ )
+ self.assertEqual(mock_is_systemctl.call_count, 1)
+ mock_non_systemd.assert_called_once_with(self.mock_runner)
+ self.assertEqual(mock_systemd.call_count, 0)
+
@mock.patch("pcs.lib.external.is_systemctl")
class GetSystemdServicesTest(TestCase):
@@ -1515,13 +1809,15 @@ class GetSystemdServicesTest(TestCase):
def test_success(self, mock_is_systemctl):
mock_is_systemctl.return_value = True
- self.mock_runner.run.return_value = ("""\
-pcsd.service disabled
-sbd.service enabled
-pacemaker.service enabled
+ self.mock_runner.run.return_value = (outdent(
+ """\
+ pcsd.service disabled
+ sbd.service enabled
+ pacemaker.service enabled
-3 unit files listed.
-""", "", 0)
+ 3 unit files listed.
+ """
+ ), "", 0)
self.assertEqual(
lib.get_systemd_services(self.mock_runner),
["pcsd", "sbd", "pacemaker"]
@@ -1554,11 +1850,13 @@ class GetNonSystemdServicesTest(TestCase):
def test_success(self, mock_is_systemctl):
mock_is_systemctl.return_value = False
- self.mock_runner.run.return_value = ("""\
-pcsd 0:off 1:off 2:on 3:on 4:on 5:on 6:off
-sbd 0:off 1:on 2:on 3:on 4:on 5:on 6:off
-pacemaker 0:off 1:off 2:off 3:off 4:off 5:off 6:off
-""", "", 0)
+ self.mock_runner.run.return_value = (outdent(
+ """\
+ pcsd 0:off 1:off 2:on 3:on 4:on 5:on 6:off
+ sbd 0:off 1:on 2:on 3:on 4:on 5:on 6:off
+ pacemaker 0:off 1:off 2:off 3:off 4:off 5:off 6:off
+ """
+ ), "", 0)
self.assertEqual(
lib.get_non_systemd_services(self.mock_runner),
["pcsd", "sbd", "pacemaker"]
@@ -1597,3 +1895,60 @@ class EnsureIsSystemctlTest(TestCase):
)
)
+
+class IsProxySetTest(TestCase):
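+ # only a non-empty https_proxy/HTTPS_PROXY/all_proxy/ALL_PROXY counts as a proxy being set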
+ def test_without_proxy(self):
+ self.assertFalse(lib.is_proxy_set({
+ "var1": "value",
+ "var2": "val",
+ }))
+
+ def test_multiple(self):
+ self.assertTrue(lib.is_proxy_set({
+ "var1": "val",
+ "https_proxy": "test.proxy",
+ "var2": "val",
+ "all_proxy": "test2.proxy",
+ "var3": "val",
+ }))
+
+ def test_empty_string(self):
+ self.assertFalse(lib.is_proxy_set({
+ "all_proxy": "",
+ }))
+
+ def test_http_proxy(self):
+ self.assertFalse(lib.is_proxy_set({
+ "http_proxy": "test.proxy",
+ }))
+
+ def test_HTTP_PROXY(self):
+ self.assertFalse(lib.is_proxy_set({
+ "HTTP_PROXY": "test.proxy",
+ }))
+
+ def test_https_proxy(self):
+ self.assertTrue(lib.is_proxy_set({
+ "https_proxy": "test.proxy",
+ }))
+
+ def test_HTTPS_PROXY(self):
+ self.assertTrue(lib.is_proxy_set({
+ "HTTPS_PROXY": "test.proxy",
+ }))
+
+ def test_all_proxy(self):
+ self.assertTrue(lib.is_proxy_set({
+ "all_proxy": "test.proxy",
+ }))
+
+ def test_ALL_PROXY(self):
+ self.assertTrue(lib.is_proxy_set({
+ "ALL_PROXY": "test.proxy",
+ }))
+
+ def test_no_proxy(self):
+ self.assertTrue(lib.is_proxy_set({
+ "no_proxy": "*",
+ "all_proxy": "test.proxy",
+ }))
diff --git a/pcs/test/test_lib_node.py b/pcs/test/test_lib_node.py
index caf128f..eca98a5 100644
--- a/pcs/test/test_lib_node.py
+++ b/pcs/test/test_lib_node.py
@@ -5,10 +5,58 @@ from __future__ import (
unicode_literals,
)
-from pcs.test.tools.pcs_unittest import TestCase
+import sys
+
+from pcs.test.tools.pcs_unittest import TestCase, skipUnless
+
+#python 2.6 does not support sys.version_info.major
+need_python3 = skipUnless(sys.version_info[0] == 3, "test requires python3")
+need_python2 = skipUnless(sys.version_info[0] == 2, "test requires python2")
import pcs.lib.node as lib
+class NodeAddressesContainHost(TestCase):
+ def test_return_true_if_is_as_ring0(self):
+ self.assertTrue(
+ lib.node_addresses_contain_host(
+ [lib.NodeAddresses("HOST")],
+ "HOST"
+ )
+ )
+
+ def test_return_true_if_is_as_ring1(self):
+ self.assertTrue(
+ lib.node_addresses_contain_host(
+ [lib.NodeAddresses("SOME", ring1="HOST")],
+ "HOST"
+ )
+ )
+
+ def test_return_false_if_not_match(self):
+ self.assertFalse(
+ lib.node_addresses_contain_host(
+ [lib.NodeAddresses("SOME", ring1="ANOTHER")],
+ "HOST"
+ )
+ )
+
+class NodeAddressesContainName(TestCase):
+ def test_return_true_if_is_as_name(self):
+ self.assertTrue(
+ lib.node_addresses_contain_name(
+ [lib.NodeAddresses("HOST", name="NAME")],
+ "NAME"
+ )
+ )
+
+ def test_return_false_if_not_match(self):
+ self.assertFalse(
+ lib.node_addresses_contain_name(
+ [lib.NodeAddresses(ring0="NAME")],
+ "NAME"
+ )
+ )
+
class NodeAddressesTest(TestCase):
def test_properties_all(self):
ring0 = "test_ring0"
@@ -65,7 +113,69 @@ class NodeAddressesTest(TestCase):
self.assertFalse(another_node0 < node0)
self.assertFalse(node1 < node1)
self.assertTrue(node0 < node1)
- self.assertFalse(node1 < node1)
+ self.assertFalse(node1 < node0)
+
+@need_python3
+class NodeAddressesRepr(TestCase):
+ def test_host_only_specified(self):
+ self.assertEqual(repr(lib.NodeAddresses("node0")), str(
+ "<pcs.lib.node.NodeAddresses ['node0'], {'name': None, 'id': None}>"
+ ))
+
+ def test_host_and_name_specified(self):
+ self.assertEqual(repr(lib.NodeAddresses("node0", name="name0")), str(
+ "<pcs.lib.node.NodeAddresses ['node0'],"
+ " {'name': 'name0', 'id': None}>"
+ ))
+
+ def test_host_name_and_id_specified(self):
+ self.assertEqual(
+ repr(lib.NodeAddresses("node0", name="name0", id="id0")),
+ str(
+ "<pcs.lib.node.NodeAddresses ['node0'],"
+ " {'name': 'name0', 'id': 'id0'}>"
+ )
+ )
+
+ def test_host_ring1_name_and_id_specified(self):
+ self.assertEqual(
+ repr(lib.NodeAddresses("node0", "node0-1", name="name0", id="id0")),
+ str(
+ "<pcs.lib.node.NodeAddresses ['node0', 'node0-1'],"
+ " {'name': 'name0', 'id': 'id0'}>"
+ )
+ )
+
+@need_python2
+class NodeAddressesRepr_python2(TestCase):
+ def test_host_only_specified(self):
+ self.assertEqual(repr(lib.NodeAddresses("node0")), str(
+ "<pcs.lib.node.NodeAddresses [u'node0'], {'name': None, 'id': None}>"
+ ))
+
+ def test_host_and_name_specified(self):
+ self.assertEqual(repr(lib.NodeAddresses("node0", name="name0")), str(
+ "<pcs.lib.node.NodeAddresses [u'node0'],"
+ " {'name': u'name0', 'id': None}>"
+ ))
+
+ def test_host_name_and_id_specified(self):
+ self.assertEqual(
+ repr(lib.NodeAddresses("node0", name="name0", id="id0")),
+ str(
+ "<pcs.lib.node.NodeAddresses [u'node0'],"
+ " {'name': u'name0', 'id': u'id0'}>"
+ )
+ )
+
+ def test_host_ring1_name_and_id_specified(self):
+ self.assertEqual(
+ repr(lib.NodeAddresses("node0", "node0-1", name="name0", id="id0")),
+ str(
+ "<pcs.lib.node.NodeAddresses [u'node0', u'node0-1'],"
+ " {'name': u'name0', 'id': u'id0'}>"
+ )
+ )
class NodeAddressesListTest(TestCase):
@@ -128,3 +238,30 @@ class NodeAddressesListTest(TestCase):
self.assertRaises(
lib.NodeNotFound, lambda: node_list.find_by_label("node2")
)
+
+class NodeAddressesList__add__(TestCase):
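+ # __add__ must accept both another NodeAddressesList and a plain list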
+ def test_can_add_node_addresses_list(self):
+ node0 = lib.NodeAddresses("node0")
+ node1 = lib.NodeAddresses("node1")
+ node2 = lib.NodeAddresses("node2")
+ self.assertEqual(lib.NodeAddressesList(
+ [node0, node1, node2])._list,
+ (
+ lib.NodeAddressesList([node0, node1])
+ +
+ lib.NodeAddressesList([node2])
+ )._list
+ )
+
+ def test_can_add_list(self):
+ node0 = lib.NodeAddresses("node0")
+ node1 = lib.NodeAddresses("node1")
+ node2 = lib.NodeAddresses("node2")
+ self.assertEqual(lib.NodeAddressesList(
+ [node0, node1, node2])._list,
+ (
+ lib.NodeAddressesList([node0, node1])
+ +
+ [node2]
+ )._list
+ )
diff --git a/pcs/test/test_lib_pacemaker_state.py b/pcs/test/test_lib_pacemaker_state.py
deleted file mode 100644
index 13f6eb0..0000000
--- a/pcs/test/test_lib_pacemaker_state.py
+++ /dev/null
@@ -1,154 +0,0 @@
-from __future__ import (
- absolute_import,
- division,
- print_function,
- unicode_literals,
-)
-
-from pcs.test.tools.pcs_unittest import TestCase
-from lxml import etree
-
-from pcs.test.tools.assertions import assert_raise_library_error
-from pcs.test.tools.misc import get_test_resource as rc
-from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
-
-from pcs.lib.pacemaker_state import (
- ClusterState,
- _Attrs,
- _Children,
-)
-
-from pcs.common import report_codes
-from pcs.lib.errors import ReportItemSeverity as severities
-
-class AttrsTest(TestCase):
- def test_get_declared_attr(self):
- attrs = _Attrs('test', {'node-name': 'node1'}, {'name': 'node-name'})
- self.assertEqual('node1', attrs.name)
-
- def test_raises_on_undeclared_attribute(self):
- attrs = _Attrs('test', {'node-name': 'node1'}, {})
- self.assertRaises(AttributeError, lambda: attrs.name)
-
- def test_raises_on_missing_required_attribute(self):
- attrs = _Attrs('test', {}, {'name': 'node-name'})
- self.assertRaises(AttributeError, lambda: attrs.name)
-
- def test_attr_transformation_success(self):
- attrs = _Attrs('test', {'number': '7'}, {'count': ('number', int)})
- self.assertEqual(7, attrs.count)
-
- def test_attr_transformation_fail(self):
- attrs = _Attrs('test', {'number': 'abc'}, {'count': ('number', int)})
- self.assertRaises(ValueError, lambda: attrs.count)
-
-class ChildrenTest(TestCase):
- def setUp(self):
- self.dom = etree.fromstring(
- '<main><some name="0"/><any name="1"/><any name="2"/></main>'
- )
-
- def wrap(self, element):
- return '{0}.{1}'.format(element.tag, element.attrib['name'])
-
- def test_get_declared_section(self):
- children = _Children(
- 'test', self.dom, {}, {'some_section': ('some', self.wrap)}
- )
- self.assertEqual('some.0', children.some_section)
-
- def test_get_declared_children(self):
- children = _Children('test', self.dom, {'anys': ('any', self.wrap)}, {})
- self.assertEqual(['any.1', 'any.2'], children.anys)
-
- def test_raises_on_undeclared_children(self):
- children = _Children('test', self.dom, {}, {})
- self.assertRaises(AttributeError, lambda: children.some_section)
-
-
-class TestBase(TestCase):
- def setUp(self):
- self.create_covered_status = get_xml_manipulation_creator_from_file(
- rc('crm_mon.minimal.xml')
- )
- self.covered_status = self.create_covered_status()
-
-class ClusterStatusTest(TestBase):
- def test_minimal_crm_mon_is_valid(self):
- ClusterState(str(self.covered_status))
-
- def test_refuse_invalid_xml(self):
- assert_raise_library_error(
- lambda: ClusterState('invalid xml'),
- (severities.ERROR, report_codes.BAD_CLUSTER_STATE_FORMAT, {})
- )
-
- def test_refuse_invalid_document(self):
- self.covered_status.append_to_first_tag_name(
- 'nodes',
- '<node without="required attributes" />'
- )
-
- assert_raise_library_error(
- lambda: ClusterState(str(self.covered_status)),
- (severities.ERROR, report_codes.BAD_CLUSTER_STATE_FORMAT, {})
- )
-
-
-class WorkWithClusterStatusNodesTest(TestBase):
- def fixture_node_string(self, **kwargs):
- attrs = dict(name='name', id='id', type='member')
- attrs.update(kwargs)
- return '''<node
- name="{name}"
- id="{id}"
- online="true"
- standby="true"
- standby_onfail="false"
- maintenance="false"
- pending="false"
- unclean="false"
- shutdown="false"
- expected_up="false"
- is_dc="false"
- resources_running="0"
- type="{type}"
- />'''.format(**attrs)
-
- def test_can_get_node_names(self):
- self.covered_status.append_to_first_tag_name(
- 'nodes',
- self.fixture_node_string(name='node1', id='1'),
- self.fixture_node_string(name='node2', id='2'),
- )
- xml = str(self.covered_status)
- self.assertEqual(
- ['node1', 'node2'],
- [node.attrs.name for node in ClusterState(xml).node_section.nodes]
- )
-
- def test_can_filter_out_remote_nodes(self):
- self.covered_status.append_to_first_tag_name(
- 'nodes',
- self.fixture_node_string(name='node1', id='1'),
- self.fixture_node_string(name='node2', type='remote', id='2'),
- )
- xml = str(self.covered_status)
- self.assertEqual(
- ['node1'],
- [
- node.attrs.name
- for node in ClusterState(xml).node_section.nodes
- if node.attrs.type != 'remote'
- ]
- )
-
-
-class WorkWithClusterStatusSummaryTest(TestBase):
- def test_nodes_count(self):
- xml = str(self.covered_status)
- self.assertEqual(0, ClusterState(xml).summary.nodes.attrs.count)
-
- def test_resources_count(self):
- xml = str(self.covered_status)
- self.assertEqual(0, ClusterState(xml).summary.resources.attrs.count)
diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py
index 106d89b..aa28d8e 100644
--- a/pcs/test/test_lib_sbd.py
+++ b/pcs/test/test_lib_sbd.py
@@ -6,16 +6,18 @@ from __future__ import (
)
import json
-from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.misc import outdent
+from pcs.test.tools.pcs_unittest import TestCase, mock
-from pcs.test.tools.pcs_unittest import mock
from pcs.test.tools.assertions import (
assert_raise_library_error,
assert_report_item_list_equal,
)
from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs import settings
from pcs.common import report_codes
+from pcs.lib import reports
from pcs.lib.errors import (
ReportItemSeverity as Severities,
ReportItem,
@@ -23,8 +25,9 @@ from pcs.lib.errors import (
)
from pcs.lib.node import NodeAddresses
from pcs.lib.external import (
- NodeCommunicator,
+ CommandRunner,
NodeCommunicationException,
+ NodeCommunicator,
NodeConnectionException,
)
import pcs.lib.sbd as lib_sbd
@@ -141,6 +144,87 @@ class EvenNumberOfNodesAndNoQdevice(TestCase):
))
+ at mock.patch("pcs.lib.sbd.is_device_set_local")
+ at mock.patch("pcs.lib.sbd.is_sbd_enabled")
+ at mock.patch("pcs.lib.sbd.is_sbd_installed")
+ at mock.patch("pcs.lib.sbd._even_number_of_nodes_and_no_qdevice")
+class IsAutoTieBreakerNeededTest(TestCase):
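+ # auto tie breaker is needed when there is an even number of nodes, no qdevice,
+ # SBD is enabled and no shared SBD device is set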
+ def setUp(self):
+ self.runner = "runner"
+ self.corosync_conf_facade = "facade"
+ self.node_num_modifier = 1
+ self.mock_nodes_and_qdevice = None
+ self.mock_sbd_installed = None
+ self.mock_sbd_enabled = None
+ self.mock_device_set = None
+
+ def set_mocks(
+ self, mock_nodes_and_qdevice, mock_sbd_installed, mock_sbd_enabled,
+ mock_device_set
+ ):
+ self.mock_nodes_and_qdevice = mock_nodes_and_qdevice
+ self.mock_sbd_installed = mock_sbd_installed
+ self.mock_sbd_enabled = mock_sbd_enabled
+ self.mock_device_set = mock_device_set
+
+ def common_test(
+ self, nodes_and_qdevice, sbd_installed, sbd_enabled, device_set, result
+ ):
+ self.mock_nodes_and_qdevice.return_value = nodes_and_qdevice
+ self.mock_sbd_installed.return_value = sbd_installed
+ self.mock_sbd_enabled.return_value = sbd_enabled
+ self.mock_device_set.return_value = device_set
+ self.assertEqual(
+ lib_sbd.is_auto_tie_breaker_needed(
+ self.runner, self.corosync_conf_facade, self.node_num_modifier
+ ),
+ result
+ )
+ self.mock_nodes_and_qdevice.assert_called_once_with(
+ self.corosync_conf_facade, self.node_num_modifier
+ )
+
+ def test_device_set(
+ self, mock_nodes_and_qdevice, mock_sbd_installed, mock_sbd_enabled,
+ mock_device_set
+ ):
+ self.set_mocks(
+ mock_nodes_and_qdevice, mock_sbd_installed, mock_sbd_enabled,
+ mock_device_set
+ )
+ self.common_test(True, True, True, True, False)
+
+ def test_no_device(
+ self, mock_nodes_and_qdevice, mock_sbd_installed, mock_sbd_enabled,
+ mock_device_set
+ ):
+ self.set_mocks(
+ mock_nodes_and_qdevice, mock_sbd_installed, mock_sbd_enabled,
+ mock_device_set
+ )
+ self.common_test(True, True, True, False, True)
+
+ def test_no_device_with_qdevice(
+ self, mock_nodes_and_qdevice, mock_sbd_installed, mock_sbd_enabled,
+ mock_device_set
+ ):
+ self.set_mocks(
+ mock_nodes_and_qdevice, mock_sbd_installed, mock_sbd_enabled,
+ mock_device_set
+ )
+ self.common_test(False, True, True, False, False)
+
+ def test_sbd_disabled(
+ self, mock_nodes_and_qdevice, mock_sbd_installed, mock_sbd_enabled,
+ mock_device_set
+ ):
+ self.set_mocks(
+ mock_nodes_and_qdevice, mock_sbd_installed, mock_sbd_enabled,
+ mock_device_set
+ )
+ self.common_test(True, True, False, False, False)
+
+
@mock.patch("pcs.lib.sbd.is_auto_tie_breaker_needed")
class AtbHasToBeEnabledTest(TestCase):
def setUp(self):
@@ -188,14 +272,16 @@ class AtbHasToBeEnabledTest(TestCase):
)
-
class CheckSbdTest(TestCase):
def test_success(self):
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
node = NodeAddresses("node1")
- lib_sbd.check_sbd(mock_communicator, node, "/dev/watchdog")
+ lib_sbd.check_sbd(
+ mock_communicator, node, "/dev/watchdog", ["/dev/sdb1", "/dev/sdc"]
+ )
mock_communicator.call_node.assert_called_once_with(
- node, "remote/check_sbd", "watchdog=%2Fdev%2Fwatchdog"
+ node, "remote/check_sbd", "watchdog=%2Fdev%2Fwatchdog&" +\
+ "device_list=%5B%22%2Fdev%2Fsdb1%22%2C+%22%2Fdev%2Fsdc%22%5D"
)
@@ -205,6 +291,7 @@ class CheckSbdOnNodeTest(TestCase):
self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
self.mock_rep = MockLibraryReportProcessor()
self.node = NodeAddresses("node1")
+ self.device_list = ["/dev/sdb1", "/dev/sdc"]
def test_success(self, mock_check_sbd):
mock_check_sbd.return_value = json.dumps({
@@ -217,10 +304,11 @@ class CheckSbdOnNodeTest(TestCase):
})
# if no exception was raised, it's fine
lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog"
+ self.mock_rep, self.mock_com, self.node, "watchdog",
+ self.device_list
)
mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog"
+ self.mock_com, self.node, "watchdog", self.device_list
)
assert_report_item_list_equal(
self.mock_rep.report_item_list,
@@ -238,11 +326,12 @@ class CheckSbdOnNodeTest(TestCase):
self.assertRaises(
NodeCommunicationException,
lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog"
+ self.mock_rep, self.mock_com, self.node, "watchdog",
+ self.device_list
)
)
mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog"
+ self.mock_com, self.node, "watchdog", self.device_list
)
self.assertEqual(0, len(self.mock_rep.report_item_list))
@@ -250,7 +339,8 @@ class CheckSbdOnNodeTest(TestCase):
mock_check_sbd.return_value = "invalid JSON"
assert_raise_library_error(
lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog"
+ self.mock_rep, self.mock_com, self.node, "watchdog",
+ self.device_list
),
(
Severities.ERROR,
@@ -259,7 +349,7 @@ class CheckSbdOnNodeTest(TestCase):
)
)
mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog"
+ self.mock_com, self.node, "watchdog", self.device_list
)
self.assertEqual(0, len(self.mock_rep.report_item_list))
@@ -274,7 +364,8 @@ class CheckSbdOnNodeTest(TestCase):
})
assert_raise_library_error(
lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog"
+ self.mock_rep, self.mock_com, self.node, "watchdog",
+ self.device_list
),
(
Severities.ERROR,
@@ -283,7 +374,7 @@ class CheckSbdOnNodeTest(TestCase):
)
)
mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog"
+ self.mock_com, self.node, "watchdog", self.device_list
)
self.assertEqual(0, len(self.mock_rep.report_item_list))
@@ -298,7 +389,8 @@ class CheckSbdOnNodeTest(TestCase):
})
assert_raise_library_error(
lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog"
+ self.mock_rep, self.mock_com, self.node, "watchdog",
+ self.device_list
),
(
Severities.ERROR,
@@ -307,7 +399,7 @@ class CheckSbdOnNodeTest(TestCase):
)
)
mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog"
+ self.mock_com, self.node, "watchdog", self.device_list
)
self.assertEqual(0, len(self.mock_rep.report_item_list))
@@ -324,7 +416,8 @@ class CheckSbdOnNodeTest(TestCase):
})
assert_raise_library_error(
lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog"
+ self.mock_rep, self.mock_com, self.node, "watchdog",
+ self.device_list
),
(
Severities.ERROR,
@@ -338,7 +431,95 @@ class CheckSbdOnNodeTest(TestCase):
)
)
mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog"
+ self.mock_com, self.node, "watchdog", self.device_list
+ )
+ self.assertEqual(0, len(self.mock_rep.report_item_list))
+
+ def test_one_device_does_not_exist(self, mock_check_sbd):
+ mock_check_sbd.return_value = json.dumps({
+ "sbd": {
+ "installed": True
+ },
+ "watchdog": {
+ "exist": True
+ },
+ "device_list": [
+ {
+ "path": "/dev/sbd",
+ "exist": True,
+ "block_device": True,
+ },
+ {
+ "path": "/dev/sdc",
+ "exist": False,
+ "block_device": False,
+ }
+ ]
+ })
+ assert_raise_library_error(
+ lambda: lib_sbd.check_sbd_on_node(
+ self.mock_rep, self.mock_com, self.node, "watchdog",
+ self.device_list
+ ),
+ (
+ Severities.ERROR,
+ report_codes.SBD_DEVICE_DOES_NOT_EXIST,
+ {
+ "device": "/dev/sdc",
+ "node": self.node.label,
+ }
+ )
+ )
+ mock_check_sbd.assert_called_once_with(
+ self.mock_com, self.node, "watchdog", self.device_list
+ )
+ self.assertEqual(0, len(self.mock_rep.report_item_list))
+
+ def test_devices_issues(self, mock_check_sbd):
+ mock_check_sbd.return_value = json.dumps({
+ "sbd": {
+ "installed": True
+ },
+ "watchdog": {
+ "exist": True
+ },
+ "device_list": [
+ {
+ "path": "/dev/sdb",
+ "exist": True,
+ "block_device": False,
+ },
+ {
+ "path": "/dev/sdc",
+ "exist": False,
+ "block_device": False,
+ }
+ ]
+ })
+ assert_raise_library_error(
+ lambda: lib_sbd.check_sbd_on_node(
+ self.mock_rep, self.mock_com, self.node, "watchdog",
+ self.device_list
+ ),
+ (
+ Severities.ERROR,
+ report_codes.SBD_DEVICE_DOES_NOT_EXIST,
+ {
+ "device": "/dev/sdc",
+ "node": self.node.label,
+ }
+ ),
+ (
+ Severities.ERROR,
+ report_codes.SBD_DEVICE_IS_NOT_BLOCK_DEVICE,
+ {
+ "device": "/dev/sdb",
+ "node": self.node.label,
+ }
+ )
+ )
+ mock_check_sbd.assert_called_once_with(
+ self.mock_com, self.node, "watchdog", self.device_list
)
self.assertEqual(0, len(self.mock_rep.report_item_list))
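The per-device validation these tests expect can be sketched as below; only the response structure and the report data come from the tests, the function name and report ordering are assumptions:

    def sbd_device_problems(check_sbd_response, node_label):
        """Return (report_code, info) pairs for devices that fail validation."""
        problems = []
        for device in check_sbd_response.get("device_list", []):
            if not device["exist"]:
                problems.append((
                    "SBD_DEVICE_DOES_NOT_EXIST",
                    {"device": device["path"], "node": node_label},
                ))
            elif not device["block_device"]:
                problems.append((
                    "SBD_DEVICE_IS_NOT_BLOCK_DEVICE",
                    {"device": device["path"], "node": node_label},
                ))
        return problems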
@@ -353,7 +534,8 @@ class CheckSbdOnNodeTest(TestCase):
})
assert_raise_library_error(
lambda: lib_sbd.check_sbd_on_node(
- self.mock_rep, self.mock_com, self.node, "watchdog"
+ self.mock_rep, self.mock_com, self.node, "watchdog",
+ self.device_list
),
(
Severities.ERROR,
@@ -362,7 +544,7 @@ class CheckSbdOnNodeTest(TestCase):
)
)
mock_check_sbd.assert_called_once_with(
- self.mock_com, self.node, "watchdog"
+ self.mock_com, self.node, "watchdog", self.device_list
)
self.assertEqual(0, len(self.mock_rep.report_item_list))
@@ -374,16 +556,28 @@ class CheckSbdOnAllNodesTest(TestCase):
mock_rep = MockLibraryReportProcessor()
node_list = [NodeAddresses("node" + str(i)) for i in range(2)]
data = {
- node_list[0]: "/dev/watchdog1",
- node_list[1]: "/dev/watchdog2"
+ node_list[0]: {
+ "watchdog": "/dev/watchdog1",
+ "device_list": ["/dev/sdb", "/dev/vda1"],
+ },
+ node_list[1]: {
+ "watchdog": "/dev/watchdog2",
+ "device_list": ["/dev/sda2"],
+ }
}
lib_sbd.check_sbd_on_all_nodes(mock_rep, mock_com, data)
items = sorted(data.items())
mock_func.assert_called_once_with(
lib_sbd.check_sbd_on_node,
[
- ([mock_rep, mock_com, node, watchdog], {})
- for node, watchdog in items
+ (
+ [
+ mock_rep, mock_com, node, data["watchdog"],
+ data["device_list"]
+ ],
+ {}
+ )
+ for node, data in items
]
)
@@ -438,6 +632,33 @@ SBD_WATCHDOG_TIMEOUT=0
)]
)
+ def test_with_devices(self, mock_set_sbd_cfg):
+ cfg_in = {
+ "SBD_WATCHDOG_DEV": "/dev/watchdog",
+ "SBD_WATCHDOG_TIMEOUT": "0",
+ }
+ cfg_out = """# This file has been generated by pcs.
+SBD_DEVICE="/dev/sdb;/dev/vda"
+SBD_OPTS="-n node1"
+SBD_WATCHDOG_DEV=/my/watchdog
+SBD_WATCHDOG_TIMEOUT=0
+"""
+ lib_sbd.set_sbd_config_on_node(
+ self.mock_rep, self.mock_com, self.node, cfg_in, "/my/watchdog",
+ ["/dev/sdb", "/dev/vda"]
+ )
+ mock_set_sbd_cfg.assert_called_once_with(
+ self.mock_com, self.node, cfg_out
+ )
+ assert_report_item_list_equal(
+ self.mock_rep.report_item_list,
+ [(
+ Severities.INFO,
+ report_codes.SBD_CONFIG_ACCEPTED_BY_NODE,
+ {"node": self.node.label}
+ )]
+ )
+
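The SBD_DEVICE serialization visible in cfg_out above (device paths joined with ';', value quoted) amounts to this one-liner, shown here only for illustration:

    def sbd_device_line(device_list):
        return 'SBD_DEVICE="{0}"'.format(";".join(device_list))

    assert sbd_device_line(["/dev/sdb", "/dev/vda"]) == 'SBD_DEVICE="/dev/sdb;/dev/vda"'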
@mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
class SetSbdConfigOnAllNodesTest(TestCase):
@@ -448,18 +669,27 @@ class SetSbdConfigOnAllNodesTest(TestCase):
(NodeAddresses("node" + str(i)), "/dev/watchdog" + str(i))
for i in range(5)
])
+ device_dict = dict([
+ (NodeAddresses("node" + str(i)), ["/dev/sdb" + str(i)])
+ for i in range(5)
+ ])
node_list = list(watchdog_dict.keys())
config = {
"opt1": "val1",
"opt2": "val2"
}
lib_sbd.set_sbd_config_on_all_nodes(
- mock_rep, mock_com, node_list, config, watchdog_dict
+ mock_rep, mock_com, node_list, config, watchdog_dict, device_dict
)
mock_func.assert_called_once_with(
lib_sbd.set_sbd_config_on_node,
[
- ([mock_rep, mock_com, node, config, watchdog_dict[node]], {})
+ (
+ [
+ mock_rep, mock_com, node, config, watchdog_dict[node],
+ device_dict[node]
+ ], {}
+ )
for node in node_list
]
)
@@ -743,3 +973,275 @@ class IsSbdInstalledTest(TestCase):
mock_is_service_installed.assert_called_once_with(mock_obj, "sbd")
mock_sbd_name.assert_called_once_with()
+
+class InitializeBlockDeviceTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock()
+ self.mock_rep = MockLibraryReportProcessor()
+
+ def test_success(self):
+ device_list = ["/dev/sdb", "/dev/vda"]
+ option_dict = {
+ "watchdog-timeout": "10", # -1
+ "loop-timeout": "1", # -3
+ }
+ self.mock_runner.run.return_value = "", "", 0
+ lib_sbd.initialize_block_devices(
+ self.mock_rep, self.mock_runner, device_list, option_dict
+ )
+ cmd = [
+ settings.sbd_binary, "-d", "/dev/sdb", "-d", "/dev/vda", "-3", "1",
+ "-1", "10", "create"
+ ]
+ self.mock_runner.run.assert_called_once_with(cmd)
+ assert_report_item_list_equal(
+ self.mock_rep.report_item_list,
+ [
+ (
+ Severities.INFO,
+ report_codes.SBD_DEVICE_INITIALIZATION_STARTED,
+ {"device_list": device_list}
+ ),
+ (
+ Severities.INFO,
+ report_codes.SBD_DEVICE_INITIALIZATION_SUCCESS,
+ {"device_list": device_list}
+ ),
+ ]
+ )
+
+ def test_failed(self):
+ device_list = ["/dev/sdb", "/dev/vda"]
+ option_dict = {
+ "watchdog-timeout": "10", # -1
+ "loop-timeout": "1", # -3
+ }
+ error_msg = "error"
+ self.mock_runner.run.return_value = "", error_msg, 1
+ assert_raise_library_error(
+ lambda: lib_sbd.initialize_block_devices(
+ self.mock_rep, self.mock_runner, device_list, option_dict
+ ),
+ (
+ Severities.ERROR,
+ report_codes.SBD_DEVICE_INITIALIZATION_ERROR,
+ {
+ "device_list": device_list,
+ "reason": error_msg,
+ }
+ )
+ )
+ cmd = [
+ settings.sbd_binary, "-d", "/dev/sdb", "-d", "/dev/vda", "-3", "1",
+ "-1", "10", "create"
+ ]
+ self.mock_runner.run.assert_called_once_with(cmd)
+ assert_report_item_list_equal(
+ self.mock_rep.report_item_list,
+ [(
+ Severities.INFO,
+ report_codes.SBD_DEVICE_INITIALIZATION_STARTED,
+ {"device_list": device_list}
+ )]
+ )
+
+
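A minimal sketch of how the expected command above can be assembled; the flag mapping ('watchdog-timeout' -> -1, 'loop-timeout' -> -3) is taken from the comments in the test, while the helper name, the sbd path and the sorting are assumptions:

    OPTION_FLAGS = {
        "watchdog-timeout": "-1",
        "loop-timeout": "-3",
    }

    def build_sbd_create_cmd(sbd_binary, device_list, option_dict):
        cmd = [sbd_binary]
        for device in device_list:
            cmd += ["-d", device]
        for name in sorted(option_dict):
            cmd += [OPTION_FLAGS[name], option_dict[name]]
        return cmd + ["create"]

    assert build_sbd_create_cmd(
        "/usr/sbin/sbd",  # stands in for settings.sbd_binary
        ["/dev/sdb", "/dev/vda"],
        {"watchdog-timeout": "10", "loop-timeout": "1"},
    ) == [
        "/usr/sbin/sbd", "-d", "/dev/sdb", "-d", "/dev/vda",
        "-3", "1", "-1", "10", "create",
    ]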
+@mock.patch("os.path.exists")
+@mock.patch("pcs.lib.sbd.get_local_sbd_config")
+class GetLocalSbdDeviceListTest(TestCase):
+ def test_device_not_defined(self, mock_sbd_config, mock_config_exists):
+ mock_config_exists.return_value = True
+ mock_sbd_config.return_value = outdent("""
+ SBD_WATCHDOG=/dev/watchdog
+ SBD_WATCHDOG_TIMEOUT=10
+ """)
+ self.assertEqual([], lib_sbd.get_local_sbd_device_list())
+ mock_config_exists.assert_called_once_with(settings.sbd_config)
+ mock_sbd_config.assert_called_once_with()
+
+ def test_no_device(self, mock_sbd_config, mock_config_exists):
+ mock_config_exists.return_value = True
+ mock_sbd_config.return_value = outdent("""
+ SBD_WATCHDOG=/dev/watchdog
+ SBD_WATCHDOG_TIMEOUT=10
+ SBD_DEVICE=""
+ """)
+ self.assertEqual([], lib_sbd.get_local_sbd_device_list())
+ mock_config_exists.assert_called_once_with(settings.sbd_config)
+ mock_sbd_config.assert_called_once_with()
+
+ def test_one_device(self, mock_sbd_config, mock_config_exists):
+ mock_config_exists.return_value = True
+ mock_sbd_config.return_value = outdent("""
+ SBD_WATCHDOG=/dev/watchdog
+ SBD_WATCHDOG_TIMEOUT=10
+ SBD_DEVICE="/dev/vda"
+ """)
+ self.assertEqual(
+ ["/dev/vda"], lib_sbd.get_local_sbd_device_list()
+ )
+ mock_config_exists.assert_called_once_with(settings.sbd_config)
+ mock_sbd_config.assert_called_once_with()
+
+ def test_multiple_devices(self, mock_sbd_config, mock_config_exists):
+ mock_config_exists.return_value = True
+ mock_sbd_config.return_value = outdent("""
+ SBD_WATCHDOG=/dev/watchdog
+ SBD_WATCHDOG_TIMEOUT=10
+ SBD_DEVICE="/dev/vda;/dev/sda"
+ """)
+ self.assertEqual(
+ ["/dev/vda", "/dev/sda"], lib_sbd.get_local_sbd_device_list()
+ )
+ mock_config_exists.assert_called_once_with(settings.sbd_config)
+ mock_sbd_config.assert_called_once_with()
+
+ def test_config_does_not_exist(self, mock_sbd_config, mock_config_exists):
+ mock_config_exists.return_value = False
+ self.assertEqual([], lib_sbd.get_local_sbd_device_list())
+ mock_config_exists.assert_called_once_with(settings.sbd_config)
+ self.assertEqual(0, mock_sbd_config.call_count)
+
+ def test_config_read_error(self, mock_sbd_config, mock_config_exists):
+ mock_config_exists.return_value = True
+ node = "local node"
+ error = "error string"
+ mock_sbd_config.side_effect = LibraryError(
+ reports.unable_to_get_sbd_config(node, error)
+ )
+ assert_raise_library_error(
+ lib_sbd.get_local_sbd_device_list,
+ (
+ Severities.ERROR,
+ report_codes.UNABLE_TO_GET_SBD_CONFIG,
+ {
+ "node": node,
+ "reason": error,
+ }
+ )
+ )
+ mock_config_exists.assert_called_once_with(settings.sbd_config)
+ mock_sbd_config.assert_called_once_with()
+
+
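The SBD_DEVICE parsing behaviour these tests describe (quotes stripped, devices ';'-separated, missing or empty values giving an empty list) can be sketched as follows; the real code reads settings.sbd_config via get_local_sbd_config, so this helper is illustrative only:

    def parse_sbd_device(config_text):
        for line in config_text.splitlines():
            if line.strip().startswith("SBD_DEVICE="):
                value = line.split("=", 1)[1].strip().strip('"')
                return [dev for dev in value.split(";") if dev]
        return []

    assert parse_sbd_device('SBD_DEVICE="/dev/vda;/dev/sda"\n') == ["/dev/vda", "/dev/sda"]
    assert parse_sbd_device('SBD_DEVICE=""\n') == []
    assert parse_sbd_device('SBD_WATCHDOG=/dev/watchdog\n') == []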
+@mock.patch("pcs.lib.sbd.get_local_sbd_device_list")
+class IsDeviceSetLocalTest(TestCase):
+ def test_no_device(self, mock_device_list):
+ mock_device_list.return_value = []
+ self.assertFalse(lib_sbd.is_device_set_local())
+ mock_device_list.assert_called_once_with()
+
+ def test_one_device(self, mock_device_list):
+ mock_device_list.return_value = ["device1"]
+ self.assertTrue(lib_sbd.is_device_set_local())
+ mock_device_list.assert_called_once_with()
+
+ def test_multiple_devices(self, mock_device_list):
+ mock_device_list.return_value = ["device1", "device2"]
+ self.assertTrue(lib_sbd.is_device_set_local())
+ mock_device_list.assert_called_once_with()
+
+
+class GetDeviceMessagesInfoTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+
+ def test_success(self):
+ output = "out"
+ device = "device"
+ self.mock_runner.run.return_value = output, "", 0
+ self.assertEqual(
+ output, lib_sbd.get_device_messages_info(self.mock_runner, device)
+ )
+ self.mock_runner.run.assert_called_once_with(
+ [settings.sbd_binary, "-d", device, "list"]
+ )
+
+ def test_failed(self):
+ output = "out"
+ device = "device"
+ self.mock_runner.run.return_value = output, "", 1
+ assert_raise_library_error(
+ lambda: lib_sbd.get_device_messages_info(self.mock_runner, device),
+ (
+ Severities.ERROR,
+ report_codes.SBD_DEVICE_LIST_ERROR,
+ {
+ "device": device,
+ "reason": output,
+ }
+ )
+ )
+ self.mock_runner.run.assert_called_once_with(
+ [settings.sbd_binary, "-d", device, "list"]
+ )
+
+
+class GetDeviceSbdHeaderDumpTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+
+ def test_success(self):
+ output = "out"
+ device = "device"
+ self.mock_runner.run.return_value = output, "", 0
+ self.assertEqual(
+ output, lib_sbd.get_device_sbd_header_dump(self.mock_runner, device)
+ )
+ self.mock_runner.run.assert_called_once_with(
+ [settings.sbd_binary, "-d", device, "dump"]
+ )
+
+ def test_failed(self):
+ output = "out"
+ device = "device"
+ self.mock_runner.run.return_value = output, "", 1
+ assert_raise_library_error(
+ lambda: lib_sbd.get_device_sbd_header_dump(self.mock_runner, device),
+ (
+ Severities.ERROR,
+ report_codes.SBD_DEVICE_DUMP_ERROR,
+ {
+ "device": device,
+ "reason": output,
+ }
+ )
+ )
+ self.mock_runner.run.assert_called_once_with(
+ [settings.sbd_binary, "-d", device, "dump"]
+ )
+
+
+class SetMessageTest(TestCase):
+ def setUp(self):
+ self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+
+ def test_success(self):
+ self.mock_runner.run.return_value = "", "", 0
+ lib_sbd.set_message(self.mock_runner, "device", "node", "test")
+ self.mock_runner.run.assert_called_once_with([
+ settings.sbd_binary, "-d", "device", "message", "node", "test"
+ ])
+
+ def test_failure(self):
+ error = "error"
+ self.mock_runner.run.return_value = "", error, 1
+ assert_raise_library_error(
+ lambda: lib_sbd.set_message(
+ self.mock_runner, "device", "node", "test"
+ ),
+ (
+ Severities.ERROR,
+ report_codes.SBD_DEVICE_MESSAGE_ERROR,
+ {
+ "device": "device",
+ "node": "node",
+ "message": "test",
+ "reason": error,
+ }
+ )
+ )
+ self.mock_runner.run.assert_called_once_with([
+ settings.sbd_binary, "-d", "device", "message", "node", "test"
+ ])
+
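For reference, the sbd invocations exercised by the classes above follow this shape; in the real code they are run through pcs' CommandRunner and the binary path is settings.sbd_binary, so this grouping is only a sketch:

    def sbd_device_cmds(sbd_binary, device, node, message):
        return {
            "list": [sbd_binary, "-d", device, "list"],
            "dump": [sbd_binary, "-d", device, "dump"],
            "message": [sbd_binary, "-d", device, "message", node, message],
        }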
diff --git a/pcs/test/test_lib_tools.py b/pcs/test/test_lib_tools.py
index 606cb05..3b84bc9 100644
--- a/pcs/test/test_lib_tools.py
+++ b/pcs/test/test_lib_tools.py
@@ -28,6 +28,9 @@ ANOTHER ONE="complex value"
}
self.assertEqual(expected, tools.environment_file_to_dict(data))
+ def test_empty_string(self):
+ self.assertEqual({}, tools.environment_file_to_dict(""))
+
class DictToEnvironmentFileTest(TestCase):
def test_success(self):
diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py
index 137c7c7..1935848 100644
--- a/pcs/test/test_node.py
+++ b/pcs/test/test_node.py
@@ -6,188 +6,29 @@ from __future__ import (
)
import shutil
-from pcs.test.tools import pcs_unittest as unittest
-from pcs.test.tools.pcs_unittest import mock
from pcs import node
from pcs.test.tools.assertions import AssertPcsMixin
from pcs.test.tools.misc import (
ac,
get_test_resource as rc,
+ outdent,
)
from pcs.test.tools.pcs_runner import (
pcs,
PcsRunner,
)
+from pcs.test.tools.pcs_unittest import TestCase, mock
from pcs import utils
empty_cib = rc("cib-empty-withnodes.xml")
temp_cib = rc("temp-cib.xml")
-class NodeTest(unittest.TestCase):
+class NodeTest(TestCase):
def setUp(self):
shutil.copy(empty_cib, temp_cib)
- def test_node_maintenance(self):
- output, _ = pcs(temp_cib, "property")
- expected_out = """\
-Cluster Properties:
-"""
- ac(expected_out, output)
- output, returnVal = pcs(temp_cib, "node maintenance rh7-1")
- ac("", output)
- self.assertEqual(returnVal, 0)
- output, _ = pcs(temp_cib, "property")
- expected_out = """\
-Cluster Properties:
-Node Attributes:
- rh7-1: maintenance=on
-"""
- ac(expected_out, output)
-
- output, returnVal = pcs(temp_cib, "node maintenance rh7-1")
- ac("", output)
- self.assertEqual(returnVal, 0)
- output, _ = pcs(temp_cib, "property")
- expected_out = """\
-Cluster Properties:
-Node Attributes:
- rh7-1: maintenance=on
-"""
- ac(expected_out, output)
-
- output, returnVal = pcs(temp_cib, "node maintenance --all")
- ac("", output)
- self.assertEqual(returnVal, 0)
- output, _ = pcs(temp_cib, "property")
- expected_out = """\
-Cluster Properties:
-Node Attributes:
- rh7-1: maintenance=on
- rh7-2: maintenance=on
-"""
- ac(expected_out, output)
-
- output, returnVal = pcs(temp_cib, "node unmaintenance rh7-2 rh7-1")
- ac("", output)
- self.assertEqual(returnVal, 0)
- output, _ = pcs(temp_cib, "property")
- expected_out = """\
-Cluster Properties:
-"""
- ac(expected_out, output)
-
- output, returnVal = pcs(temp_cib, "node maintenance rh7-1 rh7-2")
- ac("", output)
- self.assertEqual(returnVal, 0)
- output, _ = pcs(temp_cib, "property")
- expected_out = """\
-Cluster Properties:
-Node Attributes:
- rh7-1: maintenance=on
- rh7-2: maintenance=on
-"""
- ac(expected_out, output)
-
- output, returnVal = pcs(
- temp_cib, "node maintenance nonexistant-node and-another"
- )
- self.assertEqual(returnVal, 1)
- self.assertEqual(
- output,
- "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
- "Error: Node 'and-another' does not appear to exist in configuration\n"
- )
- output, _ = pcs(temp_cib, "property")
- expected_out = """\
-Cluster Properties:
-Node Attributes:
- rh7-1: maintenance=on
- rh7-2: maintenance=on
-"""
- ac(expected_out, output)
-
- output, returnVal = pcs(temp_cib, "node unmaintenance rh7-1")
- ac("", output)
- self.assertEqual(returnVal, 0)
- output, _ = pcs(temp_cib, "property")
- expected_out = """\
-Cluster Properties:
-Node Attributes:
- rh7-2: maintenance=on
-"""
- ac(expected_out, output)
-
- output, returnVal = pcs(temp_cib, "node unmaintenance rh7-1")
- ac("", output)
- self.assertEqual(returnVal, 0)
- output, _ = pcs(temp_cib, "property")
- expected_out = """\
-Cluster Properties:
-Node Attributes:
- rh7-2: maintenance=on
-"""
- ac(expected_out, output)
-
- output, returnVal = pcs(temp_cib, "node unmaintenance --all")
- ac("", output)
- self.assertEqual(returnVal, 0)
- output, _ = pcs(temp_cib, "property")
- expected_out = """\
-Cluster Properties:
-"""
- ac(expected_out, output)
-
- output, returnVal = pcs(
- temp_cib, "node unmaintenance nonexistant-node and-another"
- )
- self.assertEqual(returnVal, 1)
- self.assertEqual(
- output,
- "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
- "Error: Node 'and-another' does not appear to exist in configuration\n"
- )
- output, _ = pcs(temp_cib, "property")
- expected_out = """\
-Cluster Properties:
-"""
- ac(expected_out, output)
-
- def test_node_standby(self):
- output, returnVal = pcs(temp_cib, "node standby rh7-1")
- ac(output, "")
- self.assertEqual(returnVal, 0)
-
- # try to standby node which is already in standby mode
- output, returnVal = pcs(temp_cib, "node standby rh7-1")
- ac(output, "")
- self.assertEqual(returnVal, 0)
-
- output, returnVal = pcs(temp_cib, "node unstandby rh7-1")
- ac(output, "")
- self.assertEqual(returnVal, 0)
-
- # try to unstandby node which is no in standby mode
- output, returnVal = pcs(temp_cib, "node unstandby rh7-1")
- ac(output, "")
- self.assertEqual(returnVal, 0)
-
- output, returnVal = pcs(temp_cib, "node standby nonexistant-node")
- self.assertEqual(
- output,
- "Error: node 'nonexistant-node' does not appear to exist in configuration\n"
- )
- self.assertEqual(returnVal, 1)
-
- output, returnVal = pcs(temp_cib, "node unstandby nonexistant-node")
- self.assertEqual(
- output,
- "Error: node 'nonexistant-node' does not appear to exist in configuration\n"
- )
- self.assertEqual(returnVal, 1)
-
-
def test_node_utilization_set(self):
output, returnVal = pcs(temp_cib, "node utilization rh7-1 test1=10")
ac("", output)
@@ -301,7 +142,339 @@ Error: Value of utilization attribute must be integer: 'test=int'
self.assertEqual(1, returnVal)
-class NodeAttributeTest(unittest.TestCase, AssertPcsMixin):
+class NodeStandby(TestCase, AssertPcsMixin):
+ def setUp(self):
+ shutil.copy(rc("cib-empty-with3nodes.xml"), temp_cib)
+ self.pcs_runner = PcsRunner(temp_cib)
+
+ def fixture_standby_all(self):
+ self.assert_pcs_success(
+ "node standby --all"
+ )
+ self.assert_standby_all()
+
+ def assert_standby_none(self):
+ self.assert_pcs_success(
+ "node attribute",
+ "Node Attributes:\n"
+ )
+
+ def assert_standby_all(self):
+ self.assert_pcs_success(
+ "node attribute",
+ outdent(
+ """\
+ Node Attributes:
+ rh7-1: standby=on
+ rh7-2: standby=on
+ rh7-3: standby=on
+ """
+ )
+ )
+
+ def test_local_node(self):
+ self.assert_standby_none()
+ self.assert_pcs_fail(
+ "node standby",
+ "Error: Node(s) must be specified if -f is used\n"
+ )
+ self.assert_standby_none()
+
+ self.fixture_standby_all()
+ self.assert_pcs_fail(
+ "node unstandby",
+ "Error: Node(s) must be specified if -f is used\n"
+ )
+ self.assert_standby_all()
+
+ def test_one_bad_node(self):
+ self.assert_standby_none()
+ self.assert_pcs_fail(
+ "node standby nonexistant-node",
+ "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
+ )
+ self.assert_standby_none()
+
+ self.fixture_standby_all()
+ self.assert_pcs_fail(
+ "node unstandby nonexistant-node",
+ "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
+ )
+ self.assert_standby_all()
+
+ def test_bad_node_cancels_all_changes(self):
+ self.assert_standby_none()
+ self.assert_pcs_fail(
+ "node standby rh7-1 nonexistant-node and-another rh7-2",
+ "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
+ "Error: Node 'and-another' does not appear to exist in configuration\n"
+ )
+ self.assert_standby_none()
+
+ self.fixture_standby_all()
+ self.assert_pcs_fail(
+ "node standby rh7-1 nonexistant-node and-another rh7-2",
+ "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
+ "Error: Node 'and-another' does not appear to exist in configuration\n"
+ )
+ self.assert_standby_all()
+
+ def test_all_nodes(self):
+ self.assert_standby_none()
+ self.assert_pcs_success(
+ "node standby --all"
+ )
+ self.fixture_standby_all()
+
+ self.assert_pcs_success(
+ "node unstandby --all"
+ )
+ self.assert_standby_none()
+
+ def test_one_node_with_repeat(self):
+ self.assert_standby_none()
+ self.assert_pcs_success(
+ "node standby rh7-1"
+ )
+ self.assert_pcs_success(
+ "node attribute",
+ outdent(
+ """\
+ Node Attributes:
+ rh7-1: standby=on
+ """
+ )
+ )
+ self.assert_pcs_success(
+ "node standby rh7-1"
+ )
+
+ self.fixture_standby_all()
+ self.assert_pcs_success(
+ "node unstandby rh7-1"
+ )
+ self.assert_pcs_success(
+ "node attribute",
+ outdent(
+ """\
+ Node Attributes:
+ rh7-2: standby=on
+ rh7-3: standby=on
+ """
+ )
+ )
+ self.assert_pcs_success(
+ "node unstandby rh7-1"
+ )
+
+ def test_more_nodes(self):
+ self.assert_standby_none()
+ self.assert_pcs_success(
+ "node standby rh7-1 rh7-2"
+ )
+ self.assert_pcs_success(
+ "node attribute",
+ outdent(
+ """\
+ Node Attributes:
+ rh7-1: standby=on
+ rh7-2: standby=on
+ """
+ )
+ )
+
+ self.fixture_standby_all()
+ self.assert_pcs_success(
+ "node unstandby rh7-1 rh7-2"
+ )
+ self.assert_pcs_success(
+ "node attribute",
+ outdent(
+ """\
+ Node Attributes:
+ rh7-3: standby=on
+ """
+ )
+ )
+
+ def test_all_and_nodelist(self):
+ self.assert_pcs_fail(
+ "node standby rh7-1 rh7-2 --all",
+ stdout_full="Error: Cannot specify both --all and a list of nodes.\n"
+ )
+ self.assert_pcs_fail(
+ "node unstandby rh7-1 rh7-2 --all",
+ stdout_full="Error: Cannot specify both --all and a list of nodes.\n"
+ )
+
+
+class NodeMaintenance(TestCase, AssertPcsMixin):
+ def setUp(self):
+ shutil.copy(rc("cib-empty-with3nodes.xml"), temp_cib)
+ self.pcs_runner = PcsRunner(temp_cib)
+
+ def fixture_maintenance_all(self):
+ self.assert_pcs_success(
+ "node maintenance --all"
+ )
+ self.assert_maintenance_all()
+
+ def assert_maintenance_none(self):
+ self.assert_pcs_success(
+ "node attribute",
+ "Node Attributes:\n"
+ )
+
+ def assert_maintenance_all(self):
+ self.assert_pcs_success(
+ "node attribute",
+ outdent(
+ """\
+ Node Attributes:
+ rh7-1: maintenance=on
+ rh7-2: maintenance=on
+ rh7-3: maintenance=on
+ """
+ )
+ )
+
+ def test_local_node(self):
+ self.assert_maintenance_none()
+ self.assert_pcs_fail(
+ "node maintenance",
+ "Error: Node(s) must be specified if -f is used\n"
+ )
+ self.assert_maintenance_none()
+
+ self.fixture_maintenance_all()
+ self.assert_pcs_fail(
+ "node unmaintenance",
+ "Error: Node(s) must be specified if -f is used\n"
+ )
+ self.assert_maintenance_all()
+
+ def test_one_bad_node(self):
+ self.assert_maintenance_none()
+ self.assert_pcs_fail(
+ "node maintenance nonexistant-node",
+ "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
+ )
+ self.assert_maintenance_none()
+
+ self.fixture_maintenance_all()
+ self.assert_pcs_fail(
+ "node unmaintenance nonexistant-node",
+ "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
+ )
+ self.assert_maintenance_all()
+
+ def test_bad_node_cancels_all_changes(self):
+ self.assert_maintenance_none()
+ self.assert_pcs_fail(
+ "node maintenance rh7-1 nonexistant-node and-another rh7-2",
+ "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
+ "Error: Node 'and-another' does not appear to exist in configuration\n"
+ )
+ self.assert_maintenance_none()
+
+ self.fixture_maintenance_all()
+ self.assert_pcs_fail(
+ "node maintenance rh7-1 nonexistant-node and-another rh7-2",
+ "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
+ "Error: Node 'and-another' does not appear to exist in configuration\n"
+ )
+ self.assert_maintenance_all()
+
+ def test_all_nodes(self):
+ self.assert_maintenance_none()
+ self.assert_pcs_success(
+ "node maintenance --all"
+ )
+ self.fixture_maintenance_all()
+
+ self.assert_pcs_success(
+ "node unmaintenance --all"
+ )
+ self.assert_maintenance_none()
+
+ def test_one_node_with_repeat(self):
+ self.assert_maintenance_none()
+ self.assert_pcs_success(
+ "node maintenance rh7-1"
+ )
+ self.assert_pcs_success(
+ "node attribute",
+ outdent(
+ """\
+ Node Attributes:
+ rh7-1: maintenance=on
+ """
+ )
+ )
+ self.assert_pcs_success(
+ "node maintenance rh7-1"
+ )
+
+ self.fixture_maintenance_all()
+ self.assert_pcs_success(
+ "node unmaintenance rh7-1"
+ )
+ self.assert_pcs_success(
+ "node attribute",
+ outdent(
+ """\
+ Node Attributes:
+ rh7-2: maintenance=on
+ rh7-3: maintenance=on
+ """
+ )
+ )
+ self.assert_pcs_success(
+ "node unmaintenance rh7-1"
+ )
+
+ def test_more_nodes(self):
+ self.assert_maintenance_none()
+ self.assert_pcs_success(
+ "node maintenance rh7-1 rh7-2"
+ )
+ self.assert_pcs_success(
+ "node attribute",
+ outdent(
+ """\
+ Node Attributes:
+ rh7-1: maintenance=on
+ rh7-2: maintenance=on
+ """
+ )
+ )
+
+ self.fixture_maintenance_all()
+ self.assert_pcs_success(
+ "node unmaintenance rh7-1 rh7-2"
+ )
+ self.assert_pcs_success(
+ "node attribute",
+ outdent(
+ """\
+ Node Attributes:
+ rh7-3: maintenance=on
+ """
+ )
+ )
+
+ def test_all_and_nodelist(self):
+ self.assert_pcs_fail(
+ "node maintenance rh7-1 rh7-2 --all",
+ stdout_full="Error: Cannot specify both --all and a list of nodes.\n"
+ )
+ self.assert_pcs_fail(
+ "node unmaintenance rh7-1 rh7-2 --all",
+ stdout_full="Error: Cannot specify both --all and a list of nodes.\n"
+ )
+
+
+class NodeAttributeTest(TestCase, AssertPcsMixin):
def setUp(self):
shutil.copy(empty_cib, temp_cib)
self.pcs_runner = PcsRunner(temp_cib)
@@ -323,11 +496,15 @@ class NodeAttributeTest(unittest.TestCase, AssertPcsMixin):
])
xml_lines.append('</nodes>')
+ utils_usefile_original = utils.usefile
+ utils_filename_original = utils.filename
utils.usefile = True
utils.filename = temp_cib
output, retval = utils.run([
"cibadmin", "--modify", '--xml-text', "\n".join(xml_lines)
])
+ utils.usefile = utils_usefile_original
+ utils.filename = utils_filename_original
assert output == ""
assert retval == 0
@@ -541,7 +718,7 @@ Node Attributes:
""
)
-class SetNodeUtilizationTest(unittest.TestCase, AssertPcsMixin):
+class SetNodeUtilizationTest(TestCase, AssertPcsMixin):
def setUp(self):
shutil.copy(empty_cib, temp_cib)
self.pcs_runner = PcsRunner(temp_cib)
@@ -556,7 +733,7 @@ class SetNodeUtilizationTest(unittest.TestCase, AssertPcsMixin):
"Error: missing key in '=1' option",
])
-class PrintNodeUtilizationTest(unittest.TestCase, AssertPcsMixin):
+class PrintNodeUtilizationTest(TestCase, AssertPcsMixin):
def setUp(self):
shutil.copy(empty_cib, temp_cib)
self.pcs_runner = PcsRunner(temp_cib)
diff --git a/pcs/test/test_quorum.py b/pcs/test/test_quorum.py
index 4f15d7f..c0769b5 100644
--- a/pcs/test/test_quorum.py
+++ b/pcs/test/test_quorum.py
@@ -177,17 +177,11 @@ Device:
def test_missing_required_options(self):
self.assert_pcs_fail(
"quorum device add model net",
- """\
-Error: required option 'algorithm' is missing
-Error: required option 'host' is missing
-"""
+ "Error: required options 'algorithm', 'host' are missing\n"
)
self.assert_pcs_fail(
"quorum device add model net --force",
- """\
-Error: required option 'algorithm' is missing
-Error: required option 'host' is missing
-"""
+ "Error: required options 'algorithm', 'host' are missing\n"
)
def test_bad_options(self):
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index 6c62676..96eae8f 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -5,21 +5,24 @@ from __future__ import (
unicode_literals,
)
-import os
+from lxml import etree
import re
import shutil
+from pcs.test.tools import pcs_unittest as unittest
from pcs.test.tools.assertions import AssertPcsMixin
+from pcs.test.tools.cib import get_assert_pcs_effect_mixin
+from pcs.test.tools.pcs_unittest import TestCase
from pcs.test.tools.misc import (
ac,
get_test_resource as rc,
outdent,
+ skip_unless_pacemaker_supports_bundle,
)
from pcs.test.tools.pcs_runner import (
pcs,
PcsRunner,
)
-from pcs.test.tools import pcs_unittest as unittest
from pcs import utils
from pcs import resource
@@ -33,7 +36,19 @@ temp_large_cib = rc("temp-cib-large.xml")
class ResourceDescribeTest(unittest.TestCase, AssertPcsMixin):
def setUp(self):
self.pcs_runner = PcsRunner(temp_cib)
- self.description = outdent("""\
+
+ def fixture_description(self, advanced=False):
+ advanced_params = (
+ """\
+ trace_ra: Set to 1 to turn on resource agent tracing (expect large output) The
+ trace output will be saved to trace_file, if set, or by default to
+ $HA_VARRUN/ra_trace/<type>/<id>.<action>.<timestamp> e.g.
+ $HA_VARRUN/ra_trace/oracle/db.start.2012-11-27.08:37:08
+ trace_file: Path to a file to store resource agent tracing log
+ """
+ )
+ return outdent(
+ """\
ocf:pacemaker:HealthCPU - System health CPU usage
Systhem health agent that measures the CPU idling and updates the #health-cpu attribute.
@@ -46,31 +61,34 @@ class ResourceDescribeTest(unittest.TestCase, AssertPcsMixin):
red_limit: Lower (!) limit of idle percentage to switch the health attribute
to red. I.e. the #health-cpu will go red if the %idle of the CPU
falls below 10%.
-
+{0}
Default operations:
- start: timeout=10
- stop: timeout=10
+ start: interval=0s timeout=10
+ stop: interval=0s timeout=10
monitor: interval=10 start-delay=0 timeout=10
- """
+ """.format(advanced_params if advanced else "")
)
-
def test_success(self):
self.assert_pcs_success(
"resource describe ocf:pacemaker:HealthCPU",
- self.description
+ self.fixture_description()
)
+ def test_full(self):
+ self.assert_pcs_success(
+ "resource describe ocf:pacemaker:HealthCPU --full",
+ self.fixture_description(True)
+ )
def test_success_guess_name(self):
self.assert_pcs_success(
"resource describe healthcpu",
"Assumed agent name 'ocf:pacemaker:HealthCPU' (deduced from"
+ " 'healthcpu')\n"
- + self.description
+ + self.fixture_description()
)
-
def test_nonextisting_agent(self):
self.assert_pcs_fail(
"resource describe ocf:pacemaker:nonexistent",
@@ -81,7 +99,6 @@ class ResourceDescribeTest(unittest.TestCase, AssertPcsMixin):
)
)
-
def test_nonextisting_agent_guess_name(self):
self.assert_pcs_fail(
"resource describe nonexistent",
@@ -91,7 +108,6 @@ class ResourceDescribeTest(unittest.TestCase, AssertPcsMixin):
)
)
-
def test_more_agents_guess_name(self):
self.assert_pcs_fail(
"resource describe dummy",
@@ -101,14 +117,12 @@ class ResourceDescribeTest(unittest.TestCase, AssertPcsMixin):
)
)
-
def test_not_enough_params(self):
self.assert_pcs_fail(
"resource describe",
stdout_start="\nUsage: pcs resource describe...\n"
)
-
def test_too_many_params(self):
self.assert_pcs_fail(
"resource describe agent1 agent2",
@@ -124,67 +138,59 @@ class ResourceTest(unittest.TestCase, AssertPcsMixin):
# Setups up a cluster with Resources, groups, master/slave resource & clones
def setupClusterA(self,temp_cib):
- line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
- line = "resource create --no-default-ops ClusterIP2 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
- line = "resource create --no-default-ops ClusterIP3 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
- line = "resource create --no-default-ops ClusterIP4 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
- line = "resource create --no-default-ops ClusterIP5 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
- line = "resource create --no-default-ops ClusterIP6 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
-
- line = "resource group add TestGroup1 ClusterIP"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
-
- line = "resource group add TestGroup2 ClusterIP2 ClusterIP3"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
-
- line = "resource clone ClusterIP4"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
-
- line = "resource master Master ClusterIP5"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP2 ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP3 ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP4 ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP5 ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP6 ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
+ self.assert_pcs_success("resource group add TestGroup1 ClusterIP")
+ self.assert_pcs_success(
+ "resource group add TestGroup2 ClusterIP2 ClusterIP3"
+ )
+ self.assert_pcs_success("resource clone ClusterIP4")
+ self.assert_pcs_success("resource master Master ClusterIP5")
def testCaseInsensitive(self):
o,r = pcs(temp_cib, "resource create --no-default-ops D0 dummy")
ac(o, "Error: Multiple agents match 'dummy', please specify full name: ocf:heartbeat:Dummy, ocf:pacemaker:Dummy\n")
assert r == 1
- o,r = pcs(temp_cib, "resource create --no-default-ops D1 systemhealth")
- ac(o, "Creating resource 'ocf:pacemaker:SystemHealth'\n")
- assert r == 0
+ self.assert_pcs_success(
+ "resource create --no-default-ops D1 systemhealth",
+ "Assumed agent name 'ocf:pacemaker:SystemHealth'"
+ " (deduced from 'systemhealth')\n"
+ )
- o,r = pcs(temp_cib, "resource create --no-default-ops D2 SYSTEMHEALTH")
- ac(o, "Creating resource 'ocf:pacemaker:SystemHealth'\n")
- assert r == 0
+ self.assert_pcs_success(
+ "resource create --no-default-ops D2 SYSTEMHEALTH",
+ "Assumed agent name 'ocf:pacemaker:SystemHealth'"
+ " (deduced from 'SYSTEMHEALTH')\n"
+ )
- o,r = pcs(temp_cib, "resource create --no-default-ops D3 ipaddr2 ip=1.1.1.1")
- ac(o, "Creating resource 'ocf:heartbeat:IPaddr2'\n")
- assert r == 0
+ self.assert_pcs_success(
+ "resource create --no-default-ops D3 ipaddr2 ip=1.1.1.1",
+ "Assumed agent name 'ocf:heartbeat:IPaddr2'"
+ " (deduced from 'ipaddr2')\n"
+ )
o,r = pcs(temp_cib, "resource create --no-default-ops D4 ipaddr3")
ac(o,"Error: Unable to find agent 'ipaddr3', try specifying its full name\n")
@@ -195,7 +201,6 @@ class ResourceTest(unittest.TestCase, AssertPcsMixin):
assert returnVal == 0, 'Unable to list resources'
assert output == "NO resources configured\n", "Bad output"
-
def testAddResourcesLargeCib(self):
output, returnVal = pcs(
temp_large_cib,
@@ -206,215 +211,32 @@ class ResourceTest(unittest.TestCase, AssertPcsMixin):
output, returnVal = pcs(temp_large_cib, "resource show dummy0")
assert returnVal == 0
- ac(output, """\
- Resource: dummy0 (class=ocf provider=heartbeat type=Dummy)
- Operations: start interval=0s timeout=20 (dummy0-start-interval-0s)
- stop interval=0s timeout=20 (dummy0-stop-interval-0s)
- monitor interval=10 timeout=20 (dummy0-monitor-interval-10)
-""")
-
- def testAddResources(self):
- line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
-
- line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 1
- assert output == "Error: unable to create resource/fence device 'ClusterIP', 'ClusterIP' already exists on this system\n",[output]
-
- line = "resource create --no-default-ops ClusterIP2 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
- line = "resource create --no-default-ops ClusterIP3 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
- line = "resource create --no-default-ops ClusterIP4 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
- line = "resource create --no-default-ops ClusterIP5 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
- line = "resource create --no-default-ops ClusterIP6 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=31s start interval=32s op stop interval=33s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
- line = "resource create --no-default-ops ClusterIP7 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s --disabled"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
-
-# Verify all resource have been added
- output, returnVal = pcs(temp_cib, "resource show")
- assert returnVal == 0
- ac(output, """\
- ClusterIP\t(ocf::heartbeat:IPaddr2):\tStopped
- ClusterIP2\t(ocf::heartbeat:IPaddr2):\tStopped
- ClusterIP3\t(ocf::heartbeat:IPaddr2):\tStopped
- ClusterIP4\t(ocf::heartbeat:IPaddr2):\tStopped
- ClusterIP5\t(ocf::heartbeat:IPaddr2):\tStopped
- ClusterIP6\t(ocf::heartbeat:IPaddr2):\tStopped
- ClusterIP7\t(ocf::heartbeat:IPaddr2):\tStopped (disabled)
-""")
-
- output, returnVal = pcs(temp_cib, "resource show --full")
- ac(output, """\
- Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
- Resource: ClusterIP2 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP2-monitor-interval-30s)
- Resource: ClusterIP3 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP3-monitor-interval-30s)
- Resource: ClusterIP4 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP4-monitor-interval-30s)
- Resource: ClusterIP5 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP5-monitor-interval-30s)
- Resource: ClusterIP6 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=31s (ClusterIP6-monitor-interval-31s)
- start interval=32s (ClusterIP6-start-interval-32s)
- stop interval=33s (ClusterIP6-stop-interval-33s)
- Resource: ClusterIP7 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Meta Attrs: target-role=Stopped
- Operations: monitor interval=30s (ClusterIP7-monitor-interval-30s)
-""")
- self.assertEqual(0, returnVal)
-
- output, returnVal = pcs(
- temp_cib,
- "resource create A ocf:heartbeat:Dummy op interval=10"
- )
- ac(output, """\
-Error: When using 'op' you must specify an operation name and at least one option
-""")
- self.assertEqual(1, returnVal)
-
- output, returnVal = pcs(
- temp_cib,
- "resource create A ocf:heartbeat:Dummy op interval=10 timeout=5"
- )
- ac(output, """\
-Error: When using 'op' you must specify an operation name after 'op'
-""")
- self.assertEqual(1, returnVal)
-
- output, returnVal = pcs(
- temp_cib,
- "resource create A ocf:heartbeat:Dummy op monitor interval=10 op interval=10 op start timeout=10"
- )
- ac(output, """\
-Error: When using 'op' you must specify an operation name and at least one option
-""")
- self.assertEqual(1, returnVal)
-
- output, returnVal = pcs(
- temp_cib,
- "resource create A ocf:heartbeat:Dummy op monitor"
- )
- ac(output, """\
-Error: When using 'op' you must specify an operation name and at least one option
-""")
- self.assertEqual(1, returnVal)
-
- output, returnVal = pcs(
- temp_cib,
- "resource create A ocf:heartbeat:Dummy op monitor interval=10 op stop op start timeout=10"
- )
- ac(output, """\
-Error: When using 'op' you must specify an operation name and at least one option
-""")
- self.assertEqual(1, returnVal)
-
- output, returnVal = pcs(
- temp_cib,
- "resource create A ocf:heartbeat:Dummy op monitor interval=10 timeout=10 op monitor interval=10 timeout=20"
- )
- ac(output, """\
-Error: operation monitor with interval 10s already specified for A:
-monitor interval=10 timeout=10 (A-monitor-interval-10)
-""")
- self.assertEqual(1, returnVal)
-
- output, returnVal = pcs(
- temp_cib,
- "resource create A ocf:heartbeat:Dummy op monitor interval=10 timeout=10 op stop interval=10 timeout=20"
- )
- ac(output, "")
- self.assertEqual(0, returnVal)
-
- output, returnVal = pcs(temp_cib, "resource show A")
- ac(output, """\
- Resource: A (class=ocf provider=heartbeat type=Dummy)
- Operations: start interval=0s timeout=20 (A-start-interval-0s)
- monitor interval=10 timeout=10 (A-monitor-interval-10)
- stop interval=10 timeout=20 (A-stop-interval-10)
-""")
- self.assertEqual(0, returnVal)
-
- def testAddBadResources(self):
- line = "resource create --no-default-ops bad_resource idontexist test=bad"
- output, returnVal = pcs(temp_cib, line)
- assert output == "Error: Unable to find agent 'idontexist', try specifying its full name\n",[output]
- assert returnVal == 1
-
- line = "resource create --no-default-ops bad_resource2 idontexist2 test4=bad3 --force"
- output, returnVal = pcs(temp_cib, line)
- ac(output, "Error: Unable to find agent 'idontexist2', try specifying its full name\n")
- assert returnVal == 1
-
- line = "resource create --no-default-ops bad_resource3 ocf:pacemaker:idontexist3 test=bad"
- output, returnVal = pcs(temp_cib, line)
- assert output == "Error: Unable to create resource 'ocf:pacemaker:idontexist3', it is not installed on this system (use --force to override)\n",[output]
- assert returnVal == 1
-
- line = "resource create --no-default-ops bad_resource4 ocf:pacemaker:idontexist4 test4=bad3 --force"
- output, returnVal = pcs(temp_cib, line)
- ac(output, "Warning: 'ocf:pacemaker:idontexist4' is not installed or does not provide valid metadata\n")
- assert returnVal == 0
-
- line = "resource show --full"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- ac(output, """\
- Resource: bad_resource4 (class=ocf provider=pacemaker type=idontexist4)
- Attributes: test4=bad3
- Operations: monitor interval=60s (bad_resource4-monitor-interval-60s)
-""")
-
- output, returnVal = pcs(
- temp_cib,
- "resource create dum:my ocf:heartbeat:Dummy"
- )
- assert returnVal == 1
- ac(output, "Error: invalid resource name 'dum:my', ':' is not a valid character for a resource name\n")
+ ac(output, outdent(
+ """\
+ Resource: dummy0 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy0-monitor-interval-10)
+ start interval=0s timeout=20 (dummy0-start-interval-0s)
+ stop interval=0s timeout=20 (dummy0-stop-interval-0s)
+ """
+ ))
def testDeleteResources(self):
-# Verify deleting resources works
- line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
+ # Verify deleting resources works
+ # Additional tests are in class BundleDeleteTest
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
line = 'resource delete'
output, returnVal = pcs(temp_cib, line)
assert returnVal == 1
assert output.startswith("\nUsage: pcs resource")
- line = "resource delete ClusterIP"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == "Deleting Resource - ClusterIP\n"
+ self.assert_pcs_success(
+ "resource delete ClusterIP",
+ "Deleting Resource - ClusterIP\n"
+ )
output, returnVal = pcs(temp_cib, "resource show ClusterIP")
assert returnVal == 1
@@ -424,29 +246,30 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
assert returnVal == 0
assert output == 'NO resources configured\n'
- output, returnVal = pcs(temp_cib, "resource delete ClusterIP")
- assert returnVal == 1
- ac(output, "Error: Resource 'ClusterIP' does not exist.\n")
+ self.assert_pcs_fail(
+ "resource delete ClusterIP",
+ "Error: Resource 'ClusterIP' does not exist.\n"
+ )
def testResourceShow(self):
- line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
-
- output, returnVal = pcs(temp_cib, "resource show ClusterIP")
- assert returnVal == 0
- ac(output, """\
- Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
-""")
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
+ self.assert_pcs_success("resource show ClusterIP", outdent(
+ """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=32 ip=192.168.0.99
+ Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
+ """
+ ))
def testResourceUpdate(self):
- line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
+ # see also BundleMiscCommands
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
line = 'resource update'
output, returnVal = pcs(temp_cib, line)
@@ -458,10 +281,11 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
assert output == ""
def testAddOperation(self):
- line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- ac(output,"")
- assert returnVal == 0
+ # see also BundleMiscCommands
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
o,r = pcs(temp_cib, "resource add_operation")
assert r == 1
@@ -520,14 +344,14 @@ Error: moni=tor does not appear to be a valid operation action
""")
assert returnVal == 1
- output, returnVal = pcs(temp_cib, "resource show ClusterIP")
- assert returnVal == 0
- ac(output, """\
- Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
- monitor interval=31s (ClusterIP-monitor-interval-31s)
-""")
+ self.assert_pcs_success("resource show ClusterIP", outdent(
+ """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=32 ip=192.168.0.99
+ Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
+ monitor interval=31s (ClusterIP-monitor-interval-31s)
+ """
+ ))
o, r = pcs(temp_cib, "resource create --no-default-ops OPTest ocf:heartbeat:Dummy op monitor interval=30s OCF_CHECK_LEVEL=1 op monitor interval=25s OCF_CHECK_LEVEL=1")
ac(o,"")
@@ -605,9 +429,13 @@ start interval=0s timeout=30s (OPTest2-start-interval-0s)
ac(o,"")
assert r == 0
- o, r = pcs(temp_cib, "resource show OPTest6")
- ac(o," Resource: OPTest6 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (OPTest6-monitor-interval-60s)\n monitor interval=30s OCF_CHECK_LEVEL=1 (OPTest6-monitor-interval-30s)\n")
- assert r == 0
+ self.assert_pcs_success("resource show OPTest6", outdent(
+ """\
+ Resource: OPTest6 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (OPTest6-monitor-interval-10)
+ monitor interval=30s OCF_CHECK_LEVEL=1 (OPTest6-monitor-interval-30s)
+ """
+ ))
o,r = pcs(temp_cib, "resource create --no-default-ops OPTest7 ocf:heartbeat:Dummy")
ac(o,"")
@@ -643,12 +471,15 @@ monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s)
ac(o,"")
assert r == 0
- o,r = pcs(temp_cib, "resource op add OCFTest1 monitor interval=31s")
- ac(o, """\
-Error: operation monitor already specified for OCFTest1, use --force to override:
-monitor interval=60s (OCFTest1-monitor-interval-60s)
-""")
- self.assertEqual(1, r)
+ self.assert_pcs_fail(
+ "resource op add OCFTest1 monitor interval=31s",
+ outdent(
+ """\
+ Error: operation monitor already specified for OCFTest1, use --force to override:
+ monitor interval=10 timeout=20 (OCFTest1-monitor-interval-10)
+ """
+ )
+ )
o,r = pcs(temp_cib, "resource op add OCFTest1 monitor interval=31s --force")
ac(o,"")
@@ -658,9 +489,17 @@ monitor interval=60s (OCFTest1-monitor-interval-60s)
ac(o,"")
assert r == 0
- o,r = pcs(temp_cib, "resource show OCFTest1")
- ac(o," Resource: OCFTest1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (OCFTest1-monitor-interval-60s)\n monitor interval=31s (OCFTest1-monitor-interval-31s)\n monitor interval=30s OCF_CHECK_LEVEL=15 (OCFTest1-monitor-interval-30s)\n")
- assert r == 0
+ self.assert_pcs_success(
+ "resource show OCFTest1",
+ outdent(
+ """\
+ Resource: OCFTest1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (OCFTest1-monitor-interval-10)
+ monitor interval=31s (OCFTest1-monitor-interval-31s)
+ monitor interval=30s OCF_CHECK_LEVEL=15 (OCFTest1-monitor-interval-30s)
+ """
+ )
+ )
o,r = pcs(temp_cib, "resource update OCFTest1 op monitor interval=61s OCF_CHECK_LEVEL=5")
ac(o,"")
@@ -686,50 +525,51 @@ monitor interval=60s (OCFTest1-monitor-interval-60s)
ac(o," Resource: OCFTest1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=35s OCF_CHECK_LEVEL=4 (OCFTest1-monitor-interval-35s)\n monitor interval=31s (OCFTest1-monitor-interval-31s)\n monitor interval=30s OCF_CHECK_LEVEL=15 (OCFTest1-monitor-interval-30s)\n")
assert r == 0
- output, retVal = pcs(
- temp_cib,
- "resource create --no-default-ops state ocf:pacemaker:Stateful"
+ self.assert_pcs_success(
+ "resource create --no-default-ops state ocf:pacemaker:Stateful",
+ "Warning: changing a monitor operation interval from 10 to 11 to"
+ " make the operation unique\n"
)
- ac(output, "")
- self.assertEqual(0, retVal)
- output, retVal = pcs(
- temp_cib, "resource op add state monitor interval=10"
+ self.assert_pcs_fail(
+ "resource op add state monitor interval=10",
+ outdent(
+ """\
+ Error: operation monitor with interval 10s already specified for state:
+ monitor interval=10 role=Master timeout=20 (state-monitor-interval-10)
+ """
+ )
)
- ac(output, """\
-Error: operation monitor already specified for state, use --force to override:
-monitor interval=60s (state-monitor-interval-60s)
-""")
- self.assertEqual(1, retVal)
- output, retVal = pcs(
- temp_cib, "resource op add state monitor interval=10 role=Started"
+ self.assert_pcs_fail(
+ "resource op add state monitor interval=10 role=Started",
+ outdent(
+ """\
+ Error: operation monitor with interval 10s already specified for state:
+ monitor interval=10 role=Master timeout=20 (state-monitor-interval-10)
+ """
+ )
)
- ac(output, """\
-Error: operation monitor already specified for state, use --force to override:
-monitor interval=60s (state-monitor-interval-60s)
-""")
- self.assertEqual(1, retVal)
- output, retVal = pcs(
- temp_cib, "resource op add state monitor interval=10 role=Master"
+ self.assert_pcs_success(
+ "resource op add state monitor interval=15 role=Master --force"
)
- ac(output, "")
- self.assertEqual(0, retVal)
- output, retVal = pcs(temp_cib, "resource show state")
- ac(output, """\
- Resource: state (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (state-monitor-interval-60s)
- monitor interval=10 role=Master (state-monitor-interval-10)
-""")
- self.assertEqual(0, retVal)
+ self.assert_pcs_success("resource show state", outdent(
+ """\
+ Resource: state (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (state-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (state-monitor-interval-11)
+ monitor interval=15 role=Master (state-monitor-interval-15)
+ """
+ ))
def testRemoveOperation(self):
- line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
+ # see also BundleMiscCommands
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
line = 'resource op add ClusterIP monitor interval=31s --force'
output, returnVal = pcs(temp_cib, line)
@@ -761,25 +601,24 @@ monitor interval=60s (state-monitor-interval-60s)
assert returnVal == 1
assert output == 'Error: Unable to find operation matching: monitor interval=30s\n'
- output, returnVal = pcs(temp_cib, "resource show ClusterIP")
- ac(output, """\
- Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=31s (ClusterIP-monitor-interval-31s)
-""")
- assert returnVal == 0
+ self.assert_pcs_success("resource show ClusterIP", outdent(
+ """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=32 ip=192.168.0.99
+ Operations: monitor interval=31s (ClusterIP-monitor-interval-31s)
+ """
+ ))
- line = 'resource op remove ClusterIP monitor interval=31s'
- output, returnVal = pcs(temp_cib, line)
- assert returnVal == 0
- assert output == ""
+ self.assert_pcs_success(
+ 'resource op remove ClusterIP monitor interval=31s'
+ )
- output, returnVal = pcs(temp_cib, "resource show ClusterIP")
- ac(output, """\
- Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
-""")
- assert returnVal == 0
+ self.assert_pcs_success("resource show ClusterIP", outdent(
+ """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=32 ip=192.168.0.99
+ """
+ ))
line = 'resource op add ClusterIP monitor interval=31s'
output, returnVal = pcs(temp_cib, line)
@@ -807,20 +646,20 @@ monitor interval=60s (state-monitor-interval-60s)
assert returnVal == 0
assert output == ""
- output, returnVal = pcs(temp_cib, "resource show ClusterIP")
- ac(output, """\
- Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: stop interval=0s timeout=34s (ClusterIP-stop-interval-0s)
- start interval=0s timeout=33s (ClusterIP-start-interval-0s)
-""")
- assert returnVal == 0
+ self.assert_pcs_success("resource show ClusterIP", outdent(
+ """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=32 ip=192.168.0.99
+ Operations: stop interval=0s timeout=34s (ClusterIP-stop-interval-0s)
+ start interval=0s timeout=33s (ClusterIP-start-interval-0s)
+ """
+ ))
def testUpdateOperation(self):
- line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
- output, returnVal = pcs(temp_cib, line)
- assert output == ""
- assert returnVal == 0
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
line = 'resource update ClusterIP op monitor interval=32s'
output, returnVal = pcs(temp_cib, line)
@@ -847,15 +686,14 @@ monitor interval=60s (state-monitor-interval-60s)
assert returnVal == 0
assert output == ""
- line = 'resource show ClusterIP'
- output, returnVal = pcs(temp_cib, line)
- ac(output, """\
- Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=33s (ClusterIP-monitor-interval-33s)
- start interval=30s timeout=180s (ClusterIP-start-interval-30s)
-""")
- assert returnVal == 0
+ self.assert_pcs_success("resource show ClusterIP", outdent(
+ """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=32 ip=192.168.0.99
+ Operations: monitor interval=33s (ClusterIP-monitor-interval-33s)
+ start interval=30s timeout=180s (ClusterIP-start-interval-30s)
+ """
+ ))
output, returnVal = pcs(
temp_cib,
@@ -864,15 +702,15 @@ monitor interval=60s (state-monitor-interval-60s)
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource show A")
- ac(output, """\
- Resource: A (class=ocf provider=heartbeat type=Dummy)
- Operations: start interval=0s timeout=20 (A-start-interval-0s)
- stop interval=0s timeout=20 (A-stop-interval-0s)
- monitor interval=10 (A-monitor-interval-10)
- monitor interval=20 (A-monitor-interval-20)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource show A", outdent(
+ """\
+ Resource: A (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 (A-monitor-interval-10)
+ monitor interval=20 (A-monitor-interval-20)
+ start interval=0s timeout=20 (A-start-interval-0s)
+ stop interval=0s timeout=20 (A-stop-interval-0s)
+ """
+ ))
output, returnVal = pcs(
temp_cib,
@@ -891,15 +729,15 @@ monitor interval=20 (A-monitor-interval-20)
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource show A")
- ac(output, """\
- Resource: A (class=ocf provider=heartbeat type=Dummy)
- Operations: start interval=0s timeout=20 (A-start-interval-0s)
- stop interval=0s timeout=20 (A-stop-interval-0s)
- monitor interval=11 (A-monitor-interval-11)
- monitor interval=20 (A-monitor-interval-20)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource show A", outdent(
+ """\
+ Resource: A (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=11 (A-monitor-interval-11)
+ monitor interval=20 (A-monitor-interval-20)
+ start interval=0s timeout=20 (A-start-interval-0s)
+ stop interval=0s timeout=20 (A-stop-interval-0s)
+ """
+ ))
output, returnVal = pcs(
temp_cib,
@@ -908,12 +746,7 @@ monitor interval=20 (A-monitor-interval-20)
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(
- temp_cib,
- "resource op remove B-monitor-interval-60s"
- )
- ac(output, "")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource op remove B-monitor-interval-10")
output, returnVal = pcs(temp_cib, "resource show B")
ac(output, """\
@@ -1043,9 +876,15 @@ monitor interval=20 (A-monitor-interval-20)
A3\t(ocf::heartbeat:Dummy):\tStopped
""")
- o,r = pcs(temp_cib, "resource delete AGroup")
- ac(o,"Removing group: AGroup (and all resources within group)\nStopping all resources in group: AGroup...\nDeleting Resource - A1\nDeleting Resource - A2\nDeleting Resource (and group) - A3\n")
- assert r == 0
+ self.assert_pcs_success("resource delete AGroup", outdent(
+ """\
+ Removing group: AGroup (and all resources within group)
+ Stopping all resources in group: AGroup...
+ Deleting Resource - A1
+ Deleting Resource - A2
+ Deleting Resource (and group) - A3
+ """
+ ))
o,r = pcs(temp_cib, "resource show")
assert r == 0
@@ -1057,13 +896,17 @@ monitor interval=20 (A-monitor-interval-20)
assert returnVal == 0
assert output == ""
- output, returnVal = pcs(temp_cib, "resource delete ClusterIP2")
- assert returnVal == 0
- assert output =='Deleting Resource - ClusterIP2\n'
+ self.assert_pcs_success(
+ "resource delete ClusterIP2",
+ "Deleting Resource - ClusterIP2\n"
+ )
- output, returnVal = pcs(temp_cib, "resource delete ClusterIP3")
- assert returnVal == 0
- assert output =="Removing Constraint - location-ClusterIP3-rh7-1-INFINITY\nDeleting Resource (and group) - ClusterIP3\n"
+ self.assert_pcs_success("resource delete ClusterIP3", outdent(
+ """\
+ Removing Constraint - location-ClusterIP3-rh7-1-INFINITY
+ Deleting Resource (and group) - ClusterIP3
+ """
+ ))
o,r = pcs(
temp_cib,
@@ -1094,10 +937,21 @@ monitor interval=20 (A-monitor-interval-20)
o,r = pcs(temp_cib, "resource group add AGroup A1 A2 A3 A4 A5")
assert r == 0
- o,r = pcs(temp_cib, "resource show AGroup")
- assert r == 0
- ac(o,' Group: AGroup\n Resource: A1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (A1-monitor-interval-60s)\n Resource: A2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (A2-monitor-interval-60s)\n Resource: A3 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (A3-monitor-interval-60s)\n Resource: A4 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (A4-m [...]
-
+ self.assert_pcs_success("resource show AGroup", outdent(
+ """\
+ Group: AGroup
+ Resource: A1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (A1-monitor-interval-10)
+ Resource: A2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (A2-monitor-interval-10)
+ Resource: A3 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (A3-monitor-interval-10)
+ Resource: A4 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (A4-monitor-interval-10)
+ Resource: A5 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (A5-monitor-interval-10)
+ """
+ ))
o,r = pcs(temp_cib, "resource ungroup Noexist")
assert r == 1
@@ -1150,11 +1004,23 @@ Ticket Constraints:
assert r == 1
ac(o,"Error: unable to find resource 'AGroup'\n")
- o,r = pcs(temp_cib, "resource show A1 A2 A3 A4 A5")
- assert r == 0
- ac(o,' Resource: A1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (A1-monitor-interval-60s)\n Resource: A2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (A2-monitor-interval-60s)\n Resource: A3 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (A3-monitor-interval-60s)\n Resource: A4 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (A4-monitor-interval-60s)\n R [...]
+ self.assert_pcs_success("resource show A1 A2 A3 A4 A5", outdent(
+ """\
+ Resource: A1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (A1-monitor-interval-10)
+ Resource: A2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (A2-monitor-interval-10)
+ Resource: A3 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (A3-monitor-interval-10)
+ Resource: A4 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (A4-monitor-interval-10)
+ Resource: A5 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (A5-monitor-interval-10)
+ """
+ ))
def testGroupAdd(self):
+ # see also BundleGroup
o,r = pcs(
temp_cib,
"resource create --no-default-ops A1 ocf:heartbeat:Dummy"
@@ -1455,11 +1321,13 @@ Ticket Constraints:
assert returnVal == 0
output, returnVal = pcs(temp_large_cib, "resource delete dummies")
- ac(output, """\
-Removing group: dummies (and all resources within group)
-Stopping all resources in group: dummies...
-Deleting Resource (and group) - dummylarge
-""")
+ ac(output, outdent(
+ """\
+ Removing group: dummies (and all resources within group)
+ Stopping all resources in group: dummies...
+ Deleting Resource (and group) - dummylarge
+ """
+ ))
assert returnVal == 0
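In hunks like the one above, the only difference between the old flush-left heredoc and the new expected string is leading whitespace: outdent() strips the indentation shared by all lines, so multi-line expectations can stay indented with the surrounding test code. A minimal standalone sketch of that idea using only the standard library (not the project's helper):

from textwrap import dedent

def outdent(text):
    # Illustrative only: remove the indentation common to every line so the
    # expected output can be written indented inside the test method.
    return dedent(text)

expected = outdent(
    """\
    Removing group: dummies (and all resources within group)
    Stopping all resources in group: dummies...
    Deleting Resource (and group) - dummylarge
    """
)
assert expected == (
    "Removing group: dummies (and all resources within group)\n"
    "Stopping all resources in group: dummies...\n"
    "Deleting Resource (and group) - dummylarge\n"
)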
def testGroupOrder(self):
@@ -1663,26 +1531,26 @@ Deleting Resource (and group) - dummylarge
Resources:
Resource: ClusterIP6 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
+ Attributes: cidr_netmask=32 ip=192.168.0.99
Operations: monitor interval=30s (ClusterIP6-monitor-interval-30s)
Group: TestGroup1
Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
+ Attributes: cidr_netmask=32 ip=192.168.0.99
Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
Group: TestGroup2
Resource: ClusterIP2 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
+ Attributes: cidr_netmask=32 ip=192.168.0.99
Operations: monitor interval=30s (ClusterIP2-monitor-interval-30s)
Resource: ClusterIP3 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
+ Attributes: cidr_netmask=32 ip=192.168.0.99
Operations: monitor interval=30s (ClusterIP3-monitor-interval-30s)
Clone: ClusterIP4-clone
Resource: ClusterIP4 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
+ Attributes: cidr_netmask=32 ip=192.168.0.99
Operations: monitor interval=30s (ClusterIP4-monitor-interval-30s)
Master: Master
Resource: ClusterIP5 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
+ Attributes: cidr_netmask=32 ip=192.168.0.99
Operations: monitor interval=30s (ClusterIP5-monitor-interval-30s)
Stonith Devices:
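The swapped "Attributes:" values in this hunk (cidr_netmask now printed before ip) recur throughout the updated expectations, and the same reordering appears later for meta attributes (test6= before test7=); the new listings are consistent with printing nvpairs sorted by name rather than in creation order. A tiny illustration of that ordering using values from this hunk (the alphabetical sort itself is an assumption inferred from the expected strings):

attrs = {"ip": "192.168.0.99", "cidr_netmask": "32"}
print(" ".join(
    "{0}={1}".format(name, value) for name, value in sorted(attrs.items())
))
# prints: cidr_netmask=32 ip=192.168.0.99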
@@ -1723,17 +1591,21 @@ Deleting Resource (and group) - dummylarge
ac(o,"")
assert r == 0
- o,r = pcs("resource --full")
- ac(o," Clone: D1-clone\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n")
- assert r == 0
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Clone: D1-clone
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ """
+ ))
- o,r = pcs("resource delete D1-clone")
- assert r == 0
- ac(o, """\
-Removing Constraint - location-D1-clone-rh7-1-INFINITY
-Removing Constraint - location-D1-rh7-1-INFINITY
-Deleting Resource - D1
-""")
+ self.assert_pcs_success("resource delete D1-clone", outdent(
+ """\
+ Removing Constraint - location-D1-clone-rh7-1-INFINITY
+ Removing Constraint - location-D1-rh7-1-INFINITY
+ Deleting Resource - D1
+ """
+ ))
o,r = pcs("resource --full")
assert r == 0
@@ -1745,9 +1617,10 @@ Deleting Resource - D1
ac(o, "")
assert r == 0
- o, r = pcs("resource delete d99")
- ac(o, "Deleting Resource - d99\n")
- assert r == 0
+ self.assert_pcs_success(
+ "resource delete d99",
+ "Deleting Resource - d99\n"
+ )
output, returnVal = pcs(temp_large_cib, "resource clone dummylarge")
ac(output, '')
@@ -1769,11 +1642,13 @@ Deleting Resource - D1
assert returnVal == 0
output, returnVal = pcs(temp_large_cib, "resource delete dummies")
- ac(output, """\
-Removing group: dummies (and all resources within group)
-Stopping all resources in group: dummies...
-Deleting Resource (and group and clone) - dummylarge
-""")
+ ac(output, outdent(
+ """\
+ Removing group: dummies (and all resources within group)
+ Stopping all resources in group: dummies...
+ Deleting Resource (and group and clone) - dummylarge
+ """
+ ))
assert returnVal == 0
def testMasterSlaveRemove(self):
@@ -1786,13 +1661,13 @@ Deleting Resource (and group and clone) - dummylarge
assert returnVal == 0
assert output == ""
- output, returnVal = pcs(temp_cib, "resource delete Master")
- assert returnVal == 0
- ac(output, """\
-Removing Constraint - location-Master-rh7-2-INFINITY
-Removing Constraint - location-ClusterIP5-rh7-1-INFINITY
-Deleting Resource - ClusterIP5
-""")
+ self.assert_pcs_success("resource delete Master", outdent(
+ """\
+ Removing Constraint - location-Master-rh7-2-INFINITY
+ Removing Constraint - location-ClusterIP5-rh7-1-INFINITY
+ Deleting Resource - ClusterIP5
+ """
+ ))
output, returnVal = pcs(
temp_cib,
@@ -1808,14 +1683,18 @@ Deleting Resource - ClusterIP5
output, returnVal = pcs(temp_cib, "constraint location ClusterIP5 prefers rh7-2")
assert returnVal == 0
assert output == ""
+ self.assert_pcs_success("resource delete ClusterIP5", outdent(
+ """\
+ Removing Constraint - location-ClusterIP5-rh7-1-INFINITY
+ Removing Constraint - location-ClusterIP5-rh7-2-INFINITY
+ Deleting Resource - ClusterIP5
+ """
+ ))
- output, returnVal = pcs(temp_cib, "resource delete ClusterIP5")
- assert returnVal == 0
- assert output == "Removing Constraint - location-ClusterIP5-rh7-1-INFINITY\nRemoving Constraint - location-ClusterIP5-rh7-2-INFINITY\nDeleting Resource - ClusterIP5\n",[output]
-
- output, returnVal = pcs(temp_cib, "resource create --no-default-ops ClusterIP5 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s")
- assert returnVal == 0
- assert output == ""
+ self.assert_pcs_success(
+ "resource create --no-default-ops ClusterIP5 ocf:heartbeat:IPaddr2"
+ " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
+ )
output, returnVal = pcs(temp_cib, "constraint location ClusterIP5 prefers rh7-1")
assert returnVal == 0
@@ -1825,59 +1704,61 @@ Deleting Resource - ClusterIP5
assert returnVal == 0
assert output == ""
- self.assert_pcs_success("config","""\
-Cluster Name: test99
-Corosync Nodes:
- rh7-1 rh7-2
-Pacemaker Nodes:
-
-Resources:
- Resource: ClusterIP6 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP6-monitor-interval-30s)
- Group: TestGroup1
- Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
- Group: TestGroup2
- Resource: ClusterIP2 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP2-monitor-interval-30s)
- Resource: ClusterIP3 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP3-monitor-interval-30s)
- Clone: ClusterIP4-clone
- Resource: ClusterIP4 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP4-monitor-interval-30s)
- Resource: ClusterIP5 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=192.168.0.99 cidr_netmask=32
- Operations: monitor interval=30s (ClusterIP5-monitor-interval-30s)
-
-Stonith Devices:
-Fencing Levels:
+ self.assert_pcs_success("config", outdent(
+ """\
+ Cluster Name: test99
+ Corosync Nodes:
+ rh7-1 rh7-2
+ Pacemaker Nodes:
-Location Constraints:
- Resource: ClusterIP5
- Enabled on: rh7-1 (score:INFINITY) (id:location-ClusterIP5-rh7-1-INFINITY)
- Enabled on: rh7-2 (score:INFINITY) (id:location-ClusterIP5-rh7-2-INFINITY)
-Ordering Constraints:
-Colocation Constraints:
-Ticket Constraints:
+ Resources:
+ Resource: ClusterIP6 (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=32 ip=192.168.0.99
+ Operations: monitor interval=30s (ClusterIP6-monitor-interval-30s)
+ Group: TestGroup1
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=32 ip=192.168.0.99
+ Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
+ Group: TestGroup2
+ Resource: ClusterIP2 (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=32 ip=192.168.0.99
+ Operations: monitor interval=30s (ClusterIP2-monitor-interval-30s)
+ Resource: ClusterIP3 (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=32 ip=192.168.0.99
+ Operations: monitor interval=30s (ClusterIP3-monitor-interval-30s)
+ Clone: ClusterIP4-clone
+ Resource: ClusterIP4 (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=32 ip=192.168.0.99
+ Operations: monitor interval=30s (ClusterIP4-monitor-interval-30s)
+ Resource: ClusterIP5 (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: cidr_netmask=32 ip=192.168.0.99
+ Operations: monitor interval=30s (ClusterIP5-monitor-interval-30s)
+
+ Stonith Devices:
+ Fencing Levels:
-Alerts:
- No alerts defined
+ Location Constraints:
+ Resource: ClusterIP5
+ Enabled on: rh7-1 (score:INFINITY) (id:location-ClusterIP5-rh7-1-INFINITY)
+ Enabled on: rh7-2 (score:INFINITY) (id:location-ClusterIP5-rh7-2-INFINITY)
+ Ordering Constraints:
+ Colocation Constraints:
+ Ticket Constraints:
-Resources Defaults:
- No defaults set
-Operations Defaults:
- No defaults set
+ Alerts:
+ No alerts defined
-Cluster Properties:
+ Resources Defaults:
+ No defaults set
+ Operations Defaults:
+ No defaults set
-Quorum:
- Options:
-""")
+ Cluster Properties:
+
+ Quorum:
+ Options:
+ """
+ ))
output, returnVal = pcs(temp_large_cib, "resource master dummylarge")
ac(output, '')
@@ -1899,338 +1780,24 @@ Quorum:
assert returnVal == 0
output, returnVal = pcs(temp_large_cib, "resource delete dummies")
- ac(output, """\
-Removing group: dummies (and all resources within group)
-Stopping all resources in group: dummies...
-Deleting Resource (and group and M/S) - dummylarge
-""")
- assert returnVal == 0
-
- def testResourceManage(self):
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops D0 ocf:heartbeat:Dummy"
- )
- assert returnVal == 0
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops D1 ocf:heartbeat:Dummy"
- )
- assert returnVal == 0
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops D2 ocf:heartbeat:Dummy"
- )
- assert returnVal == 0
- output, returnVal = pcs(temp_cib, "resource group add DGroup D0")
- assert returnVal == 0
- output, returnVal = pcs(temp_cib, "resource unmanage D1")
- assert returnVal == 0
- assert output == ""
- output, returnVal = pcs(temp_cib, "resource unmanage D1")
- assert returnVal == 0
- assert output == "",[output]
- output, returnVal = pcs(temp_cib, "resource manage D2")
- assert returnVal == 0
- assert output == "",[output]
- output, returnVal = pcs(temp_cib, "resource manage D1")
- assert returnVal == 0
- assert output == "",[output]
- output, returnVal = pcs(temp_cib, "resource unmanage D1")
- assert returnVal == 0
- assert output == ""
-
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops C1Master ocf:heartbeat:Dummy --master"
- )
- assert returnVal == 0
- assert output == ""
-
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops C2Master ocf:heartbeat:Dummy --master"
- )
- assert returnVal == 0
- assert output == ""
-
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops C3Master ocf:heartbeat:Dummy --clone"
- )
- assert returnVal == 0
- assert output == ""
-
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops C4Master ocf:heartbeat:Dummy clone"
- )
- assert returnVal == 0
- assert output == ""
-
- output, returnVal = pcs(temp_cib, "resource unmanage C1Master")
- assert returnVal == 0
- assert output == ""
-
- output, returnVal = pcs(temp_cib, "resource manage C1Master")
- assert returnVal == 0
- assert output == ""
-
- output, returnVal = pcs(temp_cib, "resource unmanage C2Master-master")
- ac(output,"")
- assert returnVal == 0
-
- output, returnVal = pcs(temp_cib, "resource manage C2Master-master")
- assert returnVal == 0
- assert output == ""
-
-
- output, returnVal = pcs(temp_cib, "resource unmanage C3Master")
- assert returnVal == 0
- assert output == ""
-
- output, returnVal = pcs(temp_cib, "resource manage C3Master")
- assert returnVal == 0
- assert output == ""
-
- output, returnVal = pcs(temp_cib, "resource unmanage C4Master-clone")
- assert returnVal == 0
- assert output == ""
-
- output, returnVal = pcs(temp_cib, "resource manage C4Master-clone")
- assert returnVal == 0
- assert output == ""
-
- output, returnVal = pcs(temp_cib, "resource show D1")
- assert returnVal == 0
- assert output == ' Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Meta Attrs: is-managed=false \n Operations: monitor interval=60s (D1-monitor-interval-60s)\n',[output]
-
- output, returnVal = pcs(temp_cib, "resource manage noexist")
- assert returnVal == 1
- assert output == "Error: noexist doesn't exist.\n",[output]
-
- output, returnVal = pcs(temp_cib, "resource manage DGroup")
- assert returnVal == 0
- assert output == '',[output]
-
- output, returnVal = pcs(temp_cib, "resource unmanage DGroup")
- assert returnVal == 0
- assert output == '',[output]
-
- output, returnVal = pcs(temp_cib, "resource show DGroup")
- assert returnVal == 0
- ac (output,' Group: DGroup\n Resource: D0 (class=ocf provider=heartbeat type=Dummy)\n Meta Attrs: is-managed=false \n Operations: monitor interval=60s (D0-monitor-interval-60s)\n')
-
- output, returnVal = pcs(temp_cib, "resource manage DGroup")
- assert returnVal == 0
- assert output == '',[output]
-
- output, returnVal = pcs(temp_cib, "resource show DGroup")
- assert returnVal == 0
- assert output == ' Group: DGroup\n Resource: D0 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D0-monitor-interval-60s)\n',[output]
-
- def testCloneMasterManage(self):
-# is-managed on the primitive, attempting manage on primitive
- output, returnVal = pcs(temp_cib, "resource create clone-unmanage ocf:heartbeat:Dummy --clone")
- assert returnVal == 0
- ac (output,'')
-
- output, returnVal = pcs(temp_cib, "resource update clone-unmanage meta is-managed=false")
- assert returnVal == 0
- ac (output, '')
-
- output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
- assert returnVal == 0
- ac (output, ' Clone: clone-unmanage-clone\n Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n Meta Attrs: is-managed=false \n Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
-
- output, returnVal = pcs(temp_cib, "resource manage clone-unmanage")
- assert returnVal == 0
- ac (output, '')
-
- output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
- assert returnVal == 0
- ac (output, ' Clone: clone-unmanage-clone\n Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
- output, returnVal = pcs(temp_cib, "resource delete clone-unmanage")
-
-# is-managed on the clone, attempting manage on primitive
- output, returnVal = pcs(temp_cib, "resource create clone-unmanage ocf:heartbeat:Dummy --clone")
- ac (output,'')
- assert returnVal == 0
-
- output, returnVal = pcs(temp_cib, "resource update clone-unmanage-clone meta is-managed=false")
- ac(output, '')
- self.assertEqual(0, returnVal)
-
- output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
- assert returnVal == 0
- ac (output, ' Clone: clone-unmanage-clone\n Meta Attrs: is-managed=false \n Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
-
- output, returnVal = pcs(temp_cib, "resource manage clone-unmanage")
- assert returnVal == 0
- ac (output, '')
-
- output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
- assert returnVal == 0
- ac (output, ' Clone: clone-unmanage-clone\n Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
- pcs(temp_cib, "resource delete clone-unmanage")
-
-# is-managed on the primitive, attempting manage on clone
- output, returnVal = pcs(temp_cib, "resource create clone-unmanage ocf:heartbeat:Dummy --clone")
- assert returnVal == 0
- ac (output,'')
-
- output, returnVal = pcs(temp_cib, "resource update clone-unmanage meta is-managed=false")
- assert returnVal == 0
- ac (output, '')
-
- output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
- assert returnVal == 0
- ac (output, ' Clone: clone-unmanage-clone\n Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n Meta Attrs: is-managed=false \n Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
-
- output, returnVal = pcs(temp_cib, "resource manage clone-unmanage-clone")
- assert returnVal == 0
- ac (output, '')
-
- output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
- assert returnVal == 0
- ac (output, ' Clone: clone-unmanage-clone\n Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
- pcs(temp_cib, "resource delete clone-unmanage")
-
-# is-managed on the clone, attempting manage on clone
- output, returnVal = pcs(temp_cib, "resource create clone-unmanage ocf:heartbeat:Dummy --clone")
- assert returnVal == 0
- ac (output,'')
-
- output, returnVal = pcs(temp_cib, "resource update clone-unmanage-clone meta is-managed=false")
- assert returnVal == 0
- ac (output, '')
-
- output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
- assert returnVal == 0
- ac (output, ' Clone: clone-unmanage-clone\n Meta Attrs: is-managed=false \n Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
-
- output, returnVal = pcs(temp_cib, "resource manage clone-unmanage-clone")
- assert returnVal == 0
- ac (output, '')
-
- output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
- assert returnVal == 0
- ac (output, ' Clone: clone-unmanage-clone\n Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
-
- output, returnVal = pcs(temp_cib, "resource create master-unmanage ocf:pacemaker:Stateful --master --no-default-ops")
- ac (output,'')
- assert returnVal == 0
-
- output, returnVal = pcs(temp_cib, "resource update master-unmanage-master meta is-managed=false")
- assert returnVal == 0
- ac (output, '')
-
- output, returnVal = pcs(temp_cib, "resource show master-unmanage-master")
- assert returnVal == 0
- ac (output, ' Master: master-unmanage-master\n Meta Attrs: is-managed=false \n Resource: master-unmanage (class=ocf provider=pacemaker type=Stateful)\n Operations: monitor interval=60s (master-unmanage-monitor-interval-60s)\n')
-
- output, returnVal = pcs(temp_cib, "resource manage master-unmanage")
- assert returnVal == 0
- ac (output, '')
-
- output, returnVal = pcs(temp_cib, "resource show master-unmanage-master")
+ ac(output, outdent(
+ """\
+ Removing group: dummies (and all resources within group)
+ Stopping all resources in group: dummies...
+ Deleting Resource (and group and M/S) - dummylarge
+ """
+ ))
assert returnVal == 0
- ac (output, ' Master: master-unmanage-master\n Resource: master-unmanage (class=ocf provider=pacemaker type=Stateful)\n Operations: monitor interval=60s (master-unmanage-monitor-interval-60s)\n')
-
- def testGroupManage(self):
- o, r = pcs(temp_cib, "resource create --no-default-ops D1 ocf:heartbeat:Dummy --group AG")
- self.assertEqual(r, 0)
- ac(o,"")
-
- o, r = pcs(temp_cib, "resource create --no-default-ops D2 ocf:heartbeat:Dummy --group AG")
- self.assertEqual(r, 0)
- ac(o,"")
-
- o, r = pcs(temp_cib, "resource --full")
- self.assertEqual(r, 0)
- ac(o," Group: AG\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
-
- o, r = pcs(temp_cib, "resource unmanage AG")
- self.assertEqual(r, 0)
- ac(o,"")
-
- o, r = pcs(temp_cib, "resource --full")
- self.assertEqual(r, 0)
- ac(o," Group: AG\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Meta Attrs: is-managed=false \n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Meta Attrs: is-managed=false \n Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
-
- o, r = pcs(temp_cib, "resource manage AG")
- self.assertEqual(r, 0)
- ac(o,"")
-
- o, r = pcs(temp_cib, "resource --full")
- self.assertEqual(r, 0)
- ac(o," Group: AG\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
-
- o, r = pcs(temp_cib, "resource unmanage D2")
- self.assertEqual(r, 0)
- ac(o,"")
-
- o, r = pcs(temp_cib, "resource --full")
- self.assertEqual(r, 0)
- ac(o," Group: AG\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Meta Attrs: is-managed=false \n Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
-
- o, r = pcs(temp_cib, "resource manage AG")
- self.assertEqual(r, 0)
- ac(o,"")
-
- o, r = pcs(temp_cib, "resource --full")
- self.assertEqual(r, 0)
- ac(o," Group: AG\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
-
- o, r = pcs(temp_cib, "resource unmanage D2")
- self.assertEqual(r, 0)
- ac(o,"")
-
- o, r = pcs(temp_cib, "resource unmanage D1")
- self.assertEqual(r, 0)
- ac(o,"")
-
- os.system("CIB_file="+temp_cib+" crm_resource --resource AG --set-parameter is-managed --meta --parameter-value false --force > /dev/null")
-
- o, r = pcs(temp_cib, "resource --full")
- self.assertEqual(r, 0)
- ac(o,"""\
- Group: AG
- Meta Attrs: is-managed=false
- Resource: D1 (class=ocf provider=heartbeat type=Dummy)
- Meta Attrs: is-managed=false
- Operations: monitor interval=60s (D1-monitor-interval-60s)
- Resource: D2 (class=ocf provider=heartbeat type=Dummy)
- Meta Attrs: is-managed=false
- Operations: monitor interval=60s (D2-monitor-interval-60s)
-""")
-
-
- o, r = pcs(temp_cib, "resource manage AG")
- self.assertEqual(r, 0)
- ac(o,"")
-
- o, r = pcs(temp_cib, "resource --full")
- self.assertEqual(r, 0)
- ac(o," Group: AG\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
-
- def testMasterMetaCreate(self):
- o,r = pcs('resource create --no-default-ops F0 ocf:heartbeat:Dummy op monitor interval=10s role=Master op monitor interval=20s role=Slave --master meta notify=true')
- ac (o,"")
- assert r==0
-
- o,r = pcs("resource --full")
- ac (o," Master: F0-master\n Meta Attrs: notify=true \n Resource: F0 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=10s role=Master (F0-monitor-interval-10s)\n monitor interval=20s role=Slave (F0-monitor-interval-20s)\n")
- assert r==0
def testBadInstanceVariables(self):
- output, returnVal = pcs(temp_cib, "resource create --no-default-ops D0 ocf:heartbeat:Dummy test=testC test2=test2a op monitor interval=35 meta test7=test7a test6=")
- assert returnVal == 1
- assert output == "Error: resource option(s): 'test, test2', are not recognized for resource type: 'ocf:heartbeat:Dummy' (use --force to override)\n", [output]
-
- output, returnVal = pcs(temp_cib, "resource create --no-default-ops --force D0 ocf:heartbeat:Dummy test=testC test2=test2a test4=test4A op monitor interval=35 meta test7=test7a test6=")
- assert returnVal == 0
- assert output == "", [output]
+ self.assert_pcs_success(
+ "resource create --no-default-ops --force D0 ocf:heartbeat:Dummy"
+ " test=testC test2=test2a test4=test4A op monitor interval=35"
+ " meta test7=test7a test6="
+ ,
+ "Warning: invalid resource options: 'test', 'test2', 'test4',"
+ " allowed options are: fake, state, trace_file, trace_ra\n"
+ )
output, returnVal = pcs(temp_cib, "resource update D0 test=testA test2=testB")
assert returnVal == 1
@@ -2240,23 +1807,35 @@ Deleting Resource (and group and M/S) - dummylarge
assert returnVal == 0
assert output == "", [output]
- output, returnVal = pcs(temp_cib, "resource show D0")
- ac(output, """\
- Resource: D0 (class=ocf provider=heartbeat type=Dummy)
- Attributes: test=testB test2=testC test4=test4A test3=testD
- Meta Attrs: test7=test7a test6=
- Operations: monitor interval=35 (D0-monitor-interval-35)
-""")
+ self.assert_pcs_success("resource show D0", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Resource: D0 (class=ocf provider=heartbeat type=Dummy)
+ Attributes: test=testB test2=testC test4=test4A test3=testD
+ Meta Attrs: test6= test7=test7a
+ Operations: monitor interval=35 (D0-monitor-interval-35)
+ """
+ ))
assert returnVal == 0
def testMetaAttrs(self):
- output, returnVal = pcs(temp_cib, "resource create --no-default-ops --force D0 ocf:heartbeat:Dummy test=testA test2=test2a op monitor interval=30 meta test5=test5a test6=test6a")
- assert returnVal == 0
- assert output == "", [output]
+ # see also BundleMiscCommands
+ self.assert_pcs_success(
+ "resource create --no-default-ops --force D0 ocf:heartbeat:Dummy"
+ " test=testA test2=test2a op monitor interval=30 meta"
+ " test5=test5a test6=test6a"
+ ,
+ "Warning: invalid resource options: 'test', 'test2', allowed"
+ " options are: fake, state, trace_file, trace_ra\n"
+ )
- output, returnVal = pcs(temp_cib, "resource create --no-default-ops --force D1 ocf:heartbeat:Dummy test=testA test2=test2a op monitor interval=30")
- assert returnVal == 0
- assert output == "", [output]
+ self.assert_pcs_success(
+ "resource create --no-default-ops --force D1 ocf:heartbeat:Dummy"
+ " test=testA test2=test2a op monitor interval=30"
+ ,
+ "Warning: invalid resource options: 'test', 'test2', allowed"
+ " options are: fake, state, trace_file, trace_ra\n"
+ )
output, returnVal = pcs(temp_cib, "resource update --force D0 test=testC test2=test2a op monitor interval=35 meta test7=test7a test6=")
assert returnVal == 0
@@ -2312,17 +1891,26 @@ Deleting Resource (and group and M/S) - dummylarge
assert returnVal == 0
assert output == "", [output]
- output, returnVal = pcs(temp_cib, "resource --full")
- assert returnVal == 0
- assert output == ' Master: GroupMaster\n Group: Group\n Resource: D0 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D0-monitor-interval-60s)\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n', [output]
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Master: GroupMaster
+ Group: Group
+ Resource: D0 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D0-monitor-interval-10)
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ """
+ ))
- output, returnVal = pcs(temp_cib, "resource delete D0")
- assert returnVal == 0
- assert output == "Deleting Resource - D0\n", [output]
+ self.assert_pcs_success(
+ "resource delete D0",
+ "Deleting Resource - D0\n"
+ )
- output, returnVal = pcs(temp_cib, "resource delete D1")
- assert returnVal == 0
- assert output == 'Deleting Resource (and group and M/S) - D1\n', [output]
+ self.assert_pcs_success(
+ "resource delete D1",
+ "Deleting Resource (and group and M/S) - D1\n"
+ )
def testUncloneWithConstraints(self):
o,r = pcs(
@@ -2349,6 +1937,7 @@ Deleting Resource (and group and M/S) - dummylarge
assert r == 0
def testUnclone(self):
+ # see also BundleCloneMaster
output, returnVal = pcs(
temp_cib,
"resource create --no-default-ops dummy1 ocf:heartbeat:Dummy"
@@ -2381,30 +1970,30 @@ Deleting Resource (and group and M/S) - dummylarge
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Group: gr
- Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Clone: dummy2-clone
- Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Group: gr
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Clone: dummy2-clone
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ """
+ ))
output, returnVal = pcs(temp_cib, "resource unclone dummy2")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Group: gr
- Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Group: gr
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ """
+ ))
# unclone with a clone itself specified
output, returnVal = pcs(temp_cib, "resource group add gr dummy2")
@@ -2415,119 +2004,118 @@ Deleting Resource (and group and M/S) - dummylarge
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Clone: gr-clone
- Group: gr
- Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Clone: gr-clone
+ Group: gr
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ """
+ ))
output, returnVal = pcs(temp_cib, "resource unclone gr-clone")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Group: gr
- Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Group: gr
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ """
+ ))
# unclone with a cloned group specified
output, returnVal = pcs(temp_cib, "resource clone gr")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Clone: gr-clone
- Group: gr
- Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Clone: gr-clone
+ Group: gr
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ """
+ ))
output, returnVal = pcs(temp_cib, "resource unclone gr")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Group: gr
- Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Group: gr
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ """
+ ))
# unclone with a cloned grouped resource specified
output, returnVal = pcs(temp_cib, "resource clone gr")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Clone: gr-clone
- Group: gr
- Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Clone: gr-clone
+ Group: gr
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ """
+ ))
+
self.assertEqual(0, returnVal)
output, returnVal = pcs(temp_cib, "resource unclone dummy1")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Clone: gr-clone
- Group: gr
- Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
- Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Clone: gr-clone
+ Group: gr
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ """
+ ))
output, returnVal = pcs(temp_cib, "resource unclone dummy2")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ """
+ ))
def testUncloneMaster(self):
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops dummy1 ocf:pacemaker:Stateful"
+ # see also BundleCloneMaster
+ self.assert_pcs_success(
+ "resource create --no-default-ops dummy1 ocf:pacemaker:Stateful",
+ "Warning: changing a monitor operation interval from 10 to 11 to make the operation unique\n"
)
- ac(output, "")
- self.assertEqual(0, returnVal)
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops dummy2 ocf:pacemaker:Stateful"
+ self.assert_pcs_success(
+ "resource create --no-default-ops dummy2 ocf:pacemaker:Stateful",
+ "Warning: changing a monitor operation interval from 10 to 11 to make the operation unique\n"
)
- ac(output, "")
- self.assertEqual(0, returnVal)
# try to unclone a non-cloned resource
output, returnVal = pcs(temp_cib, "resource unclone dummy1")
@@ -2547,30 +2135,34 @@ Deleting Resource (and group and M/S) - dummylarge
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Group: gr
- Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Master: dummy2-master
- Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Group: gr
+ Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy1-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy1-monitor-interval-11)
+ Master: dummy2-master
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy2-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy2-monitor-interval-11)
+ """
+ ))
output, returnVal = pcs(temp_cib, "resource unclone dummy2")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Group: gr
- Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Group: gr
+ Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy1-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy1-monitor-interval-11)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy2-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy2-monitor-interval-11)
+ """
+ ))
# unclone with a clone itself specified
output, returnVal = pcs(temp_cib, "resource group add gr dummy2")
@@ -2581,60 +2173,68 @@ Deleting Resource (and group and M/S) - dummylarge
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Master: gr-master
- Group: gr
- Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Master: gr-master
+ Group: gr
+ Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy1-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy1-monitor-interval-11)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy2-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy2-monitor-interval-11)
+ """
+ ))
output, returnVal = pcs(temp_cib, "resource unclone gr-master")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Group: gr
- Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Group: gr
+ Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy1-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy1-monitor-interval-11)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy2-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy2-monitor-interval-11)
+ """
+ ))
# unclone with a cloned group specified
output, returnVal = pcs(temp_cib, "resource master gr")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Master: gr-master
- Group: gr
- Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Master: gr-master
+ Group: gr
+ Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy1-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy1-monitor-interval-11)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy2-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy2-monitor-interval-11)
+ """
+ ))
output, returnVal = pcs(temp_cib, "resource unclone gr")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Group: gr
- Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Group: gr
+ Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy1-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy1-monitor-interval-11)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy2-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy2-monitor-interval-11)
+ """
+ ))
# unclone with a cloned grouped resource specified
output, returnVal = pcs(temp_cib, "resource ungroup gr dummy2")
@@ -2645,29 +2245,33 @@ Deleting Resource (and group and M/S) - dummylarge
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
- Master: gr-master
- Group: gr
- Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy2-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy2-monitor-interval-11)
+ Master: gr-master
+ Group: gr
+ Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy1-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy1-monitor-interval-11)
+ """
+ ))
output, returnVal = pcs(temp_cib, "resource unclone dummy1")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
- Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy2-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy2-monitor-interval-11)
+ Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy1-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy1-monitor-interval-11)
+ """
+ ))
output, returnVal = pcs(temp_cib, "resource group add gr dummy1 dummy2")
ac(output, "")
@@ -2677,31 +2281,35 @@ Deleting Resource (and group and M/S) - dummylarge
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Master: gr-master
- Group: gr
- Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Master: gr-master
+ Group: gr
+ Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy1-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy1-monitor-interval-11)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy2-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy2-monitor-interval-11)
+ """
+ ))
output, returnVal = pcs(temp_cib, "resource unclone dummy2")
ac(output, "Error: Groups that have more than one resource and are master/slave resources cannot be removed. The group may be deleted with 'pcs resource delete gr'.\n")
self.assertEqual(1, returnVal)
- output, returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Master: gr-master
- Group: gr
- Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy1-monitor-interval-60s)
- Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Master: gr-master
+ Group: gr
+ Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy1-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy1-monitor-interval-11)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy2-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy2-monitor-interval-11)
+ """
+ ))
def testCloneGroupMember(self):
o,r = pcs(
@@ -2774,24 +2382,8 @@ Deleting Resource (and group and M/S) - dummylarge
ac(o," Clone Set: D0-clone [D0]\n Clone Set: D1-clone [D1]\n Master/Slave Set: D2-master [D2]\n Master/Slave Set: D3-master [D3]\n")
assert r == 0
- def testResourceCreationWithGroupOperations(self):
- o,r = pcs(temp_cib, "resource create --no-default-ops D1 ocf:heartbeat:Dummy --group AG2 op monitor interval=32s")
- ac(o,"")
- assert r == 0
-
- o,r = pcs(temp_cib, "resource create --no-default-ops D3 ocf:heartbeat:Dummy op monitor interval=34s --group AG2 ")
- ac(o,"")
- assert r == 0
-
- o,r = pcs(temp_cib, "resource create --no-default-ops D4 ocf:heartbeat:Dummy op monitor interval=35s --group=AG2 ")
- ac(o,"")
- assert r == 0
-
- o,r = pcs(temp_cib, "resource --full")
- ac(o," Group: AG2\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=32s (D1-monitor-interval-32s)\n Resource: D3 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=34s (D3-monitor-interval-34s)\n Resource: D4 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=35s (D4-monitor-interval-35s)\n")
- assert r == 0
-
def testCloneMaster(self):
+ # see also BundleCloneMaster
output, returnVal = pcs(
temp_cib,
"resource create --no-default-ops D0 ocf:heartbeat:Dummy"
@@ -2831,17 +2423,29 @@ Deleting Resource (and group and M/S) - dummylarge
assert returnVal == 0
assert output == "", [output]
- output, returnVal = pcs(temp_cib, "resource show --full")
- assert returnVal == 0
- assert output == ' Clone: D0-clone\n Resource: D0 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D0-monitor-interval-60s)\n Master: D1-master-custom\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Master: D2-master\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D2-monitor-interval-60s)\n', [output]
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Clone: D0-clone
+ Resource: D0 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D0-monitor-interval-10)
+ Master: D1-master-custom
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ Master: D2-master
+ Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D2-monitor-interval-10)
+ """
+ ))
- output, returnVal = pcs(temp_cib, "resource delete D0")
- assert returnVal == 0
- assert output == "Deleting Resource - D0\n", [output]
+ self.assert_pcs_success(
+ "resource delete D0",
+ "Deleting Resource - D0\n"
+ )
- output, returnVal = pcs(temp_cib, "resource delete D2")
- assert returnVal == 0
- assert output == "Deleting Resource - D2\n", [output]
+ self.assert_pcs_success(
+ "resource delete D2",
+ "Deleting Resource - D2\n"
+ )
output, returnVal = pcs(
temp_cib,
@@ -2852,9 +2456,17 @@ Deleting Resource (and group and M/S) - dummylarge
"resource create --no-default-ops D2 ocf:heartbeat:Dummy"
)
- output, returnVal = pcs(temp_cib, "resource show --full")
- assert returnVal == 0
- assert output == " Master: D1-master-custom\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D0 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D0-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D2-monitor-interval-60s)\n", [output]
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Master: D1-master-custom
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ Resource: D0 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D0-monitor-interval-10)
+ Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D2-monitor-interval-10)
+ """
+ ))
def testLSBResource(self):
output, returnVal = pcs(
@@ -3008,7 +2620,6 @@ Ticket Constraints:
""")
self.assertEqual(0, returnVal)
-
output, returnVal = pcs(temp_cib, "resource ban dummy rh7-1 rh7-1")
self.assertEqual(1, returnVal)
@@ -3142,9 +2753,18 @@ Ticket Constraints:
ac(output,"Error: error moving/banning/clearing resource\nResource 'D2-master' not moved: active in 0 locations (promoted in 0).\nYou can prevent 'D2-master' from running on a specific location with: --ban --host <name>\nYou can prevent 'D2-master' from being promoted at a specific location with: --ban --master --host <name>\nError performing operation: Invalid argument\n\n")
assert returnVal == 1
- output, returnVal = pcs(temp_cib, "resource --full")
- assert returnVal == 0
- assert output == ' Resource: D0 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D0-monitor-interval-60s)\n Clone: D1-clone\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Master: D2-master\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D2-monitor-interval-60s)\n', [output]
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Resource: D0 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D0-monitor-interval-10)
+ Clone: D1-clone
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ Master: D2-master
+ Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D2-monitor-interval-10)
+ """
+ ))
def testMasterOfGroupMove(self):
o,r = pcs(
@@ -3255,102 +2875,24 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
assert r == 0
ac(o,"Location Constraints:\n Resource: DGroup\n Enabled on: rh7-1 (score:INFINITY) (role: Started)\nOrdering Constraints:\nColocation Constraints:\nTicket Constraints:\n")
- o,r = pcs(temp_cib, "resource delete D1")
- ac(o,"Deleting Resource - D1\n")
- assert r == 0
+ self.assert_pcs_success(
+ "resource delete D1",
+ "Deleting Resource - D1\n"
+ )
- o,r = pcs(temp_cib, "resource delete D2")
- ac(o,"Removing Constraint - cli-prefer-DGroup\nDeleting Resource (and group) - D2\n")
- assert r == 0
+ self.assert_pcs_success("resource delete D2", outdent(
+ """\
+ Removing Constraint - cli-prefer-DGroup
+ Deleting Resource (and group) - D2
+ """
+ ))
o,r = pcs(temp_cib, "resource show")
assert r == 0
ac(o,"NO resources configured\n")
def testResourceCloneCreation(self):
- output, returnVal = pcs(temp_cib, "resource create --no-default-ops D1 ocf:heartbeat:Dummy --clone")
- assert returnVal == 0
- assert output == "", [output]
-
- output, returnVal = pcs(temp_cib, "resource create --no-default-ops D2 ocf:heartbeat:Dummy --clone")
- assert returnVal == 0
- assert output == "", [output]
-
- output, returnVal = pcs(temp_cib, "resource create --no-default-ops D3 ocf:heartbeat:Dummy --clone globaly-unique=true")
- assert returnVal == 0
- assert output == "", [output]
-
- output, returnVal = pcs(temp_cib, "resource --full")
- assert returnVal == 0
- ac(output,' Clone: D1-clone\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n Clone: D2-clone\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D2-monitor-interval-60s)\n Clone: D3-clone\n Meta Attrs: globaly-unique=true \n Resource: D3 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D3-monitor-interval-60s)\n')
-
- output, returnVal = pcs(temp_cib, "resource delete D1")
- assert returnVal == 0
- assert output == "Deleting Resource - D1\n", [output]
-
- output, returnVal = pcs(temp_cib, "resource delete D2")
- assert returnVal == 0
- assert output == "Deleting Resource - D2\n", [output]
-
- output, returnVal = pcs(temp_cib, "resource delete D3")
- assert returnVal == 0
- assert output == "Deleting Resource - D3\n", [output]
-
- output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm ocf:pacemaker:Dummy op monitor interval=10s --clone meta interleave=true clone-node-max=1 ordered=true")
- assert output == "", [output]
- assert returnVal == 0
-
- output,returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Clone: dlm-clone
- Meta Attrs: clone-node-max=1 interleave=true ordered=true
- Resource: dlm (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=10s (dlm-monitor-interval-10s)
-""")
- self.assertEqual(0, returnVal)
-
- output, returnVal = pcs(temp_cib, "resource delete dlm")
- assert returnVal == 0
- assert output == "Deleting Resource - dlm\n", [output]
-
- output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm ocf:pacemaker:Dummy op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
- assert output == "", [output]
- assert returnVal == 0
-
- output,returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Clone: dlm-clone
- Meta Attrs: clone-node-max=1 interleave=true ordered=true
- Resource: dlm (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=10s (dlm-monitor-interval-10s)
-""")
- self.assertEqual(0, returnVal)
-
- output, returnVal = pcs(temp_cib, "resource delete dlm")
- assert returnVal == 0
- assert output == "Deleting Resource - dlm\n", [output]
-
- output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm ocf:pacemaker:Dummy op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
- assert returnVal == 0
- assert output == "", [output]
-
- output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm ocf:pacemaker:Dummy op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
- assert returnVal == 1
- assert output == "Error: unable to create resource/fence device 'dlm', 'dlm' already exists on this system\n", [output]
-
- output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm-clone ocf:pacemaker:Dummy op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
- assert returnVal == 1
- assert output == "Error: unable to create resource/fence device 'dlm-clone', 'dlm-clone' already exists on this system\n", [output]
-
- output,returnVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Clone: dlm-clone
- Meta Attrs: clone-node-max=1 interleave=true ordered=true
- Resource: dlm (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=10s (dlm-monitor-interval-10s)
-""")
- self.assertEqual(0, returnVal)
-
+ # resource "dummy1" is already in "temp_large_cib"
output, returnVal = pcs(temp_large_cib, "resource clone dummy1")
ac(output, '')
assert returnVal == 0
@@ -3378,19 +2920,20 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource show --full")
- ac(output, """\
- Resource: dummy-clone (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-clone-monitor-interval-60s)
- Clone: dummy-clone-1
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Resource: dummy-clone (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-clone-monitor-interval-10)
+ Clone: dummy-clone-1
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
- output, returnVal = pcs(temp_cib, "resource delete dummy")
- ac(output, "Deleting Resource - dummy\n")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success(
+ "resource delete dummy",
+ "Deleting Resource - dummy\n"
+ )
output, returnVal = pcs(
temp_cib,
@@ -3399,15 +2942,15 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource show --full")
- ac(output, """\
- Resource: dummy-clone (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-clone-monitor-interval-60s)
- Clone: dummy-clone-1
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Resource: dummy-clone (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-clone-monitor-interval-10)
+ Clone: dummy-clone-1
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
def testResourceMasterId(self):
output, returnVal = pcs(
@@ -3428,15 +2971,15 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource show --full")
- ac(output, """\
- Resource: dummy-master (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-master-monitor-interval-60s)
- Master: dummy-master-1
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Resource: dummy-master (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-master-monitor-interval-10)
+ Master: dummy-master-1
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
output, returnVal = pcs(temp_cib, "resource unclone dummy")
ac(output, "")
@@ -3450,19 +2993,20 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource show --full")
- ac(output, """\
- Resource: dummy-master (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-master-monitor-interval-60s)
- Master: dummy-master0
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Resource: dummy-master (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-master-monitor-interval-10)
+ Master: dummy-master0
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
- output, returnVal = pcs(temp_cib, "resource delete dummy")
- ac(output, "Deleting Resource - dummy\n")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success(
+ "resource delete dummy",
+ "Deleting Resource - dummy\n"
+ )
output, returnVal = pcs(
temp_cib,
@@ -3471,15 +3015,15 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource show --full")
- ac(output, """\
- Resource: dummy-master (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-master-monitor-interval-60s)
- Master: dummy-master-1
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Resource: dummy-master (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-master-monitor-interval-10)
+ Master: dummy-master-1
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
def testResourceCloneUpdate(self):
o, r = pcs(
@@ -3489,33 +3033,53 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
assert r == 0
ac(o, "")
- o, r = pcs(temp_cib, "resource --full")
- assert r == 0
- ac(o, ' Clone: D1-clone\n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n')
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Clone: D1-clone
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ """
+ ))
o, r = pcs(temp_cib, 'resource update D1-clone foo=bar')
ac(o, "")
self.assertEqual(0, r)
- o, r = pcs(temp_cib, "resource --full")
- assert r == 0
- ac(o, ' Clone: D1-clone\n Meta Attrs: foo=bar \n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n')
+ self.assert_pcs_success("resource --full", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Clone: D1-clone
+ Meta Attrs: foo=bar
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ """
+ ))
- o, r = pcs(temp_cib, 'resource update D1-clone bar=baz')
- assert r == 0
- ac(o, "")
+ self.assert_pcs_success("resource update D1-clone bar=baz")
- o, r = pcs(temp_cib, "resource --full")
- assert r == 0
- ac(o, ' Clone: D1-clone\n Meta Attrs: foo=bar bar=baz \n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n')
+ self.assert_pcs_success("resource --full", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Clone: D1-clone
+ Meta Attrs: foo=bar bar=baz
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ """
+ ))
o, r = pcs(temp_cib, 'resource update D1-clone foo=')
assert r == 0
ac(o, "")
- o, r = pcs(temp_cib, "resource --full")
- assert r == 0
- ac(o, ' Clone: D1-clone\n Meta Attrs: bar=baz \n Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n')
+ self.assert_pcs_success("resource --full", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Clone: D1-clone
+ Meta Attrs: bar=baz
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ """
+ ))
def testGroupRemoveWithConstraints2(self):
o,r = pcs(
@@ -3538,9 +3102,14 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
ac(o,"Removing Constraint - location-AG-rh7-1-INFINITY\n")
assert r == 0
- o,r = pcs(temp_cib, "resource --full")
- ac(o, " Resource: A (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (A-monitor-interval-60s)\n Resource: B (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (B-monitor-interval-60s)\n")
- assert r == 0
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Resource: A (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (A-monitor-interval-10)
+ Resource: B (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (B-monitor-interval-10)
+ """
+ ))
o,r = pcs(
temp_cib,
@@ -3557,16 +3126,17 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
o,r = pcs(temp_cib, "constraint location AA-master prefers rh7-1")
assert r == 0
- o,r = pcs(temp_cib, "resource delete A1")
- ac(o,"Deleting Resource - A1\n")
- assert r == 0
+ self.assert_pcs_success(
+ "resource delete A1",
+ "Deleting Resource - A1\n"
+ )
- o,r = pcs(temp_cib, "resource delete A2")
- ac(o,"""\
-Removing Constraint - location-AA-master-rh7-1-INFINITY
-Deleting Resource (and group and M/S) - A2
-""")
- assert r == 0
+ self.assert_pcs_success("resource delete A2", outdent(
+ """\
+ Removing Constraint - location-AA-master-rh7-1-INFINITY
+ Deleting Resource (and group and M/S) - A2
+ """
+ ))
def testMasteredGroup(self):
o,r = pcs(
@@ -3590,32 +3160,20 @@ Deleting Resource (and group and M/S) - A2
o,r = pcs(temp_cib, "resource master AGMaster AG")
assert r == 0
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops A ocf:heartbeat:Dummy"
+ self.assert_pcs_fail(
+ "resource create --no-default-ops A ocf:heartbeat:Dummy",
+ "Error: 'A' already exists\n"
)
- ac(output, """\
-Error: unable to create resource/fence device 'A', 'A' already exists on this system
-""")
- self.assertEqual(1, returnVal)
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops AG ocf:heartbeat:Dummy"
+ self.assert_pcs_fail(
+ "resource create --no-default-ops AG ocf:heartbeat:Dummy",
+ "Error: 'AG' already exists\n"
)
- ac(output, """\
-Error: unable to create resource/fence device 'AG', 'AG' already exists on this system
-""")
- self.assertEqual(1, returnVal)
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops AGMaster ocf:heartbeat:Dummy"
+ self.assert_pcs_fail(
+ "resource create --no-default-ops AGMaster ocf:heartbeat:Dummy",
+ "Error: 'AGMaster' already exists\n"
)
- ac(output, """\
-Error: unable to create resource/fence device 'AGMaster', 'AGMaster' already exists on this system
-""")
- self.assertEqual(1, returnVal)
o,r = pcs(temp_cib, "resource ungroup AG")
ac(o,"Error: Groups that have more than one resource and are master/slave resources cannot be removed. The group may be deleted with 'pcs resource delete AG'.\n")
@@ -3630,9 +3188,13 @@ Error: unable to create resource/fence device 'AGMaster', 'AGMaster' already exi
ac(o,"")
assert r == 0
- o,r = pcs(temp_cib, "resource show --full")
- ac(o," Master: AGMaster\n Resource: A (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (A-monitor-interval-60s)\n")
- assert r == 0
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Master: AGMaster
+ Resource: A (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (A-monitor-interval-10)
+ """
+ ))
def testClonedGroup(self):
output, returnVal = pcs(
@@ -3653,43 +3215,29 @@ Error: unable to create resource/fence device 'AGMaster', 'AGMaster' already exi
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource show --full")
- ac(output, """\
- Clone: DG-clone
- Group: DG
- Resource: D1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (D1-monitor-interval-60s)
- Resource: D2 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (D2-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Clone: DG-clone
+ Group: DG
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D2-monitor-interval-10)
+ """
+ ))
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops D1 ocf:heartbeat:Dummy"
+ self.assert_pcs_fail(
+ "resource create --no-default-ops D1 ocf:heartbeat:Dummy",
+ "Error: 'D1' already exists\n"
)
- ac(output, """\
-Error: unable to create resource/fence device 'D1', 'D1' already exists on this system
-""")
- self.assertEqual(1, returnVal)
-
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops DG ocf:heartbeat:Dummy"
+ self.assert_pcs_fail(
+ "resource create --no-default-ops DG ocf:heartbeat:Dummy",
+ "Error: 'DG' already exists\n"
)
- ac(output, """\
-Error: unable to create resource/fence device 'DG', 'DG' already exists on this system
-""")
- self.assertEqual(1, returnVal)
-
- output, returnVal = pcs(
- temp_cib,
- "resource create --no-default-ops DG-clone ocf:heartbeat:Dummy"
+ self.assert_pcs_fail(
+ "resource create --no-default-ops DG-clone ocf:heartbeat:Dummy",
+ "Error: 'DG-clone' already exists\n"
)
- ac(output, """\
-Error: unable to create resource/fence device 'DG-clone', 'DG-clone' already exists on this system
-""")
- self.assertEqual(1, returnVal)
output, returnVal = pcs(temp_cib, "resource ungroup DG")
ac(output, """\
@@ -3707,32 +3255,38 @@ Error: Cannot remove more than one resource from cloned group
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource show --full")
- ac(output, """\
- Clone: DG-clone
- Group: DG
- Resource: D2 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (D2-monitor-interval-60s)
- Resource: D1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (D1-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Clone: DG-clone
+ Group: DG
+ Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D2-monitor-interval-10)
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ """
+ ))
output, returnVal = pcs(temp_cib, "resource ungroup DG")
ac(output, "")
self.assertEqual(0, returnVal)
- output, returnVal = pcs(temp_cib, "resource show --full")
- ac(output, """\
- Clone: DG-clone
- Resource: D2 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (D2-monitor-interval-60s)
- Resource: D1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (D1-monitor-interval-60s)
-""")
- self.assertEqual(0, returnVal)
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Clone: DG-clone
+ Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D2-monitor-interval-10)
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ """
+ ))
def testResourceEnable(self):
+ # These tests were moved to
+ # pcs/lib/commands/test/resource/test_resource_enable_disable.py .
+ # However, those test the pcs library. I'm leaving these tests here to
+ # test the cli part for now.
+
+ # see also BundleMiscCommands
o,r = pcs(
temp_cib,
"resource create --no-default-ops D1 ocf:heartbeat:Dummy"
@@ -3745,32 +3299,33 @@ Error: Cannot remove more than one resource from cloned group
ac(o,"")
assert r == 0
- o,r = pcs(temp_cib, "resource show D1")
- ac(o, """\
- Resource: D1 (class=ocf provider=heartbeat type=Dummy)
- Meta Attrs: target-role=Stopped
- Operations: monitor interval=60s (D1-monitor-interval-60s)
-""")
- assert r == 0
+ self.assert_pcs_success("resource show D1", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Meta Attrs: target-role=Stopped
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ """
+ ))
o,r = pcs(temp_cib, "resource enable D1")
ac(o,"")
assert r == 0
- o,r = pcs(temp_cib, "resource show D1")
- ac(o, """\
- Resource: D1 (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (D1-monitor-interval-60s)
-""")
- assert r == 0
+ self.assert_pcs_success("resource show D1", outdent(
+ """\
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ """
+ ))
# bad resource name
o,r = pcs(temp_cib, "resource enable NoExist")
- ac(o,"Error: unable to find a resource/clone/master/group: NoExist\n")
+ ac(o,"Error: resource/clone/master/group 'NoExist' does not exist\n")
assert r == 1
o,r = pcs(temp_cib, "resource disable NoExist")
- ac(o,"Error: unable to find a resource/clone/master/group: NoExist\n")
+ ac(o,"Error: resource/clone/master/group 'NoExist' does not exist\n")
assert r == 1
# cloned group
@@ -3783,14 +3338,28 @@ Error: Cannot remove more than one resource from cloned group
output, retVal = pcs(temp_cib, "resource clone group0")
ac(output, "")
assert retVal == 0
- output, retVal = pcs(temp_cib, "resource show group0-clone")
- ac(output," Clone: group0-clone\n Group: group0\n Resource: dummy0 (class=ocf provider=heartbeat type=Dummy)\n Operations: start interval=0s timeout=20 (dummy0-start-interval-0s)\n stop interval=0s timeout=20 (dummy0-stop-interval-0s)\n monitor interval=10 timeout=20 (dummy0-monitor-interval-10)\n")
+ self.assert_pcs_success("resource show group0-clone", outdent(
+ """\
+ Clone: group0-clone
+ Group: group0
+ Resource: dummy0 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy0-monitor-interval-10)
+ start interval=0s timeout=20 (dummy0-start-interval-0s)
+ stop interval=0s timeout=20 (dummy0-stop-interval-0s)
+ """
+ ))
assert retVal == 0
output, retVal = pcs(temp_cib, "resource disable group0")
ac(output, "")
assert retVal == 0
def testResourceEnableUnmanaged(self):
+ # These tests were moved to
+ # pcs/lib/commands/test/resource/test_resource_enable_disable.py .
+ # However, those test the pcs library. I'm leaving these tests here to
+ # test the cli part for now.
+
+ # see also BundleMiscCommands
o,r = pcs(
temp_cib,
"resource create --no-default-ops D1 ocf:heartbeat:Dummy"
@@ -3883,6 +3452,10 @@ Error: Cannot remove more than one resource from cloned group
assert r == 0
def testResourceEnableClone(self):
+ # These tests were moved to
+ # pcs/lib/commands/test/resource/test_resource_enable_disable.py .
+ # However, those test the pcs library. I'm leaving these tests here to
+ # test the cli part for now.
output, retVal = pcs(
temp_cib,
"resource create --no-default-ops dummy ocf:heartbeat:Dummy --clone"
@@ -3899,13 +3472,13 @@ Error: Cannot remove more than one resource from cloned group
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-clone")
- ac(output, """\
- Clone: dummy-clone
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(retVal, 0)
+ self.assert_pcs_success("resource show dummy-clone", outdent(
+ """\
+ Clone: dummy-clone
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
# disable clone, enable primitive
output, retVal = pcs(temp_cib, "resource disable dummy-clone")
@@ -3916,13 +3489,13 @@ Error: Cannot remove more than one resource from cloned group
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-clone")
- ac(output, """\
- Clone: dummy-clone
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(retVal, 0)
+ self.assert_pcs_success("resource show dummy-clone", outdent(
+ """\
+ Clone: dummy-clone
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
# disable both primitive and clone, enable clone
output, retVal = pcs(temp_cib, "resource disable dummy-clone")
@@ -3937,13 +3510,13 @@ Error: Cannot remove more than one resource from cloned group
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-clone")
- ac(output, """\
- Clone: dummy-clone
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(retVal, 0)
+ self.assert_pcs_success("resource show dummy-clone", outdent(
+ """\
+ Clone: dummy-clone
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
# disable both primitive and clone, enable primitive
output, retVal = pcs(temp_cib, "resource disable dummy-clone")
@@ -3958,37 +3531,40 @@ Error: Cannot remove more than one resource from cloned group
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-clone")
- ac(output, """\
- Clone: dummy-clone
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(retVal, 0)
+ self.assert_pcs_success("resource show dummy-clone", outdent(
+ """\
+ Clone: dummy-clone
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
# disable via 'resource disable', enable via 'resource meta'
output, retVal = pcs(temp_cib, "resource disable dummy-clone")
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-clone")
- ac(output, """\
- Clone: dummy-clone
- Meta Attrs: target-role=Stopped
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
+ self.assert_pcs_success("resource show dummy-clone", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Clone: dummy-clone
+ Meta Attrs: target-role=Stopped
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
output, retVal = pcs(temp_cib, "resource meta dummy-clone target-role=")
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-clone")
- ac(output, """\
- Clone: dummy-clone
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
+ self.assert_pcs_success("resource show dummy-clone", outdent(
+ """\
+ Clone: dummy-clone
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
# disable via 'resource meta', enable via 'resource enable'
output, retVal = pcs(
@@ -3997,32 +3573,37 @@ Error: Cannot remove more than one resource from cloned group
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-clone")
- ac(output, """\
- Clone: dummy-clone
- Meta Attrs: target-role=Stopped
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
+ self.assert_pcs_success("resource show dummy-clone", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Clone: dummy-clone
+ Meta Attrs: target-role=Stopped
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
output, retVal = pcs(temp_cib, "resource enable dummy-clone")
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-clone")
- ac(output, """\
- Clone: dummy-clone
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
+ self.assert_pcs_success("resource show dummy-clone", outdent(
+ """\
+ Clone: dummy-clone
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ """
+ ))
def testResourceEnableMaster(self):
- output, retVal = pcs(
- temp_cib,
- "resource create --no-default-ops dummy ocf:pacemaker:Stateful --master"
+ # These tests were moved to
+ # pcs/lib/commands/test/resource/test_resource_enable_disable.py .
+ # However, those test the pcs library. I'm leaving these tests here to
+ # test the cli part for now.
+ self.assert_pcs_success(
+ "resource create --no-default-ops dummy ocf:pacemaker:Stateful --master",
+ "Warning: changing a monitor operation interval from 10 to 11 to make the operation unique\n"
)
- ac(output, "")
- self.assertEqual(retVal, 0)
# disable primitive, enable master
output, retVal = pcs(temp_cib, "resource disable dummy")
@@ -4033,13 +3614,14 @@ Error: Cannot remove more than one resource from cloned group
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-master")
- ac(output, """\
- Master: dummy-master
- Resource: dummy (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(retVal, 0)
+ self.assert_pcs_success("resource show dummy-master", outdent(
+ """\
+ Master: dummy-master
+ Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy-monitor-interval-11)
+ """
+ ))
# disable master, enable primitive
output, retVal = pcs(temp_cib, "resource disable dummy-master")
@@ -4050,13 +3632,14 @@ Error: Cannot remove more than one resource from cloned group
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-master")
- ac(output, """\
- Master: dummy-master
- Resource: dummy (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(retVal, 0)
+ self.assert_pcs_success("resource show dummy-master", outdent(
+ """\
+ Master: dummy-master
+ Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy-monitor-interval-11)
+ """
+ ))
# disable both primitive and master, enable master
output, retVal = pcs(temp_cib, "resource disable dummy-master")
@@ -4071,13 +3654,14 @@ Error: Cannot remove more than one resource from cloned group
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-master")
- ac(output, """\
- Master: dummy-master
- Resource: dummy (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(retVal, 0)
+ self.assert_pcs_success("resource show dummy-master", outdent(
+ """\
+ Master: dummy-master
+ Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy-monitor-interval-11)
+ """
+ ))
# disable both primitive and master, enable primitive
output, retVal = pcs(temp_cib, "resource disable dummy-master")
@@ -4092,37 +3676,43 @@ Error: Cannot remove more than one resource from cloned group
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-master")
- ac(output, """\
- Master: dummy-master
- Resource: dummy (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
- self.assertEqual(retVal, 0)
+ self.assert_pcs_success("resource show dummy-master", outdent(
+ """\
+ Master: dummy-master
+ Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy-monitor-interval-11)
+ """
+ ))
# disable via 'resource disable', enable via 'resource meta'
output, retVal = pcs(temp_cib, "resource disable dummy-master")
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-master")
- ac(output, """\
- Master: dummy-master
- Meta Attrs: target-role=Stopped
- Resource: dummy (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
+ self.assert_pcs_success("resource show dummy-master", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Master: dummy-master
+ Meta Attrs: target-role=Stopped
+ Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy-monitor-interval-11)
+ """
+ ))
output, retVal = pcs(temp_cib, "resource meta dummy-master target-role=")
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-master")
- ac(output, """\
- Master: dummy-master
- Resource: dummy (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
+ self.assert_pcs_success("resource show dummy-master", outdent(
+ """\
+ Master: dummy-master
+ Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy-monitor-interval-11)
+ """
+ ))
# disable via 'resource meta', enable via 'resource enable'
output, retVal = pcs(
@@ -4131,34 +3721,153 @@ Error: Cannot remove more than one resource from cloned group
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-master")
- ac(output, """\
- Master: dummy-master
- Meta Attrs: target-role=Stopped
- Resource: dummy (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
+ self.assert_pcs_success("resource show dummy-master", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Master: dummy-master
+ Meta Attrs: target-role=Stopped
+ Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy-monitor-interval-11)
+ """
+ ))
output, retVal = pcs(temp_cib, "resource enable dummy-master")
ac(output, "")
self.assertEqual(retVal, 0)
- output, retVal = pcs(temp_cib, "resource show dummy-master")
- ac(output, """\
- Master: dummy-master
- Resource: dummy (class=ocf provider=pacemaker type=Stateful)
- Operations: monitor interval=60s (dummy-monitor-interval-60s)
-""")
+ self.assert_pcs_success("resource show dummy-master", outdent(
+ """\
+ Master: dummy-master
+ Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+ Operations: monitor interval=10 role=Master timeout=20 (dummy-monitor-interval-10)
+ monitor interval=11 role=Slave timeout=20 (dummy-monitor-interval-11)
+ """
+ ))
- def testOPOption(self):
- o,r = pcs(temp_cib, "resource create --no-default-ops A ocf:heartbeat:Dummy op monitor interval=30s blah=blah")
- ac(o,"Error: blah is not a valid op option (use --force to override)\n")
- assert r == 1
+ def test_resource_enable_more_resources(self):
+ # These tests were moved to
+ # pcs/lib/commands/test/resource/test_resource_enable_disable.py .
+ # However, those test the pcs library. I'm leaving these tests here to
+ # test the cli part for now.
+ self.assert_pcs_success(
+ "resource create --no-default-ops dummy1 ocf:pacemaker:Dummy"
+ )
+ self.assert_pcs_success(
+ "resource create --no-default-ops dummy2 ocf:pacemaker:Dummy"
+ )
+ self.assert_pcs_success(
+ "resource create --no-default-ops dummy3 ocf:pacemaker:Dummy"
+ )
+ self.assert_pcs_success(
+ "resource show --full",
+ outdent(
+ """\
+ Resource: dummy1 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ Resource: dummy3 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy3-monitor-interval-10)
+ """
+ )
+ )
- o,r = pcs(temp_cib, "resource create --no-default-ops A ocf:heartbeat:Dummy op monitor interval=30s op monitor interval=40s blah=blah")
- ac(o,"Error: blah is not a valid op option (use --force to override)\n")
- assert r == 1
+ self.assert_pcs_success("resource disable dummy1 dummy2")
+ self.assert_pcs_success(
+ "resource show --full",
+ outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Resource: dummy1 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: target-role=Stopped
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: target-role=Stopped
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ Resource: dummy3 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy3-monitor-interval-10)
+ """
+ )
+ )
+
+ self.assert_pcs_success("resource disable dummy2 dummy3")
+ self.assert_pcs_success(
+ "resource show --full",
+ outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Resource: dummy1 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: target-role=Stopped
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: target-role=Stopped
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ Resource: dummy3 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: target-role=Stopped
+ Operations: monitor interval=10 timeout=20 (dummy3-monitor-interval-10)
+ """
+ )
+ )
+
+ self.assert_pcs_success("resource enable dummy1 dummy2")
+ self.assert_pcs_success(
+ "resource show --full",
+ outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Resource: dummy1 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ Resource: dummy3 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: target-role=Stopped
+ Operations: monitor interval=10 timeout=20 (dummy3-monitor-interval-10)
+ """
+ )
+ )
+
+ self.assert_pcs_fail_regardless_of_force(
+ "resource enable dummy3 dummyX",
+ "Error: resource/clone/master/group 'dummyX' does not exist\n"
+ )
+ self.assert_pcs_success(
+ "resource show --full",
+ outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Resource: dummy1 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ Resource: dummy3 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: target-role=Stopped
+ Operations: monitor interval=10 timeout=20 (dummy3-monitor-interval-10)
+ """
+ )
+ )
+
+ self.assert_pcs_fail_regardless_of_force(
+ "resource disable dummy1 dummyX",
+ "Error: resource/clone/master/group 'dummyX' does not exist\n"
+ )
+ self.assert_pcs_success(
+ "resource show --full",
+ outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Resource: dummy1 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ Resource: dummy3 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: target-role=Stopped
+ Operations: monitor interval=10 timeout=20 (dummy3-monitor-interval-10)
+ """
+ )
+ )
+ def testOPOption(self):
o,r = pcs(temp_cib, "resource create --no-default-ops B ocf:heartbeat:Dummy")
ac(o,"")
assert r == 0
@@ -4184,9 +3893,14 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
""")
assert returnVal == 1
- o,r = pcs(temp_cib, "resource show --full")
- ac(o," Resource: B (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (B-monitor-interval-60s)\n Resource: C (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (C-monitor-interval-60s)\n")
- assert r == 0
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Resource: B (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (B-monitor-interval-10)
+ Resource: C (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (C-monitor-interval-10)
+ """
+ ))
o,r = pcs(temp_cib, "resource update B op monitor interval=30s monitor interval=31s role=master")
ac(o,"Error: role must be: Stopped, Started, Slave or Master (use --force to override)\n")
@@ -4196,9 +3910,15 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
ac(o,"")
assert r == 0
- o,r = pcs(temp_cib, "resource show --full")
- ac(o," Resource: B (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=30s (B-monitor-interval-30s)\n monitor interval=31s role=Master (B-monitor-interval-31s)\n Resource: C (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (C-monitor-interval-60s)\n")
- assert r == 0
+ self.assert_pcs_success("resource show --full", outdent(
+ """\
+ Resource: B (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=30s (B-monitor-interval-30s)
+ monitor interval=31s role=Master (B-monitor-interval-31s)
+ Resource: C (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (C-monitor-interval-10)
+ """
+ ))
o,r = pcs(temp_cib, "resource update B op interval=5s")
ac(o,"Error: interval=5s does not appear to be a valid operation action\n")
@@ -4262,96 +3982,74 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
ac(o,"")
assert r == 0
- o,r = pcs("resource delete dummy0")
- ac(o,"Deleting Resource (and group and clone) - dummy0\n")
- assert r == 0
+ self.assert_pcs_success(
+ "resource delete dummy0",
+ "Deleting Resource (and group and clone) - dummy0\n"
+ )
def testResourceMissingValues(self):
- o,r = pcs("resource create --no-default-ops myip IPaddr2")
- ac(o,"Error: missing required option(s): 'ip' for resource type: ocf:heartbeat:IPaddr2 (use --force to override)\nCreating resource 'ocf:heartbeat:IPaddr2'\n")
- assert r == 1
-
- o,r = pcs("resource create --no-default-ops myip IPaddr2 --force")
- ac(o,"Creating resource 'ocf:heartbeat:IPaddr2'\n")
- assert r == 0
-
- o,r = pcs("resource create --no-default-ops myip2 IPaddr2 ip=3.3.3.3")
- ac(o,"Creating resource 'ocf:heartbeat:IPaddr2'\n")
- assert r == 0
-
- o,r = pcs("resource create --no-default-ops myfs Filesystem")
- ac(o,"Error: missing required option(s): 'device, directory, fstype' for resource type: ocf:heartbeat:Filesystem (use --force to override)\nCreating resource 'ocf:heartbeat:Filesystem'\n")
- assert r == 1
-
- o,r = pcs("resource create --no-default-ops myfs Filesystem --force")
- ac(o,"Creating resource 'ocf:heartbeat:Filesystem'\n")
- assert r == 0
-
- o,r = pcs("resource create --no-default-ops myfs2 Filesystem device=x directory=y --force")
- ac(o,"Creating resource 'ocf:heartbeat:Filesystem'\n")
- assert r == 0
-
- o,r = pcs("resource create --no-default-ops myfs3 Filesystem device=x directory=y fstype=z")
- ac(o,"Creating resource 'ocf:heartbeat:Filesystem'\n")
- assert r == 0
-
- o,r = pcs("resource --full")
- ac(o, """\
- Resource: myip (class=ocf provider=heartbeat type=IPaddr2)
- Operations: monitor interval=60s (myip-monitor-interval-60s)
- Resource: myip2 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=3.3.3.3
- Operations: monitor interval=60s (myip2-monitor-interval-60s)
- Resource: myfs (class=ocf provider=heartbeat type=Filesystem)
- Operations: monitor interval=60s (myfs-monitor-interval-60s)
- Resource: myfs2 (class=ocf provider=heartbeat type=Filesystem)
- Attributes: device=x directory=y
- Operations: monitor interval=60s (myfs2-monitor-interval-60s)
- Resource: myfs3 (class=ocf provider=heartbeat type=Filesystem)
- Attributes: device=x directory=y fstype=z
- Operations: monitor interval=60s (myfs3-monitor-interval-60s)
-""")
- assert r == 0
-
- def testDefaultOps(self):
- o,r = pcs("resource create X0 ocf:heartbeat:Dummy")
- ac(o,"")
- assert r == 0
+ self.assert_pcs_success(
+ "resource create --no-default-ops myip IPaddr2 --force",
+ outdent(
+ """\
+ Assumed agent name 'ocf:heartbeat:IPaddr2' (deduced from 'IPaddr2')
+ Warning: required resource option 'ip' is missing
+ """
+ )
+ )
+ self.assert_pcs_success(
+ "resource create --no-default-ops myip2 IPaddr2 ip=3.3.3.3",
+ "Assumed agent name 'ocf:heartbeat:IPaddr2'"
+ " (deduced from 'IPaddr2')\n"
+ )
- o,r = pcs("resource create X1 ocf:heartbeat:Dummy op monitor interval=90s")
- ac(o,"")
- assert r == 0
+ self.assert_pcs_success(
+ "resource create --no-default-ops myfs Filesystem --force",
+ outdent(
+ """\
+ Assumed agent name 'ocf:heartbeat:Filesystem' (deduced from 'Filesystem')
+ Warning: required resource options 'device', 'directory', 'fstype' are missing
+ """
+ )
+ )
- o,r = pcs("resource create X2 IPaddr2 ip=1.1.1.1")
- ac(o,"Creating resource 'ocf:heartbeat:IPaddr2'\n")
- assert r == 0
+ self.assert_pcs_success(
+ "resource create --no-default-ops myfs2 Filesystem device=x"
+ " directory=y --force"
+ ,
+ outdent(
+ """\
+ Assumed agent name 'ocf:heartbeat:Filesystem' (deduced from 'Filesystem')
+ Warning: required resource option 'fstype' is missing
+ """
+ )
+ )
- o,r = pcs("resource create X3 IPaddr2 ip=1.1.1.1 op monitor interval=1s start timeout=1s stop timeout=1s")
- ac(o,"Creating resource 'ocf:heartbeat:IPaddr2'\n")
- assert r == 0
+ self.assert_pcs_success(
+ "resource create --no-default-ops myfs3 Filesystem device=x"
+ " directory=y fstype=z"
+ ,
+ "Assumed agent name 'ocf:heartbeat:Filesystem'"
+ " (deduced from 'Filesystem')\n"
+ )
- o,r = pcs("resource --full")
- ac(o, """\
- Resource: X0 (class=ocf provider=heartbeat type=Dummy)
- Operations: start interval=0s timeout=20 (X0-start-interval-0s)
- stop interval=0s timeout=20 (X0-stop-interval-0s)
- monitor interval=10 timeout=20 (X0-monitor-interval-10)
- Resource: X1 (class=ocf provider=heartbeat type=Dummy)
- Operations: start interval=0s timeout=20 (X1-start-interval-0s)
- stop interval=0s timeout=20 (X1-stop-interval-0s)
- monitor interval=90s (X1-monitor-interval-90s)
- Resource: X2 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=1.1.1.1
- Operations: start interval=0s timeout=20s (X2-start-interval-0s)
- stop interval=0s timeout=20s (X2-stop-interval-0s)
- monitor interval=10s timeout=20s (X2-monitor-interval-10s)
- Resource: X3 (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: ip=1.1.1.1
- Operations: monitor interval=1s (X3-monitor-interval-1s)
- start interval=0s timeout=1s (X3-start-interval-0s)
- stop interval=0s timeout=1s (X3-stop-interval-0s)
-""")
- assert r == 0
+ self.assert_pcs_success("resource --full", outdent(
+ """\
+ Resource: myip (class=ocf provider=heartbeat type=IPaddr2)
+ Operations: monitor interval=10s timeout=20s (myip-monitor-interval-10s)
+ Resource: myip2 (class=ocf provider=heartbeat type=IPaddr2)
+ Attributes: ip=3.3.3.3
+ Operations: monitor interval=10s timeout=20s (myip2-monitor-interval-10s)
+ Resource: myfs (class=ocf provider=heartbeat type=Filesystem)
+ Operations: monitor interval=20 timeout=40 (myfs-monitor-interval-20)
+ Resource: myfs2 (class=ocf provider=heartbeat type=Filesystem)
+ Attributes: device=x directory=y
+ Operations: monitor interval=20 timeout=40 (myfs2-monitor-interval-20)
+ Resource: myfs3 (class=ocf provider=heartbeat type=Filesystem)
+ Attributes: device=x directory=y fstype=z
+ Operations: monitor interval=20 timeout=40 (myfs3-monitor-interval-20)
+ """
+ ))
def testClonedMasteredGroup(self):
output, retVal = pcs(
@@ -4376,9 +4074,19 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
output, retVal = pcs(temp_cib, "resource clone dummies")
ac(output, "")
assert retVal == 0
- output, retVal = pcs(temp_cib, "resource show dummies-clone")
- ac(output, " Clone: dummies-clone\n Group: dummies\n Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (dummy1-monitor-interval-60s)\n Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (dummy2-monitor-interval-60s)\n Resource: dummy3 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (dummy3-monitor-interval-60s)\n")
- assert retVal == 0
+
+ self.assert_pcs_success("resource show dummies-clone", outdent(
+ """\
+ Clone: dummies-clone
+ Group: dummies
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ Resource: dummy3 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy3-monitor-interval-10)
+ """
+ ))
output, retVal = pcs(temp_cib, "resource unclone dummies-clone")
ac(output, "")
@@ -4395,13 +4103,29 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
output, retVal = pcs(temp_cib, "resource clone dummies")
ac(output, "")
assert retVal == 0
- output, retVal = pcs(temp_cib, "resource show dummies-clone")
- ac(output, " Clone: dummies-clone\n Group: dummies\n Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (dummy1-monitor-interval-60s)\n Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (dummy2-monitor-interval-60s)\n Resource: dummy3 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (dummy3-monitor-interval-60s)\n")
- assert retVal == 0
- output, retVal = pcs(temp_cib, "resource delete dummies-clone")
- ac(output, "Removing group: dummies (and all resources within group)\nStopping all resources in group: dummies...\nDeleting Resource - dummy1\nDeleting Resource - dummy2\nDeleting Resource (and group and clone) - dummy3\n")
- assert retVal == 0
+ self.assert_pcs_success("resource show dummies-clone", outdent(
+ """\
+ Clone: dummies-clone
+ Group: dummies
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ Resource: dummy3 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy3-monitor-interval-10)
+ """
+ ))
+
+ self.assert_pcs_success("resource delete dummies-clone", outdent(
+ """\
+ Removing group: dummies (and all resources within group)
+ Stopping all resources in group: dummies...
+ Deleting Resource - dummy1
+ Deleting Resource - dummy2
+ Deleting Resource (and group and clone) - dummy3
+ """
+ ))
output, retVal = pcs(temp_cib, "resource show")
ac(output, "NO resources configured\n")
assert retVal == 0
@@ -4428,9 +4152,19 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
output, retVal = pcs(temp_cib, "resource master dummies")
ac(output, "")
assert retVal == 0
- output, retVal = pcs(temp_cib, "resource show dummies-master")
- ac(output, " Master: dummies-master\n Group: dummies\n Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (dummy1-monitor-interval-60s)\n Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (dummy2-monitor-interval-60s)\n Resource: dummy3 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (dummy3-monitor-interval-60s)\n")
- assert retVal == 0
+
+ self.assert_pcs_success("resource show dummies-master", outdent(
+ """\
+ Master: dummies-master
+ Group: dummies
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ Resource: dummy3 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy3-monitor-interval-10)
+ """
+ ))
output, retVal = pcs(temp_cib, "resource unclone dummies-master")
ac(output, "")
@@ -4447,13 +4181,29 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
output, retVal = pcs(temp_cib, "resource master dummies")
ac(output, "")
assert retVal == 0
- output, retVal = pcs(temp_cib, "resource show dummies-master")
- ac(output, " Master: dummies-master\n Group: dummies\n Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (dummy1-monitor-interval-60s)\n Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (dummy2-monitor-interval-60s)\n Resource: dummy3 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (dummy3-monitor-interval-60s)\n")
- assert retVal == 0
- output, retVal = pcs(temp_cib, "resource delete dummies-master")
- ac(output, "Removing group: dummies (and all resources within group)\nStopping all resources in group: dummies...\nDeleting Resource - dummy1\nDeleting Resource - dummy2\nDeleting Resource (and group and M/S) - dummy3\n")
- assert retVal == 0
+ self.assert_pcs_success("resource show dummies-master", outdent(
+ """\
+ Master: dummies-master
+ Group: dummies
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy1-monitor-interval-10)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy2-monitor-interval-10)
+ Resource: dummy3 (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy3-monitor-interval-10)
+ """
+ ))
+
+ self.assert_pcs_success("resource delete dummies-master", outdent(
+ """\
+ Removing group: dummies (and all resources within group)
+ Stopping all resources in group: dummies...
+ Deleting Resource - dummy1
+ Deleting Resource - dummy2
+ Deleting Resource (and group and M/S) - dummy3
+ """
+ ))
output, retVal = pcs(temp_cib, "resource show")
ac(output, "NO resources configured\n")
assert retVal == 0
@@ -4498,24 +4248,27 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
self.assertEqual(0, retVal)
ac(output, "")
- status = """\
- Resource: D1 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (D1-monitor-interval-60s)
- Group: GR
- Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (DG1-monitor-interval-60s)
- Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (DG2-monitor-interval-60s)
- Clone: DC-clone
- Resource: DC (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (DC-monitor-interval-60s)
- Clone: GRC-clone
- Group: GRC
- Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (DGC1-monitor-interval-60s)
- Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (DGC2-monitor-interval-60s)
-"""
+ status = outdent(
+ """\
+ Resource: D1 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ Group: GR
+ Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (DG1-monitor-interval-10)
+ Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (DG2-monitor-interval-10)
+ Clone: DC-clone
+ Resource: DC (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (DC-monitor-interval-10)
+ Clone: GRC-clone
+ Group: GRC
+ Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (DGC1-monitor-interval-10)
+ Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (DGC2-monitor-interval-10)
+ """
+ )
+
cib_original, retVal = pcs(temp_cib, "cluster cib")
self.assertEqual(0, retVal)
@@ -4537,36 +4290,38 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
self.assertEqual(0, retVal)
with open(temp_cib, "w") as f:
f.write(cib_out.toxml())
- output, retVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Resource: D1 (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (D1-monitor-interval-60s)
- Group: GR
- Meta Attrs: resource-stickiness=0
- Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DG1-monitor-interval-60s)
- Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DG2-monitor-interval-60s)
- Clone: DC-clone
- Meta Attrs: resource-stickiness=0
- Resource: DC (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DC-monitor-interval-60s)
- Clone: GRC-clone
- Meta Attrs: resource-stickiness=0
- Group: GRC
- Meta Attrs: resource-stickiness=0
- Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DGC1-monitor-interval-60s)
- Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DGC2-monitor-interval-60s)
-""")
- self.assertEqual(0, retVal)
+
+ self.assert_pcs_success("resource --full", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Resource: D1 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ Group: GR
+ Meta Attrs: resource-stickiness=0
+ Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DG1-monitor-interval-10)
+ Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DG2-monitor-interval-10)
+ Clone: DC-clone
+ Meta Attrs: resource-stickiness=0
+ Resource: DC (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DC-monitor-interval-10)
+ Clone: GRC-clone
+ Meta Attrs: resource-stickiness=0
+ Group: GRC
+ Meta Attrs: resource-stickiness=0
+ Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DGC1-monitor-interval-10)
+ Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DGC2-monitor-interval-10)
+ """
+ ))
resources = set(["D1", "DG1", "DC", "DGC1"])
with open(temp_cib, "w") as f:
@@ -4585,30 +4340,31 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
self.assertEqual(0, retVal)
with open(temp_cib, "w") as f:
f.write(cib_out.toxml())
- output, retVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Resource: D1 (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (D1-monitor-interval-60s)
- Group: GR
- Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DG1-monitor-interval-60s)
- Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (DG2-monitor-interval-60s)
- Clone: DC-clone
- Resource: DC (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DC-monitor-interval-60s)
- Clone: GRC-clone
- Group: GRC
- Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DGC1-monitor-interval-60s)
- Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (DGC2-monitor-interval-60s)
-""")
- self.assertEqual(0, retVal)
+ self.assert_pcs_success("resource --full", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Resource: D1 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ Group: GR
+ Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DG1-monitor-interval-10)
+ Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (DG2-monitor-interval-10)
+ Clone: DC-clone
+ Resource: DC (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DC-monitor-interval-10)
+ Clone: GRC-clone
+ Group: GRC
+ Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DGC1-monitor-interval-10)
+ Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (DGC2-monitor-interval-10)
+ """
+ ))
resources = set(["GRC-clone", "GRC", "DGC1", "DGC2"])
with open(temp_cib, "w") as f:
@@ -4627,30 +4383,31 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
self.assertEqual(0, retVal)
with open(temp_cib, "w") as f:
f.write(cib_out.toxml())
- output, retVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Resource: D1 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (D1-monitor-interval-60s)
- Group: GR
- Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (DG1-monitor-interval-60s)
- Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (DG2-monitor-interval-60s)
- Clone: DC-clone
- Resource: DC (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (DC-monitor-interval-60s)
- Clone: GRC-clone
- Meta Attrs: resource-stickiness=0
- Group: GRC
- Meta Attrs: resource-stickiness=0
- Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DGC1-monitor-interval-60s)
- Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DGC2-monitor-interval-60s)
-""")
- self.assertEqual(0, retVal)
+ self.assert_pcs_success("resource --full", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Resource: D1 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ Group: GR
+ Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (DG1-monitor-interval-10)
+ Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (DG2-monitor-interval-10)
+ Clone: DC-clone
+ Resource: DC (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (DC-monitor-interval-10)
+ Clone: GRC-clone
+ Meta Attrs: resource-stickiness=0
+ Group: GRC
+ Meta Attrs: resource-stickiness=0
+ Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DGC1-monitor-interval-10)
+ Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DGC2-monitor-interval-10)
+ """
+ ))
resources = set(["GR", "DG1", "DG2", "DC-clone", "DC"])
with open(temp_cib, "w") as f:
@@ -4669,33 +4426,35 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
self.assertEqual(0, retVal)
with open(temp_cib, "w") as f:
f.write(cib_out.toxml())
- output, retVal = pcs(temp_cib, "resource --full")
- ac(output, """\
- Resource: D1 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (D1-monitor-interval-60s)
- Group: GR
- Meta Attrs: resource-stickiness=0
- Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DG1-monitor-interval-60s)
- Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DG2-monitor-interval-60s)
- Clone: DC-clone
- Meta Attrs: resource-stickiness=0
- Resource: DC (class=ocf provider=pacemaker type=Dummy)
- Meta Attrs: resource-stickiness=0
- Operations: monitor interval=60s (DC-monitor-interval-60s)
- Clone: GRC-clone
- Group: GRC
- Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (DGC1-monitor-interval-60s)
- Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
- Operations: monitor interval=60s (DGC2-monitor-interval-60s)
-""")
- self.assertEqual(0, retVal)
+ self.assert_pcs_success("resource --full", outdent(
+ # pylint:disable=trailing-whitespace
+ """\
+ Resource: D1 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (D1-monitor-interval-10)
+ Group: GR
+ Meta Attrs: resource-stickiness=0
+ Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DG1-monitor-interval-10)
+ Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DG2-monitor-interval-10)
+ Clone: DC-clone
+ Meta Attrs: resource-stickiness=0
+ Resource: DC (class=ocf provider=pacemaker type=Dummy)
+ Meta Attrs: resource-stickiness=0
+ Operations: monitor interval=10 timeout=20 (DC-monitor-interval-10)
+ Clone: GRC-clone
+ Group: GRC
+ Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (DGC1-monitor-interval-10)
+ Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
+ Operations: monitor interval=10 timeout=20 (DGC2-monitor-interval-10)
+ """
+ ))
def testResrourceUtilizationSet(self):
+ # see also BundleMiscCommands
output, returnVal = pcs(
temp_large_cib, "resource utilization dummy test1=10"
)
@@ -4841,16 +4600,15 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
self.assert_pcs_success(
"resource create dummy ocf:heartbeat:Dummy --clone"
)
- self.assert_pcs_success(
- "resource show dummy-clone",
+ self.assert_pcs_success("resource show dummy-clone", outdent(
"""\
- Clone: dummy-clone
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: start interval=0s timeout=20 (dummy-start-interval-0s)
- stop interval=0s timeout=20 (dummy-stop-interval-0s)
- monitor interval=10 timeout=20 (dummy-monitor-interval-10)
-"""
- )
+ Clone: dummy-clone
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ start interval=0s timeout=20 (dummy-start-interval-0s)
+ stop interval=0s timeout=20 (dummy-stop-interval-0s)
+ """
+ ))
self.assert_pcs_fail(
"resource update dummy-clone op stop timeout=300",
"Error: op settings must be changed on base resource, not the clone\n"
@@ -4859,31 +4617,29 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
"resource update dummy-clone foo=bar op stop timeout=300",
"Error: op settings must be changed on base resource, not the clone\n"
)
- self.assert_pcs_success(
- "resource show dummy-clone",
+ self.assert_pcs_success("resource show dummy-clone", outdent(
"""\
- Clone: dummy-clone
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: start interval=0s timeout=20 (dummy-start-interval-0s)
- stop interval=0s timeout=20 (dummy-stop-interval-0s)
- monitor interval=10 timeout=20 (dummy-monitor-interval-10)
-"""
- )
+ Clone: dummy-clone
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ start interval=0s timeout=20 (dummy-start-interval-0s)
+ stop interval=0s timeout=20 (dummy-stop-interval-0s)
+ """
+ ))
def test_no_op_allowed_in_master_update(self):
self.assert_pcs_success(
"resource create dummy ocf:heartbeat:Dummy --master"
)
- self.assert_pcs_success(
- "resource show dummy-master",
+ self.assert_pcs_success("resource show dummy-master", outdent(
"""\
- Master: dummy-master
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: start interval=0s timeout=20 (dummy-start-interval-0s)
- stop interval=0s timeout=20 (dummy-stop-interval-0s)
- monitor interval=10 timeout=20 (dummy-monitor-interval-10)
-"""
- )
+ Master: dummy-master
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ start interval=0s timeout=20 (dummy-start-interval-0s)
+ stop interval=0s timeout=20 (dummy-stop-interval-0s)
+ """
+ ))
self.assert_pcs_fail(
"resource update dummy-master op stop timeout=300",
"Error: op settings must be changed on base resource, not the master\n"
@@ -4892,16 +4648,15 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
"resource update dummy-master foo=bar op stop timeout=300",
"Error: op settings must be changed on base resource, not the master\n"
)
- self.assert_pcs_success(
- "resource show dummy-master",
+ self.assert_pcs_success("resource show dummy-master", outdent(
"""\
- Master: dummy-master
- Resource: dummy (class=ocf provider=heartbeat type=Dummy)
- Operations: start interval=0s timeout=20 (dummy-start-interval-0s)
- stop interval=0s timeout=20 (dummy-stop-interval-0s)
- monitor interval=10 timeout=20 (dummy-monitor-interval-10)
-"""
- )
+ Master: dummy-master
+ Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+ Operations: monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+ start interval=0s timeout=20 (dummy-start-interval-0s)
+ stop interval=0s timeout=20 (dummy-stop-interval-0s)
+ """
+ ))
class ResourceRemoveWithTicketTest(unittest.TestCase, AssertPcsMixin):
def setUp(self):
@@ -4927,3 +4682,379 @@ class ResourceRemoveWithTicketTest(unittest.TestCase, AssertPcsMixin):
"Deleting Resource - A",
]
)
+
+
+class BundleCommon(
+ TestCase,
+ get_assert_pcs_effect_mixin(
+ lambda cib: etree.tostring(
+ # pylint:disable=undefined-variable
+ etree.parse(cib).findall(".//resources")[0]
+ )
+ )
+):
+ temp_cib = rc("temp-cib.xml")
+ empty_cib = rc("cib-empty-2.8.xml")
+
+ def setUp(self):
+ shutil.copy(self.empty_cib, self.temp_cib)
+ self.pcs_runner = PcsRunner(self.temp_cib)
+
+ def fixture_primitive(self, name, bundle):
+ self.assert_pcs_success(
+ "resource create {0} ocf:heartbeat:Dummy bundle {1}".format(
+ name, bundle
+ )
+ )
+
+ def fixture_bundle(self, name):
+ self.assert_pcs_success(
+ "resource bundle create {0} container image=pcs:test".format(
+ name
+ )
+ )
+
+
+@skip_unless_pacemaker_supports_bundle
+class BundleDeleteTest(BundleCommon):
+ def test_without_primitive(self):
+ self.fixture_bundle("B")
+ self.assert_effect("resource delete B", "<resources/>")
+
+ def test_with_primitive(self):
+ self.fixture_bundle("B")
+ self.fixture_primitive("R", "B")
+ self.assert_effect(
+ "resource delete B",
+ "<resources/>",
+ "Deleting Resource - R\n",
+ )
+
+ def test_remove_primitive(self):
+ self.fixture_bundle("B")
+ self.fixture_primitive("R", "B")
+ self.assert_effect(
+ "resource delete R",
+ """
+ <resources>
+ <bundle id="B">
+ <docker image="pcs:test" />
+ </bundle>
+ </resources>
+ """,
+ "Deleting Resource - R\n",
+ )
+
+
+@skip_unless_pacemaker_supports_bundle
+class BundleGroup(BundleCommon):
+ def test_group_add_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource group add bundles B",
+ "Error: Unable to find resource: B\n"
+ )
+
+ def test_group_add_primitive(self):
+ self.fixture_bundle("B")
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource group add group R",
+ "Error: cannot group bundle resources\n"
+ )
+
+ def test_group_remove_primitive(self):
+ self.fixture_bundle("B")
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource group remove B R",
+ "Error: Group 'B' does not exist\n"
+ )
+
+ def test_ungroup_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource ungroup B",
+ "Error: Group 'B' does not exist\n"
+ )
+
+
+@skip_unless_pacemaker_supports_bundle
+class BundleCloneMaster(BundleCommon):
+ def test_clone_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource clone B",
+ "Error: unable to find group or resource: B\n"
+ )
+
+ def test_master_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource master B",
+ "Error: Unable to find resource or group with id B\n"
+ )
+
+ def test_clone_primitive(self):
+ self.fixture_bundle("B")
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource clone R",
+ "Error: cannot clone bundle resource\n"
+ )
+
+ def test_master_primitive(self):
+ self.fixture_bundle("B")
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource master R",
+ "Error: cannot make a master/slave resource from a bundle resource\n"
+ )
+
+ def test_unclone_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource unclone B",
+ "Error: could not find resource: B\n"
+ )
+
+
+@skip_unless_pacemaker_supports_bundle
+class BundleMiscCommands(BundleCommon):
+ def test_resource_enable_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource enable B",
+ "Error: 'B' is not clone/master/a group/primitive\n"
+ )
+
+ def test_resource_disable_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource disable B",
+ "Error: 'B' is not clone/master/a group/primitive\n"
+ )
+
+ def test_resource_manage_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource manage B",
+ "Error: 'B' is not clone/master/a group/primitive\n"
+ )
+
+ def test_resource_unmanage_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource unmanage B",
+ "Error: 'B' is not clone/master/a group/primitive\n"
+ )
+
+ def test_op_add(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource op add B monitor interval=30",
+ "Error: Unable to find resource: B\n"
+ )
+
+ def test_op_remove(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource op remove B monitor interval=30",
+ "Error: Unable to find resource: B\n"
+ )
+
+ def test_update(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource update B meta aaa=bbb",
+ "Error: Unable to find resource: B\n"
+ )
+
+ def test_meta(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource meta B aaa=bbb",
+ "Error: unable to find a resource/clone/master/group: B\n"
+ )
+
+ def test_utilization(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource utilization B aaa=10",
+ "Error: Unable to find a resource: B\n"
+ )
+
+ def test_debug_start_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource debug-start B",
+ "Error: unable to debug-start a bundle\n"
+ )
+
+ def test_debug_start_with_resource(self):
+ self.fixture_bundle("B")
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource debug-start B",
+ "Error: unable to debug-start a bundle, try the bundle's resource: R\n"
+ )
+
+ def test_debug_stop_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource debug-stop B",
+ "Error: unable to debug-stop a bundle\n"
+ )
+
+ def test_debug_stop_with_resource(self):
+ self.fixture_bundle("B")
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource debug-stop B",
+ "Error: unable to debug-stop a bundle, try the bundle's resource: R\n"
+ )
+
+ def test_debug_monitor_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource debug-monitor B",
+ "Error: unable to debug-monitor a bundle\n"
+ )
+
+ def test_debug_monitor_with_resource(self):
+ self.fixture_bundle("B")
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource debug-monitor B",
+ "Error: unable to debug-monitor a bundle, try the bundle's resource: R\n"
+ )
+
+ def test_debug_promote_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource debug-promote B",
+ "Error: unable to debug-promote a bundle\n"
+ )
+
+ def test_debug_promote_with_resource(self):
+ self.fixture_bundle("B")
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource debug-promote B",
+ "Error: unable to debug-promote a bundle, try the bundle's resource: R\n"
+ )
+
+ def test_debug_demote_bundle(self):
+ self.fixture_bundle("B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource debug-demote B",
+ "Error: unable to debug-demote a bundle\n"
+ )
+
+ def test_debug_demote_with_resource(self):
+ self.fixture_bundle("B")
+ self.fixture_primitive("R", "B")
+ self.assert_pcs_fail_regardless_of_force(
+ "resource debug-demote B",
+ "Error: unable to debug-demote a bundle, try the bundle's resource: R\n"
+ )
+
+
+class ResourceUpdateSpcialChecks(unittest.TestCase, AssertPcsMixin):
+ def setUp(self):
+ shutil.copy(rc('cib-empty-1.2.xml'), temp_cib)
+ self.pcs_runner = PcsRunner(temp_cib)
+
+ def test_update_fail_on_pacemaker_guest_attempt(self):
+ self.assert_pcs_success(
+ "resource create R ocf:heartbeat:Dummy",
+ )
+ self.assert_pcs_fail(
+ "resource update R meta remote-node=HOST",
+ "Error: this command is not sufficient for creating a guest node,"
+ " use 'pcs cluster node add-guest', use --force to override\n"
+ )
+ def test_update_warn_on_pacemaker_guest_attempt(self):
+ self.assert_pcs_success(
+ "resource create R ocf:heartbeat:Dummy",
+ )
+ self.assert_pcs_success(
+ "resource update R meta remote-node=HOST --force",
+ "Warning: this command is not sufficient for creating a guest node,"
+ " use 'pcs cluster node add-guest'\n"
+ )
+ def test_update_fail_on_pacemaker_guest_attempt_remove(self):
+ self.assert_pcs_success(
+ "resource create R ocf:heartbeat:Dummy meta remote-node=HOST"
+ " --force"
+ ,
+ "Warning: this command is not sufficient for creating a guest node,"
+ " use 'pcs cluster node add-guest'\n"
+ )
+ self.assert_pcs_fail(
+ "resource update R meta remote-node=",
+ "Error: this command is not sufficient for removing a guest node,"
+ " use 'pcs cluster node remove-guest', use --force to override\n"
+ )
+
+ def test_update_warn_on_pacemaker_guest_attempt_remove(self):
+ self.assert_pcs_success(
+ "resource create R ocf:heartbeat:Dummy meta remote-node=HOST"
+ " --force"
+ ,
+ "Warning: this command is not sufficient for creating a guest node,"
+ " use 'pcs cluster node add-guest'\n"
+ )
+ self.assert_pcs_success(
+ "resource update R meta remote-node= --force",
+ "Warning: this command is not sufficient for removing a guest node,"
+ " use 'pcs cluster node remove-guest'\n"
+ )
+
+ def test_meta_fail_on_pacemaker_guest_attempt(self):
+ self.assert_pcs_success(
+ "resource create R ocf:heartbeat:Dummy",
+ )
+ self.assert_pcs_fail(
+ "resource meta R remote-node=HOST",
+ "Error: this command is not sufficient for creating a guest node,"
+ " use 'pcs cluster node add-guest', use --force to override\n"
+ )
+
+ def test_meta_warn_on_pacemaker_guest_attempt(self):
+ self.assert_pcs_success(
+ "resource create R ocf:heartbeat:Dummy",
+ )
+ self.assert_pcs_success(
+ "resource meta R remote-node=HOST --force",
+ "Warning: this command is not sufficient for creating a guest node,"
+ " use 'pcs cluster node add-guest'\n"
+ )
+
+ def test_meta_fail_on_pacemaker_guest_attempt_remove(self):
+ self.assert_pcs_success(
+ "resource create R ocf:heartbeat:Dummy meta remote-node=HOST"
+ " --force"
+ ,
+ "Warning: this command is not sufficient for creating a guest node,"
+ " use 'pcs cluster node add-guest'\n"
+ )
+ self.assert_pcs_fail(
+ "resource meta R remote-node=",
+ "Error: this command is not sufficient for removing a guest node,"
+ " use 'pcs cluster node remove-guest', use --force to override\n"
+ )
+
+ def test_meta_warn_on_pacemaker_guest_attempt_remove(self):
+ self.assert_pcs_success(
+ "resource create R ocf:heartbeat:Dummy meta remote-node=HOST"
+ " --force"
+ ,
+ "Warning: this command is not sufficient for creating a guest node,"
+ " use 'pcs cluster node add-guest'\n"
+ )
+ self.assert_pcs_success(
+ "resource meta R remote-node= --force",
+ "Warning: this command is not sufficient for removing a guest node,"
+ " use 'pcs cluster node remove-guest'\n"
+ )
diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
index 5cac717..f1c2d75 100644
--- a/pcs/test/test_stonith.py
+++ b/pcs/test/test_stonith.py
@@ -7,25 +7,48 @@ from __future__ import (
import shutil
+from pcs import utils
+from pcs.cli.common.console_report import indent
from pcs.test.tools.assertions import AssertPcsMixin
from pcs.test.tools.misc import (
ac,
get_test_resource as rc,
+ is_minimum_pacemaker_version,
+ skip_unless_pacemaker_version,
+ outdent,
+)
+from pcs.test.tools.pcs_runner import (
+ pcs,
+ PcsRunner,
)
-from pcs.test.tools.pcs_runner import pcs, PcsRunner
-from pcs.test.tools import pcs_unittest as unittest
+from pcs.test.tools.pcs_unittest import TestCase
-from pcs import utils
empty_cib = rc("cib-empty.xml")
temp_cib = rc("temp-cib.xml")
+# target-pattern attribute was added in pacemaker 1.1.13 with validate-with 2.3.
+# However, in pcs this was implemented much later, together with target-attribute
+# support. By that time pacemaker 1.1.12 was quite old. To keep tests simple we
+# do not run fencing topology tests on pacemaker older than 1.1.13 even if it
+# supports targeting by node names.
+skip_unless_fencing_level_supported = skip_unless_pacemaker_version(
+ (1, 1, 13),
+ "fencing levels"
+)
+# target-attribute and target-value attributes were added in pacemaker 1.1.14
+# with validate-with 2.4.
+fencing_level_attribute_supported = is_minimum_pacemaker_version(1, 1, 14)
+skip_unless_fencing_level_attribute_supported = skip_unless_pacemaker_version(
+ (1, 1, 14),
+ "fencing levels with attribute targets"
+)
+
-class StonithDescribeTest(unittest.TestCase, AssertPcsMixin):
+class StonithDescribeTest(TestCase, AssertPcsMixin):
def setUp(self):
self.pcs_runner = PcsRunner(temp_cib)
-
def test_success(self):
self.assert_pcs_success(
"stonith describe fence_apc",
@@ -38,6 +61,12 @@ Stonith options:
"""
)
+ def test_full(self):
+ stdout, pcs_returncode = self.pcs_runner.run(
+ "stonith describe fence_apc --full",
+ )
+ self.assertEqual(0, pcs_returncode)
+ self.assertTrue("pcmk_list_retries" in stdout)
def test_nonextisting_agent(self):
self.assert_pcs_fail(
@@ -49,14 +78,12 @@ Stonith options:
)
)
-
def test_not_enough_params(self):
self.assert_pcs_fail(
"stonith describe",
stdout_start="\nUsage: pcs stonith describe...\n"
)
-
def test_too_many_params(self):
self.assert_pcs_fail(
"stonith describe agent1 agent2",
@@ -64,8 +91,9 @@ Stonith options:
)
-class StonithTest(unittest.TestCase):
+class StonithTest(TestCase, AssertPcsMixin):
def setUp(self):
+ self.pcs_runner = PcsRunner(temp_cib)
shutil.copy(empty_cib, temp_cib)
def testStonithCreation(self):
@@ -77,67 +105,77 @@ class StonithTest(unittest.TestCase):
ac(output, "Warning: Agent 'fence_noxist' is not installed or does not provide valid metadata: Metadata query for stonith:fence_noxist failed: -5\n")
self.assertEqual(returnVal, 0)
- output, returnVal = pcs(temp_cib, "stonith create test2 fence_apc")
- assert returnVal == 1
- ac(output,"Error: missing required option(s): 'ipaddr, login' for resource type: stonith:fence_apc (use --force to override)\n")
+ self.assert_pcs_fail(
+ "stonith create test2 fence_apc",
+ "Error: required resource options 'ipaddr', 'login' are missing, use --force to override\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith create test2 fence_ilo --force")
- assert returnVal == 0
- ac(output,"")
+ self.assert_pcs_success(
+ "stonith create test2 fence_apc --force",
+ "Warning: required resource options 'ipaddr', 'login' are missing\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith create test3 fence_ilo bad_argument=test")
- assert returnVal == 1
- assert output == "Error: resource option(s): 'bad_argument', are not recognized for resource type: 'stonith:fence_ilo' (use --force to override)\n",[output]
+ self.assert_pcs_fail(
+ "stonith create test3 fence_apc bad_argument=test",
+ stdout_start="Error: invalid resource option 'bad_argument',"
+ " allowed options are:"
+ )
- output, returnVal = pcs(temp_cib, "stonith create test9 fence_apc pcmk_status_action=xxx")
- assert returnVal == 1
- ac(output,"Error: missing required option(s): 'ipaddr, login' for resource type: stonith:fence_apc (use --force to override)\n")
+ self.assert_pcs_fail(
+ "stonith create test9 fence_apc pcmk_status_action=xxx",
+ "Error: required resource options 'ipaddr', 'login' are missing, use --force to override\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith create test9 fence_ilo pcmk_status_action=xxx --force")
- assert returnVal == 0
- ac(output,"")
+ self.assert_pcs_success(
+ "stonith create test9 fence_apc pcmk_status_action=xxx --force",
+ "Warning: required resource options 'ipaddr', 'login' are missing\n"
+ )
output, returnVal = pcs(temp_cib, "stonith show test9")
ac(output, """\
- Resource: test9 (class=stonith type=fence_ilo)
+ Resource: test9 (class=stonith type=fence_apc)
Attributes: pcmk_status_action=xxx
Operations: monitor interval=60s (test9-monitor-interval-60s)
""")
assert returnVal == 0
- output, returnVal = pcs(temp_cib, "stonith delete test9")
- assert returnVal == 0
- assert output == "Deleting Resource - test9\n",[output]
+ self.assert_pcs_success(
+ "stonith delete test9",
+ "Deleting Resource - test9\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith create test3 fence_ilo ipaddr=test")
- assert returnVal == 1
- ac(output,"Error: missing required option(s): 'login' for resource type: stonith:fence_ilo (use --force to override)\n")
+ self.assert_pcs_fail(
+ "stonith create test3 fence_ilo ipaddr=test",
+ "Error: required resource option 'login' is missing, use --force to override\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith create test3 fence_ilo ipaddr=test --force")
- assert returnVal == 0
- ac(output,"")
+ self.assert_pcs_success(
+ "stonith create test3 fence_ilo ipaddr=test --force",
+ "Warning: required resource option 'login' is missing\n"
+ )
# Testing that pcmk_host_check, pcmk_host_list & pcmk_host_map are allowed for
# stonith agents
- output, returnVal = pcs(temp_cib, 'stonith create apc-fencing fence_apc params ipaddr="morph-apc" login="apc" passwd="apc" switch="1" pcmk_host_map="buzz-01:1;buzz-02:2;buzz-03:3;buzz-04:4;buzz-05:5" pcmk_host_check="static-list" pcmk_host_list="buzz-01,buzz-02,buzz-03,buzz-04,buzz-05"')
- assert returnVal == 0
- ac(output,"")
+ self.assert_pcs_success(
+ 'stonith create apc-fencing fence_apc ipaddr="morph-apc" login="apc" passwd="apc" switch="1" pcmk_host_map="buzz-01:1;buzz-02:2;buzz-03:3;buzz-04:4;buzz-05:5" pcmk_host_check="static-list" pcmk_host_list="buzz-01,buzz-02,buzz-03,buzz-04,buzz-05"',
+ )
output, returnVal = pcs(temp_cib, 'resource show apc-fencing')
assert returnVal == 1
assert output == 'Error: unable to find resource \'apc-fencing\'\n',[output]
- output, returnVal = pcs(temp_cib, 'stonith show apc-fencing')
- ac(output, """\
- Resource: apc-fencing (class=stonith type=fence_apc)
- Attributes: ipaddr="morph-apc" login="apc" passwd="apc" switch="1" pcmk_host_map="buzz-01:1;buzz-02:2;buzz-03:3;buzz-04:4;buzz-05:5" pcmk_host_check="static-list" pcmk_host_list="buzz-01,buzz-02,buzz-03,buzz-04,buzz-05"
- Operations: monitor interval=60s (apc-fencing-monitor-interval-60s)
-""")
- assert returnVal == 0
+ self.assert_pcs_success("stonith show apc-fencing", outdent(
+ """\
+ Resource: apc-fencing (class=stonith type=fence_apc)
+ Attributes: ipaddr="morph-apc" login="apc" passwd="apc" pcmk_host_check="static-list" pcmk_host_list="buzz-01,buzz-02,buzz-03,buzz-04,buzz-05" pcmk_host_map="buzz-01:1;buzz-02:2;buzz-03:3;buzz-04:4;buzz-05:5" switch="1"
+ Operations: monitor interval=60s (apc-fencing-monitor-interval-60s)
+ """
+ ))
- output, returnVal = pcs(temp_cib, 'stonith delete apc-fencing')
- assert returnVal == 0
- assert output == 'Deleting Resource - apc-fencing\n',[output]
+ self.assert_pcs_success(
+ "stonith delete apc-fencing",
+ "Deleting Resource - apc-fencing\n"
+ )
output, returnVal = pcs(temp_cib, "stonith update test3 bad_ipaddr=test")
assert returnVal == 1
@@ -149,13 +187,13 @@ class StonithTest(unittest.TestCase):
output, returnVal = pcs(temp_cib, "stonith show test2")
assert returnVal == 0
- assert output == " Resource: test2 (class=stonith type=fence_ilo)\n Operations: monitor interval=60s (test2-monitor-interval-60s)\n",[output]
+ assert output == " Resource: test2 (class=stonith type=fence_apc)\n Operations: monitor interval=60s (test2-monitor-interval-60s)\n",[output]
output, returnVal = pcs(temp_cib, "stonith show --full")
ac(output, """\
Resource: test1 (class=stonith type=fence_noxist)
Operations: monitor interval=60s (test1-monitor-interval-60s)
- Resource: test2 (class=stonith type=fence_ilo)
+ Resource: test2 (class=stonith type=fence_apc)
Operations: monitor interval=60s (test2-monitor-interval-60s)
Resource: test3 (class=stonith type=fence_ilo)
Attributes: ipaddr=test login=testA
@@ -163,51 +201,52 @@ class StonithTest(unittest.TestCase):
""")
assert returnVal == 0
- output, returnVal = pcs(temp_cib, 'stonith create test-fencing fence_apc pcmk_host_list="rhel7-node1 rhel7-node2" op monitor interval=61s --force')
- assert returnVal == 0
- ac(output,"")
-
- output, returnVal = pcs(temp_cib, 'config show')
- ac(output, """\
-Cluster Name: test99
-Corosync Nodes:
- rh7-1 rh7-2
-Pacemaker Nodes:
-
-Resources:
-
-Stonith Devices:
- Resource: test1 (class=stonith type=fence_noxist)
- Operations: monitor interval=60s (test1-monitor-interval-60s)
- Resource: test2 (class=stonith type=fence_ilo)
- Operations: monitor interval=60s (test2-monitor-interval-60s)
- Resource: test3 (class=stonith type=fence_ilo)
- Attributes: ipaddr=test login=testA
- Operations: monitor interval=60s (test3-monitor-interval-60s)
- Resource: test-fencing (class=stonith type=fence_apc)
- Attributes: pcmk_host_list="rhel7-node1
- Operations: monitor interval=61s (test-fencing-monitor-interval-61s)
-Fencing Levels:
-
-Location Constraints:
-Ordering Constraints:
-Colocation Constraints:
-Ticket Constraints:
-
-Alerts:
- No alerts defined
-
-Resources Defaults:
- No defaults set
-Operations Defaults:
- No defaults set
-
-Cluster Properties:
+ self.assert_pcs_success(
+ "stonith create test-fencing fence_apc 'pcmk_host_list=rhel7-node1 rhel7-node2' op monitor interval=61s --force",
+ "Warning: required resource options 'ipaddr', 'login' are missing\n"
+ )
-Quorum:
- Options:
-""")
- assert returnVal == 0
+ self.assert_pcs_success("config show", outdent(
+ """\
+ Cluster Name: test99
+ Corosync Nodes:
+ rh7-1 rh7-2
+ Pacemaker Nodes:
+
+ Resources:
+
+ Stonith Devices:
+ Resource: test1 (class=stonith type=fence_noxist)
+ Operations: monitor interval=60s (test1-monitor-interval-60s)
+ Resource: test2 (class=stonith type=fence_apc)
+ Operations: monitor interval=60s (test2-monitor-interval-60s)
+ Resource: test3 (class=stonith type=fence_ilo)
+ Attributes: ipaddr=test login=testA
+ Operations: monitor interval=60s (test3-monitor-interval-60s)
+ Resource: test-fencing (class=stonith type=fence_apc)
+ Attributes: pcmk_host_list="rhel7-node1 rhel7-node2"
+ Operations: monitor interval=61s (test-fencing-monitor-interval-61s)
+ Fencing Levels:
+
+ Location Constraints:
+ Ordering Constraints:
+ Colocation Constraints:
+ Ticket Constraints:
+
+ Alerts:
+ No alerts defined
+
+ Resources Defaults:
+ No defaults set
+ Operations Defaults:
+ No defaults set
+
+ Cluster Properties:
+
+ Quorum:
+ Options:
+ """
+ ))
def test_stonith_create_provides_unfencing(self):
if utils.is_rhel6():
@@ -320,9 +359,10 @@ Quorum:
assert output == "Error: must specify one (and only one) node to confirm fenced\n"
def testPcmkHostList(self):
- output, returnVal = pcs(temp_cib, "stonith create F1 fence_apc 'pcmk_host_list=nodea nodeb' --force")
- assert returnVal == 0
- ac(output,"")
+ self.assert_pcs_success(
+ "stonith create F1 fence_apc 'pcmk_host_list=nodea nodeb' --force",
+ "Warning: required resource options 'ipaddr', 'login' are missing\n"
+ )
output, returnVal = pcs(temp_cib, "stonith show F1")
ac(output, """\
@@ -339,7 +379,7 @@ Quorum:
# metadata from pacemaker, this will be reviewed and fixed.
output, returnVal = pcs(
temp_cib,
- 'stonith create apc-1 fence_apc params ipaddr="ip" login="apc"'
+ 'stonith create apc-1 fence_apc ipaddr="ip" login="apc"'
)
# ac(output, """\
#Error: missing required option(s): 'port' for resource type: stonith:fence_apc (use --force to override)
@@ -350,512 +390,921 @@ Quorum:
output, returnVal = pcs(
temp_cib,
- 'stonith create apc-2 fence_apc params ipaddr="ip" login="apc" pcmk_host_map="buzz-01:1;buzz-02:2"'
+ 'stonith create apc-2 fence_apc ipaddr="ip" login="apc" pcmk_host_map="buzz-01:1;buzz-02:2"'
)
ac(output, "")
self.assertEqual(returnVal, 0)
output, returnVal = pcs(
temp_cib,
- 'stonith create apc-3 fence_apc params ipaddr="ip" login="apc" pcmk_host_list="buzz-01,buzz-02"'
+ 'stonith create apc-3 fence_apc ipaddr="ip" login="apc" pcmk_host_list="buzz-01,buzz-02"'
)
ac(output, "")
self.assertEqual(returnVal, 0)
output, returnVal = pcs(
temp_cib,
- 'stonith create apc-4 fence_apc params ipaddr="ip" login="apc" pcmk_host_argument="buzz-01"'
+ 'stonith create apc-4 fence_apc ipaddr="ip" login="apc" pcmk_host_argument="buzz-01"'
)
ac(output, "")
self.assertEqual(returnVal, 0)
- def testFenceLevels(self):
- output, returnVal = pcs(temp_cib, "stonith level remove 1 rh7-2 F1")
- assert returnVal == 1
- ac (output,'Error: unable to remove fencing level, fencing level for node: rh7-2, at level: 1, with device: F1 doesn\'t exist\n')
+ def testStonithDeleteRemovesLevel(self):
+ shutil.copy(rc("cib-empty-with3nodes.xml"), temp_cib)
- output, returnVal = pcs(temp_cib, "stonith level")
- assert returnVal == 0
- assert output == ""
+ self.assert_pcs_success(
+ "stonith create n1-ipmi fence_apc --force",
+ "Warning: required resource options 'ipaddr', 'login' are missing\n"
+ )
+ self.assert_pcs_success(
+ "stonith create n2-ipmi fence_apc --force",
+ "Warning: required resource options 'ipaddr', 'login' are missing\n"
+ )
+ self.assert_pcs_success(
+ "stonith create n1-apc1 fence_apc --force",
+ "Warning: required resource options 'ipaddr', 'login' are missing\n"
+ )
+ self.assert_pcs_success(
+ "stonith create n1-apc2 fence_apc --force",
+ "Warning: required resource options 'ipaddr', 'login' are missing\n"
+ )
+ self.assert_pcs_success(
+ "stonith create n2-apc1 fence_apc --force",
+ "Warning: required resource options 'ipaddr', 'login' are missing\n"
+ )
+ self.assert_pcs_success(
+ "stonith create n2-apc2 fence_apc --force",
+ "Warning: required resource options 'ipaddr', 'login' are missing\n"
+ )
+ self.assert_pcs_success(
+ "stonith create n2-apc3 fence_apc --force",
+ "Warning: required resource options 'ipaddr', 'login' are missing\n"
+ )
+ self.assert_pcs_success_all([
+ "stonith level add 1 rh7-1 n1-ipmi",
+ "stonith level add 2 rh7-1 n1-apc1,n1-apc2,n2-apc2",
+ "stonith level add 1 rh7-2 n2-ipmi",
+ "stonith level add 2 rh7-2 n2-apc1,n2-apc2,n2-apc3",
+ ])
- output, returnVal = pcs(temp_cib, "stonith create F1 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn")
- assert returnVal == 0
- ac(output,"")
+ output, returnVal = pcs(temp_cib, "stonith")
+ self.assertEqual(returnVal, 0)
+ ac(output, """\
+ n1-ipmi\t(stonith:fence_apc):\tStopped
+ n2-ipmi\t(stonith:fence_apc):\tStopped
+ n1-apc1\t(stonith:fence_apc):\tStopped
+ n1-apc2\t(stonith:fence_apc):\tStopped
+ n2-apc1\t(stonith:fence_apc):\tStopped
+ n2-apc2\t(stonith:fence_apc):\tStopped
+ n2-apc3\t(stonith:fence_apc):\tStopped
+ Target: rh7-1
+ Level 1 - n1-ipmi
+ Level 2 - n1-apc1,n1-apc2,n2-apc2
+ Target: rh7-2
+ Level 1 - n2-ipmi
+ Level 2 - n2-apc1,n2-apc2,n2-apc3
+""")
- output, returnVal = pcs(temp_cib, "stonith create F2 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn")
- assert returnVal == 0
- ac(output,"")
+ self.assert_pcs_success(
+ "stonith delete n2-apc2",
+ "Deleting Resource - n2-apc2\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith create F3 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn")
- assert returnVal == 0
- ac(output,"")
+ output, returnVal = pcs(temp_cib, "stonith")
+ self.assertEqual(returnVal, 0)
+ ac(output, """\
+ n1-ipmi\t(stonith:fence_apc):\tStopped
+ n2-ipmi\t(stonith:fence_apc):\tStopped
+ n1-apc1\t(stonith:fence_apc):\tStopped
+ n1-apc2\t(stonith:fence_apc):\tStopped
+ n2-apc1\t(stonith:fence_apc):\tStopped
+ n2-apc3\t(stonith:fence_apc):\tStopped
+ Target: rh7-1
+ Level 1 - n1-ipmi
+ Level 2 - n1-apc1,n1-apc2
+ Target: rh7-2
+ Level 1 - n2-ipmi
+ Level 2 - n2-apc1,n2-apc3
+""")
- output, returnVal = pcs(temp_cib, "stonith create F4 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn")
- assert returnVal == 0
- ac(output,"")
+ self.assert_pcs_success(
+ "stonith delete n2-apc1",
+ "Deleting Resource - n2-apc1\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith create F5 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn")
- assert returnVal == 0
- ac(output,"")
+ output, returnVal = pcs(temp_cib, "stonith")
+ self.assertEqual(returnVal, 0)
+ ac(output, """\
+ n1-ipmi\t(stonith:fence_apc):\tStopped
+ n2-ipmi\t(stonith:fence_apc):\tStopped
+ n1-apc1\t(stonith:fence_apc):\tStopped
+ n1-apc2\t(stonith:fence_apc):\tStopped
+ n2-apc3\t(stonith:fence_apc):\tStopped
+ Target: rh7-1
+ Level 1 - n1-ipmi
+ Level 2 - n1-apc1,n1-apc2
+ Target: rh7-2
+ Level 1 - n2-ipmi
+ Level 2 - n2-apc3
+""")
- output, returnVal = pcs(temp_cib, "stonith level add NaN rh7-1 F3,F4")
- ac(output, "Error: invalid level 'NaN', use a positive integer\n")
- assert returnVal == 1
+ self.assert_pcs_success(
+ "stonith delete n2-apc3",
+ "Deleting Resource - n2-apc3\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith level add -10 rh7-1 F3,F4")
- ac(output, "Error: invalid level '-10', use a positive integer\n")
- assert returnVal == 1
+ output, returnVal = pcs(temp_cib, "stonith")
+ self.assertEqual(returnVal, 0)
+ ac(output, """\
+ n1-ipmi\t(stonith:fence_apc):\tStopped
+ n2-ipmi\t(stonith:fence_apc):\tStopped
+ n1-apc1\t(stonith:fence_apc):\tStopped
+ n1-apc2\t(stonith:fence_apc):\tStopped
+ Target: rh7-1
+ Level 1 - n1-ipmi
+ Level 2 - n1-apc1,n1-apc2
+ Target: rh7-2
+ Level 1 - n2-ipmi
+""")
- output, returnVal = pcs(temp_cib, "stonith level add 10abc rh7-1 F3,F4")
- ac(output, "Error: invalid level '10abc', use a positive integer\n")
- assert returnVal == 1
+ self.assert_pcs_success(
+ "resource delete n1-apc1",
+ "Deleting Resource - n1-apc1\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith level add 0 rh7-1 F3,F4")
- ac(output, "Error: invalid level '0', use a positive integer\n")
- assert returnVal == 1
+ output, returnVal = pcs(temp_cib, "stonith")
+ self.assertEqual(returnVal, 0)
+ ac(output, """\
+ n1-ipmi\t(stonith:fence_apc):\tStopped
+ n2-ipmi\t(stonith:fence_apc):\tStopped
+ n1-apc2\t(stonith:fence_apc):\tStopped
+ Target: rh7-1
+ Level 1 - n1-ipmi
+ Level 2 - n1-apc2
+ Target: rh7-2
+ Level 1 - n2-ipmi
+""")
- output, returnVal = pcs(temp_cib, "stonith level add 000 rh7-1 F3,F4")
- ac(output, "Error: invalid level '000', use a positive integer\n")
- assert returnVal == 1
+ self.assert_pcs_success("resource delete n1-apc2", outdent(
+ """\
+ Deleting Resource - n1-apc2
+ """
+ ))
- output, returnVal = pcs(temp_cib, "stonith level add 1 rh7-1 F3,F4")
- assert returnVal == 0
- assert output == ""
+ output, returnVal = pcs(temp_cib, "stonith")
+ self.assertEqual(returnVal, 0)
+ ac(output, """\
+ n1-ipmi\t(stonith:fence_apc):\tStopped
+ n2-ipmi\t(stonith:fence_apc):\tStopped
+ Target: rh7-1
+ Level 1 - n1-ipmi
+ Target: rh7-2
+ Level 1 - n2-ipmi
+""")
- output, returnVal = pcs(temp_cib, "stonith level add 2 rh7-1 F5,F2")
- assert returnVal == 0
- assert output == ""
+ def testNoStonithWarning(self):
+ o,r = pcs(temp_cib, "status")
+ assert "WARNING: no stonith devices and " in o
- output, returnVal = pcs(temp_cib, "stonith level add 2 rh7-1 F5,F2")
- assert returnVal == 1
- assert output == 'Error: unable to add fencing level, fencing level for node: rh7-1, at level: 2, with device: F5,F2 already exists\n',[output]
+ o,r = pcs(temp_cib, "stonith create test_stonith fence_apc ipaddr=ip login=lgn, pcmk_host_argument=node1")
+ ac(o,"")
+ assert r == 0
- output, returnVal = pcs(temp_cib, "stonith level add 1 rh7-2 F1")
- assert returnVal == 0
- assert output == ""
+ o,r = pcs(temp_cib, "status")
+ assert "WARNING: no stonith devices and " not in o
- output, returnVal = pcs(temp_cib, "stonith level add 002 rh7-2 F2")
- assert returnVal == 0
- assert output == ""
+ self.assert_pcs_success(
+ "stonith delete test_stonith",
+ "Deleting Resource - test_stonith\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith show")
- assert returnVal == 0
- ac(output,"""\
- F1\t(stonith:fence_apc):\tStopped
- F2\t(stonith:fence_apc):\tStopped
- F3\t(stonith:fence_apc):\tStopped
- F4\t(stonith:fence_apc):\tStopped
- F5\t(stonith:fence_apc):\tStopped
- Node: rh7-1
- Level 1 - F3,F4
- Level 2 - F5,F2
- Node: rh7-2
- Level 1 - F1
- Level 2 - F2
-""")
+ o,r = pcs(temp_cib, "stonith create test_stonith fence_apc ipaddr=ip login=lgn, pcmk_host_argument=node1")
+ ac(o,"")
+ assert r == 0
- output, returnVal = pcs(temp_cib, "stonith level")
- assert returnVal == 0
- assert output == ' Node: rh7-1\n Level 1 - F3,F4\n Level 2 - F5,F2\n Node: rh7-2\n Level 1 - F1\n Level 2 - F2\n',[output]
+ o,r = pcs(temp_cib, "status")
+ assert "WARNING: no stonith devices and " not in o
- output, returnVal = pcs(temp_cib, "stonith level remove 1 rh7-2 F1")
- assert returnVal == 0
- assert output == ""
- output, returnVal = pcs(temp_cib, "stonith level remove 1 rh7-2 F1")
- assert returnVal == 1
- assert output == 'Error: unable to remove fencing level, fencing level for node: rh7-2, at level: 1, with device: F1 doesn\'t exist\n',[output]
+class LevelTestsBase(TestCase, AssertPcsMixin):
+ def setUp(self):
+ if fencing_level_attribute_supported:
+ shutil.copy(rc("cib-empty-2.5-withnodes.xml"), temp_cib)
+ else:
+ shutil.copy(rc("cib-empty-2.3-withnodes.xml"), temp_cib)
+ self.pcs_runner = PcsRunner(temp_cib)
+ self.config = ""
+ self.config_lines = []
- output, returnVal = pcs(temp_cib, "stonith level")
- assert returnVal == 0
- assert output == ' Node: rh7-1\n Level 1 - F3,F4\n Level 2 - F5,F2\n Node: rh7-2\n Level 2 - F2\n',[output]
+ def fixture_stonith_resource(self, name):
+ self.assert_pcs_success(
+ "stonith create {name} fence_apc 'pcmk_host_list=rh7-1 rh7-2', ipaddr=ip login=lgn"
+ .format(name=name)
+ )
- output, returnVal = pcs(temp_cib, "stonith level clear rh7-1a")
- assert returnVal == 0
- output = ""
+ def fixture_full_configuration(self):
+ self.fixture_stonith_resource("F1")
+ self.fixture_stonith_resource("F2")
+ self.fixture_stonith_resource("F3")
+
+ self.assert_pcs_success("stonith level add 1 rh7-1 F1")
+ self.assert_pcs_success("stonith level add 2 rh7-1 F2")
+ self.assert_pcs_success("stonith level add 2 rh7-2 F1")
+ self.assert_pcs_success("stonith level add 1 rh7-2 F2")
+ self.assert_pcs_success("stonith level add 4 regexp%rh7-\d F3")
+ self.assert_pcs_success("stonith level add 3 regexp%rh7-\d F2 F1")
+
+ self.config = outdent(
+ """\
+ Target: rh7-1
+ Level 1 - F1
+ Level 2 - F2
+ Target: rh7-2
+ Level 1 - F2
+ Level 2 - F1
+ Target: rh7-\d
+ Level 3 - F2,F1
+ Level 4 - F3
+ """
+ )
+ self.config_lines = self.config.splitlines()
- output, returnVal = pcs(temp_cib, "stonith level")
- assert returnVal == 0
- assert output == ' Node: rh7-1\n Level 1 - F3,F4\n Level 2 - F5,F2\n Node: rh7-2\n Level 2 - F2\n',[output]
+ if not fencing_level_attribute_supported:
+ return
+ self.assert_pcs_success(
+ "stonith level add 5 attrib%fencewith=levels1 F3 F2"
+ )
+ self.assert_pcs_success(
+ "stonith level add 6 attrib%fencewith=levels2 F3 F1"
+ )
+ self.config += outdent(
+ """\
+ Target: fencewith=levels1
+ Level 5 - F3,F2
+ Target: fencewith=levels2
+ Level 6 - F3,F1
+ """)
+ self.config_lines = self.config.splitlines()
+
+
+@skip_unless_fencing_level_supported
+class LevelBadCommand(LevelTestsBase):
+ def test_success(self):
+ self.assert_pcs_fail(
+ "stonith level nonsense",
+ stdout_start="\nUsage: pcs stonith level ...\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith level clear rh7-1")
- assert returnVal == 0
- output = ""
- output, returnVal = pcs(temp_cib, "stonith level")
- assert returnVal == 0
- assert output == ' Node: rh7-2\n Level 2 - F2\n',[output]
+@skip_unless_fencing_level_supported
+class LevelAddTargetUpgradesCib(LevelTestsBase):
+ def setUp(self):
+ shutil.copy(rc("cib-empty-withnodes.xml"), temp_cib)
+ self.pcs_runner = PcsRunner(temp_cib)
- output, returnVal = pcs(temp_cib, "stonith level add 2 rh7-1 F5,F2")
- assert returnVal == 0
- assert output == ""
+ @skip_unless_fencing_level_attribute_supported
+ def test_attribute(self):
+ self.fixture_stonith_resource("F1")
+ self.assert_pcs_success(
+ "stonith level add 1 attrib%fencewith=levels F1",
+ "CIB has been upgraded to the latest schema version.\n"
+ )
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: fencewith=levels
+ Level 1 - F1
+ """
+ )
+ )
- output, returnVal = pcs(temp_cib, "stonith level add 1 rh7-1 F3,F4")
- assert returnVal == 0
- assert output == ""
+ def test_regexp(self):
+ self.fixture_stonith_resource("F1")
+ self.assert_pcs_success(
+ "stonith level add 1 regexp%node-\d+ F1",
+ "CIB has been upgraded to the latest schema version.\n"
+ )
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: node-\d+
+ Level 1 - F1
+ """
+ )
+ )
- output, returnVal = pcs(temp_cib, "stonith level")
- assert returnVal == 0
- assert output == ' Node: rh7-1\n Level 1 - F3,F4\n Level 2 - F5,F2\n Node: rh7-2\n Level 2 - F2\n',[output]
- output, returnVal = pcs(temp_cib, "stonith level clear")
- assert returnVal == 0
- assert output == ""
+@skip_unless_fencing_level_supported
+class LevelAdd(LevelTestsBase):
+ def test_not_enough_params(self):
+ self.assert_pcs_fail(
+ "stonith level add",
+ stdout_start="\nUsage: pcs stonith level add...\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith level")
- assert returnVal == 0
- assert output == '',[output]
+ self.assert_pcs_fail(
+ "stonith level add 1",
+ stdout_start="\nUsage: pcs stonith level add...\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith level 1")
- assert returnVal == 1
- assert output.startswith("pcs stonith level: invalid option")
-# ac (output,"pcs stonith level: invalid option -- '1'\n\nUsage: pcs stonith level...\n level\n Lists all of the fencing levels currently configured\n\n level add <level> <node> <devices>\n Add the fencing level for the specified node with a comma separated\n list of devices (stonith ids) to attempt for that node at that level.\n Fence levels are attempted in numerical order (starting with 1) if\n a level succeeds (meaning all devices are s [...]
+ self.assert_pcs_fail(
+ "stonith level add 1 nodeA",
+ stdout_start="\nUsage: pcs stonith level add...\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith level abcd")
- assert returnVal == 1
- assert output.startswith("pcs stonith level: invalid option")
-# assert output == "pcs stonith level: invalid option -- 'abcd'\n\nUsage: pcs stonith level...\n level\n Lists all of the fencing levels currently configured\n\n level add <level> <node> <devices>\n Add the fencing level for the specified node with a comma separated\n list of devices (stonith ids) to attempt for that node at that level.\n Fence levels are attempted in numerical order (starting with 1) if\n a level succeeds (meaning all devi [...]
+ def test_add_wrong_target_type(self):
+ self.assert_pcs_fail(
+ "stonith level add 1 error%value F1",
+ "Error: 'error' is not an allowed type for 'error%value', "
+ "use attrib, node, regexp\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith level add 1 rh7-1 blah")
- assert returnVal == 1
- assert output == 'Error: blah is not a stonith id (use --force to override)\n'
+ def test_add_bad_level(self):
+ self.fixture_stonith_resource("F1")
+ self.assert_pcs_fail(
+ "stonith level add NaN rh7-1 F1",
+ "Error: 'NaN' is not a valid level value, use a positive integer\n"
+ )
+ self.assert_pcs_fail(
+ "stonith level add -10 rh7-1 F1",
+ "Error: '-10' is not a valid level value, use a positive integer\n"
+ )
+ self.assert_pcs_fail(
+ "stonith level add 10abc rh7-1 F1",
+ "Error: '10abc' is not a valid level value, use a positive integer\n"
+ )
+ self.assert_pcs_fail(
+ "stonith level add 0 rh7-1 F1",
+ "Error: '0' is not a valid level value, use a positive integer\n"
+ )
+ self.assert_pcs_fail(
+ "stonith level add 000 rh7-1 F1",
+ "Error: '000' is not a valid level value, use a positive integer\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith level add 1 rh7-1 blah --force")
- assert returnVal == 0
- assert output == ''
+ def test_add_bad_device(self):
+ self.assert_pcs_fail(
+ "stonith level add 1 rh7-1 dev at ce",
+ "Error: invalid device id 'dev at ce', '@' is not a valid character "
+ "for a device id\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith level")
- assert returnVal == 0
- assert output == ' Node: rh7-1\n Level 1 - blah\n',[output]
+ def test_add_more_errors(self):
+ self.assert_pcs_fail(
+ "stonith level add x rh7-X F0 dev at ce",
+ outdent(
+ """\
+ Error: 'x' is not a valid level value, use a positive integer
+ Error: Node 'rh7-X' does not appear to exist in configuration, use --force to override
+ Error: invalid device id 'dev@ce', '@' is not a valid character for a device id
+ Error: Stonith resource(s) 'F0' do not exist, use --force to override
+ """
+ )
+ )
- output, returnVal = pcs(temp_cib, "stonith level add 1 rh7-9 F1")
- assert returnVal == 1
- assert output == 'Error: rh7-9 is not currently a node (use --force to override)\n'
+ self.assert_pcs_fail(
+ "stonith level add x rh7-X F0 dev at ce --force",
+ outdent(
+ """\
+ Error: 'x' is not a valid level value, use a positive integer
+ Error: invalid device id 'dev@ce', '@' is not a valid character for a device id
+ Warning: Node 'rh7-X' does not appear to exist in configuration
+ Warning: Stonith resource(s) 'F0' do not exist
+ """
+ )
+ )
- output, returnVal = pcs(temp_cib, "stonith level")
- assert returnVal == 0
- assert output == ' Node: rh7-1\n Level 1 - blah\n',[output]
+ def test_add_level_leading_zero(self):
+ self.fixture_stonith_resource("F1")
+ self.assert_pcs_success("stonith level add 0002 rh7-1 F1")
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: rh7-1
+ Level 2 - F1
+ """
+ )
+ )
- output, returnVal = pcs(temp_cib, "stonith level add 1 rh7-9 F1 --force")
- assert returnVal == 0
- assert output == ''
+ def test_add_node(self):
+ self.fixture_stonith_resource("F1")
+ self.assert_pcs_success("stonith level add 1 rh7-1 F1")
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: rh7-1
+ Level 1 - F1
+ """
+ )
+ )
- output, returnVal = pcs(temp_cib, "stonith level")
- assert returnVal == 0
- assert output == ' Node: rh7-1\n Level 1 - blah\n Node: rh7-9\n Level 1 - F1\n',[output]
+ self.assert_pcs_fail(
+ "stonith level add 1 rh7-1 F1",
+ "Error: Fencing level for 'rh7-1' at level '1' with device(s) "
+ "'F1' already exists\n"
+ )
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: rh7-1
+ Level 1 - F1
+ """
+ )
+ )
- o,r = pcs(temp_cib, "stonith level remove 1")
- assert r == 0
- assert o == ""
+ def test_add_node_pattern(self):
+ self.fixture_stonith_resource("F1")
+ self.assert_pcs_success("stonith level add 1 regexp%rh7-\d F1")
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: rh7-\d
+ Level 1 - F1
+ """
+ )
+ )
- o,r = pcs(temp_cib, "stonith level add 1 rh7-1 F1,F2")
- o,r = pcs(temp_cib, "stonith level add 2 rh7-1 F1,F2")
- o,r = pcs(temp_cib, "stonith level add 3 rh7-1 F1,F2")
- o,r = pcs(temp_cib, "stonith level add 4 rh7-1 F1,F2")
- o,r = pcs(temp_cib, "stonith level add 5 rh7-1 F1,F2")
- o,r = pcs(temp_cib, "stonith level add 1 rh7-2 F3")
- o,r = pcs(temp_cib, "stonith level add 2 rh7-2 F3")
+ self.assert_pcs_fail(
+ "stonith level add 1 regexp%rh7-\d F1",
+ "Error: Fencing level for 'rh7-\d' at level '1' with device(s) "
+ "'F1' already exists\n"
+ )
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: rh7-\d
+ Level 1 - F1
+ """
+ )
+ )
- o,r = pcs(temp_cib, "stonith level remove 5 rh7-1")
- assert r == 0
- assert o == ""
+ @skip_unless_fencing_level_attribute_supported
+ def test_add_node_attribute(self):
+ self.fixture_stonith_resource("F1")
+ self.assert_pcs_success(
+ "stonith level add 1 attrib%fencewith=levels F1"
+ )
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: fencewith=levels
+ Level 1 - F1
+ """
+ )
+ )
- o,r = pcs(temp_cib, "stonith level remove 4 rh7-1 F2")
- assert r == 1
- assert o == "Error: unable to remove fencing level, fencing level for node: rh7-1, at level: 4, with device: F2 doesn't exist\n"
+ self.assert_pcs_fail(
+ "stonith level add 1 attrib%fencewith=levels F1",
+ "Error: Fencing level for 'fencewith=levels' at level '1' with "
+ "device(s) 'F1' already exists\n"
+ )
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: fencewith=levels
+ Level 1 - F1
+ """
+ )
+ )
- o,r = pcs(temp_cib, "stonith level remove 4 rh7-1 F1")
- assert r == 1
- assert o == "Error: unable to remove fencing level, fencing level for node: rh7-1, at level: 4, with device: F1 doesn't exist\n"
+ def test_add_more_devices(self):
+ self.fixture_stonith_resource("F1")
+ self.fixture_stonith_resource("F2")
+ self.assert_pcs_success("stonith level add 1 rh7-1 F1 F2")
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: rh7-1
+ Level 1 - F1,F2
+ """
+ )
+ )
- o,r = pcs(temp_cib, "stonith level remove 4 rh7-1")
- assert r == 0
- assert o == ""
+ def test_add_more_devices_old_syntax(self):
+ self.fixture_stonith_resource("F1")
+ self.fixture_stonith_resource("F2")
+ self.fixture_stonith_resource("F3")
- o,r = pcs(temp_cib, "stonith level remove 3")
- assert r == 0
- assert o == ""
+ self.assert_pcs_success("stonith level add 1 rh7-1 F1,F2")
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: rh7-1
+ Level 1 - F1,F2
+ """
+ )
+ )
- o,r = pcs(temp_cib, "stonith level remove 2 F1 F2")
- assert r == 0
- assert o == ""
+ self.assert_pcs_success("stonith level add 2 rh7-1 F1,F2 F3")
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: rh7-1
+ Level 1 - F1,F2
+ Level 2 - F1,F2,F3
+ """
+ )
+ )
- o,r = pcs(temp_cib, "stonith level")
- assert r == 0
- ac(o," Node: rh7-1\n Level 1 - F1,F2\n Node: rh7-2\n Level 1 - F3\n Level 2 - F3\n")
+ self.assert_pcs_success("stonith level add 3 rh7-1 F1 F2,F3")
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: rh7-1
+ Level 1 - F1,F2
+ Level 2 - F1,F2,F3
+ Level 3 - F1,F2,F3
+ """
+ )
+ )
- o,r = pcs(temp_cib, "stonith level remove 2 F3")
- assert r == 0
- assert o == ""
+ def test_nonexistant_node(self):
+ self.fixture_stonith_resource("F1")
+ self.assert_pcs_fail(
+ "stonith level add 1 rh7-X F1",
+ "Error: Node 'rh7-X' does not appear to exist in configuration"
+ ", use --force to override\n"
+ )
+ self.assert_pcs_success(
+ "stonith level add 1 rh7-X F1 --force",
+ "Warning: Node 'rh7-X' does not appear to exist in configuration\n"
+ )
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: rh7-X
+ Level 1 - F1
+ """
+ )
+ )
- o,r = pcs(temp_cib, "stonith level remove 1 rh7-1")
- assert r == 0
- assert o == ""
+ def test_nonexistant_device(self):
+ self.assert_pcs_fail(
+ "stonith level add 1 rh7-1 F1",
+ "Error: Stonith resource(s) 'F1' do not exist"
+ ", use --force to override\n"
+ )
+ self.assert_pcs_success(
+ "stonith level add 1 rh7-1 F1 --force",
+ "Warning: Stonith resource(s) 'F1' do not exist\n"
+ )
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: rh7-1
+ Level 1 - F1
+ """
+ )
+ )
- o,r = pcs(temp_cib, "stonith level")
- assert r == 0
- ac(o," Node: rh7-2\n Level 1 - F3\n")
+ def test_nonexistant_devices(self):
+ self.fixture_stonith_resource("F1")
+ self.assert_pcs_fail(
+ "stonith level add 1 rh7-1 F1 F2 F3",
+ "Error: Stonith resource(s) 'F2', 'F3' do not exist"
+ ", use --force to override\n"
+ )
+ self.assert_pcs_success(
+ "stonith level add 1 rh7-1 F1 F2 F3 --force",
+ "Warning: Stonith resource(s) 'F2', 'F3' do not exist\n"
+ )
+ self.assert_pcs_success(
+ "stonith level",
+ outdent(
+ """\
+ Target: rh7-1
+ Level 1 - F1,F2,F3
+ """
+ )
+ )
- o,r = pcs(temp_cib, "stonith level add 1 rh7-1 F1,F2")
- assert r == 0
- ac(o,"")
- o,r = pcs(temp_cib, "stonith level clear F4")
- assert r == 0
- ac(o,"")
+@skip_unless_fencing_level_supported
+class LevelConfig(LevelTestsBase):
+ full_config = outdent(
+ """\
+ Cluster Name: test99
+ Corosync Nodes:
+ rh7-1 rh7-2
+ Pacemaker Nodes:
+ rh7-1 rh7-2
- o,r = pcs(temp_cib, "stonith level clear F2")
- assert r == 0
- ac(o,"")
+ Resources:
- o,r = pcs(temp_cib, "stonith level")
- assert r == 0
- ac(o," Node: rh7-1\n Level 1 - F1,F2\n Node: rh7-2\n Level 1 - F3\n")
+ Stonith Devices:{devices}
+ Fencing Levels:{levels}
- o,r = pcs(temp_cib, "stonith level clear F1,F2")
- assert r == 0
- ac(o,"")
+ Location Constraints:
+ Ordering Constraints:
+ Colocation Constraints:
+ Ticket Constraints:
- o,r = pcs(temp_cib, "stonith level")
- assert r == 0
- ac(o," Node: rh7-2\n Level 1 - F3\n")
+ Alerts:
+ No alerts defined
- o,r = pcs(temp_cib, "stonith level clear")
- o,r = pcs(temp_cib, "stonith level")
- assert r == 0
- ac(o,"")
+ Resources Defaults:
+ No defaults set
+ Operations Defaults:
+ No defaults set
- o,r = pcs(temp_cib, "stonith level add 10 rh7-1 F1")
- assert r == 0
- ac(o,"")
+ Cluster Properties:
- o,r = pcs(temp_cib, "stonith level add 010 rh7-1 F2")
- assert r == 0
- ac(o,"")
+ Quorum:
+ Options:
+ """
+ )
- o,r = pcs(temp_cib, "stonith level")
- assert r == 0
- ac(o, """\
- Node: rh7-1
- Level 10 - F1
- Level 10 - F2
-""")
+ def test_empty(self):
+ self.assert_pcs_success("stonith level config", "")
+ self.assert_pcs_success("stonith level", "")
+ self.assert_pcs_success("stonith", "NO stonith devices configured\n")
+ self.assert_pcs_success(
+ "config",
+ self.full_config.format(devices="", levels="")
+ )
- o,r = pcs(temp_cib, "stonith level clear")
- assert r == 0
- ac(o,"")
+ def test_all_posibilities(self):
+ self.fixture_full_configuration()
+ self.assert_pcs_success("stonith level config", self.config)
+ self.assert_pcs_success("stonith level", self.config)
+ self.assert_pcs_success(
+ "stonith",
+ outdent(
+ """\
+ F1\t(stonith:fence_apc):\tStopped
+ F2\t(stonith:fence_apc):\tStopped
+ F3\t(stonith:fence_apc):\tStopped
+ """
+ ) + "\n".join(indent(self.config_lines, 1)) + "\n"
+ )
+ self.assert_pcs_success(
+ "config",
+ self.full_config.format(
+ devices="""
+ Resource: F1 (class=stonith type=fence_apc)
+ Attributes: ipaddr=ip login=lgn pcmk_host_list="rh7-1 rh7-2,"
+ Operations: monitor interval=60s (F1-monitor-interval-60s)
+ Resource: F2 (class=stonith type=fence_apc)
+ Attributes: ipaddr=ip login=lgn pcmk_host_list="rh7-1 rh7-2,"
+ Operations: monitor interval=60s (F2-monitor-interval-60s)
+ Resource: F3 (class=stonith type=fence_apc)
+ Attributes: ipaddr=ip login=lgn pcmk_host_list="rh7-1 rh7-2,"
+ Operations: monitor interval=60s (F3-monitor-interval-60s)\
+""",
+ levels=("\n" + "\n".join(indent(self.config_lines, 2)))
+ )
+ )
- o,r = pcs(temp_cib, "stonith level add 1 rh7-bad F1 --force")
- assert r == 0
- ac(o,"")
- o,r = pcs(temp_cib, "stonith level verify")
- assert r == 1
- ac(o,"Error: rh7-bad is not currently a node\n")
+@skip_unless_fencing_level_supported
+class LevelClear(LevelTestsBase):
+ def setUp(self):
+ super(LevelClear, self).setUp()
+ self.fixture_full_configuration()
- o,r = pcs(temp_cib, "stonith level clear")
- o,r = pcs(temp_cib, "stonith level add 1 rh7-1 F1,FBad --force")
- assert r == 0
- ac(o,"")
+ def test_clear_all(self):
+ self.assert_pcs_success("stonith level clear")
+ self.assert_pcs_success("stonith level config", "")
- o,r = pcs(temp_cib, "stonith level verify")
- assert r == 1
- ac(o,"Error: FBad is not a stonith id\n")
+ def test_clear_nonexistant_node_or_device(self):
+ self.assert_pcs_success("stonith level clear rh-X")
+ self.assert_pcs_success("stonith level config", self.config)
- o,r = pcs(temp_cib, "cluster verify")
- assert r == 1
- ac(o,"Error: FBad is not a stonith id\n")
+ def test_clear_nonexistant_devices(self):
+ self.assert_pcs_success("stonith level clear F1,F5")
+ self.assert_pcs_success("stonith level config", self.config)
- def testStonithDeleteRemovesLevel(self):
- output, returnVal = pcs(
- temp_cib, "stonith create n1-ipmi fence_ilo --force"
- )
- self.assertEqual(returnVal, 0)
- ac(output, "")
+ def test_pattern_is_not_device(self):
+ self.assert_pcs_success("stonith level clear regexp%F1")
+ self.assert_pcs_success("stonith level config", self.config)
- output, returnVal = pcs(
- temp_cib, "stonith create n2-ipmi fence_ilo --force"
+ def test_clear_node(self):
+ self.assert_pcs_success("stonith level clear rh7-1")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[3:]) + "\n"
)
- self.assertEqual(returnVal, 0)
- ac(output, "")
- output, returnVal = pcs(
- temp_cib, "stonith create n1-apc1 fence_apc --force"
+ def test_clear_pattern(self):
+ self.assert_pcs_success("stonith level clear regexp%rh7-\d")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[:6] + self.config_lines[9:]) + "\n"
)
- self.assertEqual(returnVal, 0)
- ac(output, "")
- output, returnVal = pcs(
- temp_cib, "stonith create n1-apc2 fence_apc --force"
+ @skip_unless_fencing_level_attribute_supported
+ def test_clear_attribute(self):
+ self.assert_pcs_success("stonith level clear attrib%fencewith=levels2")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[:11]) + "\n"
)
- self.assertEqual(returnVal, 0)
- ac(output, "")
- output, returnVal = pcs(
- temp_cib, "stonith create n2-apc1 fence_apc --force"
+ def test_clear_device(self):
+ self.assert_pcs_success("stonith level clear F1")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(
+ self.config_lines[0:1]
+ +
+ self.config_lines[2:5]
+ +
+ self.config_lines[6:]
+ ) + "\n"
)
- self.assertEqual(returnVal, 0)
- ac(output, "")
- output, returnVal = pcs(
- temp_cib, "stonith create n2-apc2 fence_apc --force"
+ def test_clear_devices(self):
+ self.assert_pcs_success("stonith level clear F2,F1")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[:7] + self.config_lines[8:]) + "\n"
)
- self.assertEqual(returnVal, 0)
- ac(output, "")
- output, returnVal = pcs(
- temp_cib, "stonith create n2-apc3 fence_apc --force"
- )
- self.assertEqual(returnVal, 0)
- ac(output, "")
- output, returnVal = pcs(temp_cib, "stonith level add 1 rh7-1 n1-ipmi")
- self.assertEqual(returnVal, 0)
- ac(output, "")
+@skip_unless_fencing_level_supported
+class LevelRemove(LevelTestsBase):
+ def setUp(self):
+ super(LevelRemove, self).setUp()
+ self.fixture_full_configuration()
- output, returnVal = pcs(
- temp_cib, "stonith level add 2 rh7-1 n1-apc1,n1-apc2,n2-apc2"
+ def test_nonexisting_level_node_device(self):
+ self.assert_pcs_fail(
+ "stonith level remove 1 rh7-1 F3",
+ outdent(
+ """\
+ Error: Fencing level for 'rh7-1' at level '1' with device(s) 'F3' does not exist
+ Error: Fencing level at level '1' with device(s) 'rh7-1,F3' does not exist
+ """
+ )
)
- self.assertEqual(returnVal, 0)
- ac(output, "")
-
- output, returnVal = pcs(temp_cib, "stonith level add 1 rh7-2 n2-ipmi")
- self.assertEqual(returnVal, 0)
- ac(output, "")
+ self.assert_pcs_success("stonith level config", self.config)
- output, returnVal = pcs(
- temp_cib, "stonith level add 2 rh7-2 n2-apc1,n2-apc2,n2-apc3"
+ def test_nonexisting_level_pattern_device(self):
+ self.assert_pcs_fail(
+ "stonith level remove 1 regexp%rh7-\d F3",
+ "Error: Fencing level for 'rh7-\d' at level '1' with device(s) 'F3' does not exist\n"
)
- self.assertEqual(returnVal, 0)
- ac(output, "")
+ self.assert_pcs_success("stonith level config", self.config)
- output, returnVal = pcs(temp_cib, "stonith")
- self.assertEqual(returnVal, 0)
- ac(output, """\
- n1-ipmi\t(stonith:fence_ilo):\tStopped
- n2-ipmi\t(stonith:fence_ilo):\tStopped
- n1-apc1\t(stonith:fence_apc):\tStopped
- n1-apc2\t(stonith:fence_apc):\tStopped
- n2-apc1\t(stonith:fence_apc):\tStopped
- n2-apc2\t(stonith:fence_apc):\tStopped
- n2-apc3\t(stonith:fence_apc):\tStopped
- Node: rh7-1
- Level 1 - n1-ipmi
- Level 2 - n1-apc1,n1-apc2,n2-apc2
- Node: rh7-2
- Level 1 - n2-ipmi
- Level 2 - n2-apc1,n2-apc2,n2-apc3
-""")
+ self.assert_pcs_fail(
+ "stonith level remove 3 regexp%rh7-\d F1,F2",
+ "Error: Fencing level for 'rh7-\d' at level '3' with device(s) 'F1,F2' does not exist\n"
+ )
+ self.assert_pcs_success("stonith level config", self.config)
- output, returnVal = pcs(temp_cib, "stonith delete n2-apc2")
- self.assertEqual(returnVal, 0)
- ac(output, "Deleting Resource - n2-apc2\n")
+ def test_nonexisting_level(self):
+ self.assert_pcs_fail(
+ "stonith level remove 9",
+ "Error: Fencing level at level '9' does not exist\n"
+ )
+ self.assert_pcs_success("stonith level config", self.config)
- output, returnVal = pcs(temp_cib, "stonith")
- self.assertEqual(returnVal, 0)
- ac(output, """\
- n1-ipmi\t(stonith:fence_ilo):\tStopped
- n2-ipmi\t(stonith:fence_ilo):\tStopped
- n1-apc1\t(stonith:fence_apc):\tStopped
- n1-apc2\t(stonith:fence_apc):\tStopped
- n2-apc1\t(stonith:fence_apc):\tStopped
- n2-apc3\t(stonith:fence_apc):\tStopped
- Node: rh7-1
- Level 1 - n1-ipmi
- Level 2 - n1-apc1,n1-apc2
- Node: rh7-2
- Level 1 - n2-ipmi
- Level 2 - n2-apc1,n2-apc3
-""")
+ def test_remove_level(self):
+ self.assert_pcs_success("stonith level remove 1")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(
+ self.config_lines[0:1]
+ +
+ self.config_lines[2:4]
+ +
+ self.config_lines[5:]
+ ) + "\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith delete n2-apc1")
- self.assertEqual(returnVal, 0)
- ac(output, "Deleting Resource - n2-apc1\n")
+ def test_remove_level_node(self):
+ self.assert_pcs_success("stonith level remove 1 rh7-2")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[:4] + self.config_lines[5:]) + "\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith")
- self.assertEqual(returnVal, 0)
- ac(output, """\
- n1-ipmi\t(stonith:fence_ilo):\tStopped
- n2-ipmi\t(stonith:fence_ilo):\tStopped
- n1-apc1\t(stonith:fence_apc):\tStopped
- n1-apc2\t(stonith:fence_apc):\tStopped
- n2-apc3\t(stonith:fence_apc):\tStopped
- Node: rh7-1
- Level 1 - n1-ipmi
- Level 2 - n1-apc1,n1-apc2
- Node: rh7-2
- Level 1 - n2-ipmi
- Level 2 - n2-apc3
-""")
+ def test_remove_level_pattern(self):
+ self.assert_pcs_success("stonith level remove 3 regexp%rh7-\d")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[:7] + self.config_lines[8:]) + "\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith delete n2-apc3")
- self.assertEqual(returnVal, 0)
- ac(output, "Deleting Resource - n2-apc3\n")
+ @skip_unless_fencing_level_attribute_supported
+ def test_remove_level_attrib(self):
+ self.assert_pcs_success(
+ "stonith level remove 6 attrib%fencewith=levels2"
+ )
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[:11]) + "\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith")
- self.assertEqual(returnVal, 0)
- ac(output, """\
- n1-ipmi\t(stonith:fence_ilo):\tStopped
- n2-ipmi\t(stonith:fence_ilo):\tStopped
- n1-apc1\t(stonith:fence_apc):\tStopped
- n1-apc2\t(stonith:fence_apc):\tStopped
- Node: rh7-1
- Level 1 - n1-ipmi
- Level 2 - n1-apc1,n1-apc2
- Node: rh7-2
- Level 1 - n2-ipmi
-""")
+ def test_remove_level_device(self):
+ self.assert_pcs_success("stonith level remove 1 F2")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[:4] + self.config_lines[5:]) + "\n"
+ )
- output, returnVal = pcs(temp_cib, "resource delete n1-apc1")
- self.assertEqual(returnVal, 0)
- ac(output, "Deleting Resource - n1-apc1\n")
+ def test_remove_level_devices(self):
+ self.assert_pcs_success("stonith level remove 3 F2 F1")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[:7] + self.config_lines[8:]) + "\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith")
- self.assertEqual(returnVal, 0)
- ac(output, """\
- n1-ipmi\t(stonith:fence_ilo):\tStopped
- n2-ipmi\t(stonith:fence_ilo):\tStopped
- n1-apc2\t(stonith:fence_apc):\tStopped
- Node: rh7-1
- Level 1 - n1-ipmi
- Level 2 - n1-apc2
- Node: rh7-2
- Level 1 - n2-ipmi
-""")
+ def test_remove_level_devices_old_syntax(self):
+ self.assert_pcs_success("stonith level remove 3 F2,F1")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[:7] + self.config_lines[8:]) + "\n"
+ )
- output, returnVal = pcs(temp_cib, "resource delete n1-apc2")
- self.assertEqual(returnVal, 0)
- ac(output, "Deleting Resource - n1-apc2\n")
+ def test_remove_level_node_device(self):
+ self.assert_pcs_success("stonith level remove 1 rh7-2 F2")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[:4] + self.config_lines[5:]) + "\n"
+ )
- output, returnVal = pcs(temp_cib, "stonith")
- self.assertEqual(returnVal, 0)
- ac(output, """\
- n1-ipmi\t(stonith:fence_ilo):\tStopped
- n2-ipmi\t(stonith:fence_ilo):\tStopped
- Node: rh7-1
- Level 1 - n1-ipmi
- Node: rh7-2
- Level 1 - n2-ipmi
-""")
+ def test_remove_level_pattern_device(self):
+ self.assert_pcs_success("stonith level remove 3 regexp%rh7-\d F2 F1")
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[:7] + self.config_lines[8:]) + "\n"
+ )
- def testNoStonithWarning(self):
- o,r = pcs(temp_cib, "status")
- assert "WARNING: no stonith devices and " in o
+ @skip_unless_fencing_level_attribute_supported
+ def test_remove_level_attrib_device(self):
+ self.assert_pcs_success(
+ "stonith level remove 6 attrib%fencewith=levels2 F3 F1"
+ )
+ self.assert_pcs_success(
+ "stonith level config",
+ "\n".join(self.config_lines[:11]) + "\n"
+ )
- o,r = pcs(temp_cib, "stonith create test_stonith fence_apc ipaddr=ip login=lgn, pcmk_host_argument=node1")
- ac(o,"")
- assert r == 0
- o,r = pcs(temp_cib, "status")
- assert "WARNING: no stonith devices and " not in o
+@skip_unless_fencing_level_supported
+class LevelVerify(LevelTestsBase):
+ def test_success(self):
+ self.fixture_full_configuration()
+ self.assert_pcs_success("stonith level verify", "")
- o,r = pcs(temp_cib, "stonith delete test_stonith")
- ac(o,"Deleting Resource - test_stonith\n")
- assert r == 0
+ def test_errors(self):
+ self.fixture_stonith_resource("F1")
- o,r = pcs(temp_cib, "stonith create test_stonith fence_apc ipaddr=ip login=lgn, pcmk_host_argument=node1 --clone")
- ac(o,"")
- assert r == 0
+ self.assert_pcs_success("stonith level add 1 rh7-1 F1")
+ self.assert_pcs_success(
+ "stonith level add 2 rh7-1 FX --force",
+ "Warning: Stonith resource(s) 'FX' do not exist\n"
+ )
+ self.assert_pcs_success(
+ "stonith level add 1 rh7-X FX --force",
+ outdent(
+ """\
+ Warning: Node 'rh7-X' does not appear to exist in configuration
+ Warning: Stonith resource(s) 'FX' do not exist
+ """
+ )
+ )
+ self.assert_pcs_success(
+ "stonith level add 2 rh7-Y FY --force",
+ outdent(
+ """\
+ Warning: Node 'rh7-Y' does not appear to exist in configuration
+ Warning: Stonith resource(s) 'FY' do not exist
+ """
+ )
+ )
+ self.assert_pcs_success(
+ "stonith level add 4 regexp%rh7-\d FX --force",
+ "Warning: Stonith resource(s) 'FX' do not exist\n"
+ )
+ self.assert_pcs_success(
+ "stonith level add 3 regexp%rh7-\d FY FZ --force",
+ "Warning: Stonith resource(s) 'FY', 'FZ' do not exist\n"
+ )
- o,r = pcs(temp_cib, "status")
- assert "WARNING: no stonith devices and " not in o
+ self.assert_pcs_fail(
+ "stonith level verify",
+ outdent(
+ """\
+ Error: Stonith resource(s) 'FX', 'FY', 'FZ' do not exist
+ Error: Node 'rh7-X' does not appear to exist in configuration
+ Error: Node 'rh7-Y' does not appear to exist in configuration
+ """
+ )
+ )
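
The fencing-level tests above exercise the three target spellings the reworked "stonith level add" accepts: a plain node name, a regexp% node-name pattern and an attrib% node attribute; on older CIBs the pattern and attribute forms trigger a schema upgrade, as tested in LevelAddTargetUpgradesCib. A minimal sketch of the three forms follows; the commands are copied from the tests above, while the scaffolding (helper import, scratch CIB path) is assumed rather than taken from this patch:

    # Illustrative sketch only; the commands come from the tests above and
    # assume the F* stonith resources and cluster nodes exist, as in
    # fixture_full_configuration().
    from pcs.test.tools.misc import get_test_resource as rc
    from pcs.test.tools.pcs_runner import PcsRunner  # assumed helper location

    runner = PcsRunner(rc("temp-cib.xml"))
    runner.run("stonith level add 1 rh7-1 F1")                        # node name
    runner.run(r"stonith level add 4 regexp%rh7-\d F3")               # node name pattern
    runner.run("stonith level add 5 attrib%fencewith=levels1 F3 F2")  # node attribute
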
diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py
index c4c6d87..dba00c4 100644
--- a/pcs/test/test_utils.py
+++ b/pcs/test/test_utils.py
@@ -19,8 +19,11 @@ except ImportError:
from pcs.test.tools.xml import dom_get_child_elements
from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_unittest import mock
from pcs import utils
+from pcs.lib import reports
+from pcs.lib.errors import ReportItemSeverity
cib_with_nodes = rc("cib-empty-withnodes.xml")
empty_cib = rc("cib-empty.xml")
@@ -80,6 +83,11 @@ class UtilsTest(unittest.TestCase):
</primitive>
</group>
</master>
+ <bundle id="myBundle">
+ <primitive id="myBundledResource"
+ class="ocf" provider="heartbeat" type="Dummy" />
+ </bundle>
+ <bundle id="myEmptyBundle"/>
</resources>
""").documentElement
resources = cib_dom.getElementsByTagName("resources")[0]
@@ -116,6 +124,11 @@ class UtilsTest(unittest.TestCase):
self.assertFalse(
utils.dom_get_resource_clone_ms_parent(cib_dom, "myMasteredResource")
)
+ self.assertIsNone(utils.dom_get_bundle(cib_dom, "myResource"))
+ self.assertIsNone(utils.dom_get_bundle(cib_dom, "notExisting"))
+ self.assertIsNone(
+ utils.dom_get_resource_bundle_parent(cib_dom, "myBundledResource")
+ )
cib_dom = self.get_cib_resources()
all_ids = set([
@@ -126,13 +139,15 @@ class UtilsTest(unittest.TestCase):
"myGroup", "myGroupedResource",
"myGroupClone", "myClonedGroup", "myClonedGroupedResource",
"myGroupMaster", "myMasteredGroup", "myMasteredGroupedResource",
+ "myBundledResource", "myBundle", "myEmptyBundle",
])
resource_ids = set([
"myResource",
"myClonedResource", "myUniqueClonedResource",
"myGroupedResource", "myMasteredResource",
- "myClonedGroupedResource", "myMasteredGroupedResource"
+ "myClonedGroupedResource", "myMasteredGroupedResource",
+ "myBundledResource",
])
test_dom_get(
utils.dom_get_resource, cib_dom,
@@ -183,6 +198,11 @@ class UtilsTest(unittest.TestCase):
master_ids, all_ids - master_ids
)
+ bundle_ids = set(["myBundle", "myEmptyBundle"])
+ test_dom_get(
+ utils.dom_get_bundle, cib_dom,
+ bundle_ids, all_ids - bundle_ids
+ )
self.assert_element_id(
utils.dom_get_clone_ms_resource(cib_dom, "myClone"),
@@ -246,6 +266,54 @@ class UtilsTest(unittest.TestCase):
utils.dom_get_resource_clone_ms_parent(cib_dom, "myGroupedResource")
)
+ self.assertIsNone(utils.dom_get_resource_bundle(
+ utils.dom_get_bundle(cib_dom, "myEmptyBundle")
+ ))
+ self.assert_element_id(
+ utils.dom_get_resource_bundle(
+ utils.dom_get_bundle(cib_dom, "myBundle")
+ ),
+ "myBundledResource",
+ "primitive"
+ )
+
+ self.assert_element_id(
+ utils.dom_get_resource_bundle_parent(cib_dom, "myBundledResource"),
+ "myBundle"
+ )
+ self.assertIsNone(
+ utils.dom_get_resource_bundle_parent(cib_dom, "myResource")
+ )
+ self.assertIsNone(
+ utils.dom_get_resource_bundle_parent(cib_dom, "myClone")
+ )
+ self.assertIsNone(
+ utils.dom_get_resource_bundle_parent(cib_dom, "myClonedResource")
+ )
+ self.assertIsNone(
+ utils.dom_get_resource_bundle_parent(cib_dom, "myMaster")
+ )
+ self.assertIsNone(
+ utils.dom_get_resource_bundle_parent(cib_dom, "myMasteredGroup")
+ )
+ self.assertIsNone(
+ utils.dom_get_resource_bundle_parent(cib_dom, "myGroup")
+ )
+ self.assertIsNone(
+ utils.dom_get_resource_bundle_parent(cib_dom, "myGroupedResource")
+ )
+ self.assertIsNone(
+ utils.dom_get_resource_bundle_parent(cib_dom, "myGroupClone")
+ )
+ self.assertIsNone(
+ utils.dom_get_resource_bundle_parent(cib_dom, "myClonedGroup")
+ )
+ self.assertIsNone(
+ utils.dom_get_resource_bundle_parent(
+ cib_dom, "myClonedGroupedResource"
+ )
+ )
+
def testDomGetResourceRemoteNodeName(self):
dom = self.get_cib_empty()
new_resources = xml.dom.minidom.parseString("""
@@ -423,21 +491,21 @@ class UtilsTest(unittest.TestCase):
cc1 = utils.dom_get_element_with_id(dom, "cc", "cc1")
self.assert_element_id(
- utils.dom_get_parent_by_tag_name(bb1, "aa"),
+ utils.dom_get_parent_by_tag_names(bb1, ["aa"]),
"aa1"
)
self.assert_element_id(
- utils.dom_get_parent_by_tag_name(cc1, "aa"),
+ utils.dom_get_parent_by_tag_names(cc1, ["aa"]),
"aa1"
)
self.assert_element_id(
- utils.dom_get_parent_by_tag_name(cc1, "bb"),
+ utils.dom_get_parent_by_tag_names(cc1, ["bb"]),
"bb2"
)
- self.assertEqual(None, utils.dom_get_parent_by_tag_name(bb1, "cc"))
- self.assertEqual(None, utils.dom_get_parent_by_tag_name(cc1, "dd"))
- self.assertEqual(None, utils.dom_get_parent_by_tag_name(cc1, "ee"))
+ self.assertEqual(None, utils.dom_get_parent_by_tag_names(bb1, ["cc"]))
+ self.assertEqual(None, utils.dom_get_parent_by_tag_names(cc1, ["dd"]))
+ self.assertEqual(None, utils.dom_get_parent_by_tag_names(cc1, ["ee"]))
def testValidateConstraintResource(self):
dom = self.get_cib_resources()
@@ -458,6 +526,14 @@ class UtilsTest(unittest.TestCase):
utils.validate_constraint_resource(dom, "myGroupMaster")
)
self.assertEqual(
+ (True, "", "myBundle"),
+ utils.validate_constraint_resource(dom, "myBundle")
+ )
+ self.assertEqual(
+ (True, "", "myEmptyBundle"),
+ utils.validate_constraint_resource(dom, "myEmptyBundle")
+ )
+ self.assertEqual(
(True, "", "myResource"),
utils.validate_constraint_resource(dom, "myResource")
)
@@ -533,6 +609,19 @@ class UtilsTest(unittest.TestCase):
utils.validate_constraint_resource(dom, "myMasteredGroupedResource")
)
+ message = (
+ "%s is a bundle resource, you should use the bundle id: "
+ "%s when adding constraints. Use --force to override."
+ )
+ self.assertEqual(
+ (
+ False,
+ message % ("myBundledResource", "myBundle"),
+ "myBundle"
+ ),
+ utils.validate_constraint_resource(dom, "myBundledResource")
+ )
+
utils.pcs_options["--force"] = True
self.assertEqual(
(True, "", "myClone"),
@@ -558,6 +647,10 @@ class UtilsTest(unittest.TestCase):
(True, "", "myGroupMaster"),
utils.validate_constraint_resource(dom, "myMasteredGroupedResource")
)
+ self.assertEqual(
+ (True, "", "myBundle"),
+ utils.validate_constraint_resource(dom, "myBundledResource")
+ )
def testValidateXmlId(self):
self.assertEqual((True, ""), utils.validate_xml_id("dummy"))
@@ -1790,12 +1883,14 @@ class UtilsTest(unittest.TestCase):
utils.is_valid_cluster_property, definition, "unknown", "value"
)
- def assert_element_id(self, node, node_id):
+ def assert_element_id(self, node, node_id, tag=None):
self.assertTrue(
isinstance(node, xml.dom.minidom.Element),
"element with id '%s' not found" % node_id
)
self.assertEqual(node.getAttribute("id"), node_id)
+ if tag:
+ self.assertEqual(node.tagName, tag)
class RunParallelTest(unittest.TestCase):
@@ -1820,18 +1915,6 @@ class RunParallelTest(unittest.TestCase):
sorted(['first', 'second'])
)
- def test_wait_for_slower_workers(self):
- log = []
- utils.run_parallel(
- [
- self.fixture_create_worker(log, 'first', .03),
- self.fixture_create_worker(log, 'second'),
- ],
- wait_seconds=.01
- )
-
- self.assertEqual(log, ['second', 'first'])
-
class PrepareNodeNamesTest(unittest.TestCase):
def test_return_original_when_is_in_pacemaker_nodes(self):
@@ -2592,3 +2675,84 @@ class IsNodeStopCauseQuorumLossTest(unittest.TestCase):
quorum_info, False, ["rh70-node2", "rh70-node3"]
)
)
+class CanAddNodeToCluster(unittest.TestCase):
+ def setUp(self):
+ patcher = mock.patch("pcs.utils.check_can_add_node_to_cluster")
+ self.addCleanup(patcher.stop)
+ self.check_can_add = patcher.start()
+
+ def assert_report_list_cause_result(self, report_list, can_add, message):
+ def side_effect(node_communicator, node, report_items):
+ report_items.extend(
+ report_list if isinstance(report_list, list) else [report_list]
+ )
+ self.check_can_add.side_effect = side_effect
+
+ result_can_add, result_message = utils.canAddNodeToCluster(
+ node_communicator = None,
+ node="node1"
+ )
+
+ self.assertEqual((result_can_add, result_message), (can_add, message))
+
+ def assert_report_list_cause_success(self, report_list):
+ self.assert_report_list_cause_result(
+ report_list,
+ can_add=True,
+ message="",
+ )
+
+ def assert_report_list_cause_fail(self, report_list, message):
+ self.assert_report_list_cause_result(
+ report_list,
+ can_add=False,
+ message=message,
+ )
+
+ def test_sucess_on_empty_reports(self):
+ self.assert_report_list_cause_success([])
+
+ def test_sucess_when_no_error_there(self):
+ self.assert_report_list_cause_success(
+ reports.node_communication_error_not_authorized(
+ "node1", "command", "reason",
+ severity=ReportItemSeverity.WARNING
+ )
+ )
+
+ def test_deals_with_no_authorized(self):
+ self.assert_report_list_cause_fail(
+ reports.node_communication_error_not_authorized(
+ "node1", "command", "reason"
+ ),
+ "unable to authenticate to node"
+ )
+
+ def test_deals_with_running_pacemaker_remote(self):
+ self.assert_report_list_cause_fail(
+ reports.cannot_add_node_is_running_service(
+ "node1",
+ "pacemaker_remote"
+ ),
+ "node is running pacemaker_remote"
+ )
+
+ def test_deals_with_node_is_in_cluster(self):
+ self.assert_report_list_cause_fail(
+ reports.cannot_add_node_is_in_cluster("node1"),
+ "node is already in a cluster"
+ )
+
+ def test_deals_with_invalid_response(self):
+ self.assert_report_list_cause_fail(
+ reports.invalid_response_format("node1"),
+ "response parsing error"
+ )
+
+ def test_deals_with_any_other_connection_error(self):
+ self.assert_report_list_cause_fail(
+ reports.node_communication_error_timed_out(
+ "node1", "command", "reason",
+ ),
+ "error checking node availability: reason"
+ )
diff --git a/pcs/test/test_xml_tools.py b/pcs/test/test_xml_tools.py
new file mode 100644
index 0000000..4dea1fd
--- /dev/null
+++ b/pcs/test/test_xml_tools.py
@@ -0,0 +1,167 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib import xml_tools as lib
+from pcs.test.tools.assertions import assert_xml_equal
+from pcs.test.tools.pcs_unittest import TestCase
+
+class GetSubElementTest(TestCase):
+ def setUp(self):
+ self.root = etree.Element("root")
+ self.sub = etree.SubElement(self.root, "sub_element")
+
+ def test_sub_element_exists(self):
+ self.assertEqual(
+ self.sub, lib.get_sub_element(self.root, "sub_element")
+ )
+
+ def test_new_no_id(self):
+ assert_xml_equal(
+ '<new_element/>',
+ etree.tostring(
+ lib.get_sub_element(self.root, "new_element")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <root>
+ <sub_element/>
+ <new_element/>
+ </root>
+ """,
+ etree.tostring(self.root).decode()
+ )
+
+ def test_new_with_id(self):
+ assert_xml_equal(
+ '<new_element id="new_id"/>',
+ etree.tostring(
+ lib.get_sub_element(self.root, "new_element", "new_id")
+ ).decode()
+ )
+ assert_xml_equal(
+ """
+ <root>
+ <sub_element/>
+ <new_element id="new_id"/>
+ </root>
+ """,
+ etree.tostring(self.root).decode()
+ )
+
+ def test_new_first(self):
+ lib.get_sub_element(self.root, "new_element", "new_id", 0)
+ assert_xml_equal(
+ """
+ <root>
+ <new_element id="new_id"/>
+ <sub_element/>
+ </root>
+ """,
+ etree.tostring(self.root).decode()
+ )
+
+ def test_new_last(self):
+ lib.get_sub_element(self.root, "new_element", "new_id", None)
+ assert_xml_equal(
+ """
+ <root>
+ <sub_element/>
+ <new_element id="new_id"/>
+ </root>
+ """,
+ etree.tostring(self.root).decode()
+ )
+
+
+class UpdateAttributeRemoveEmpty(TestCase):
+ def setUp(self):
+ self.el = etree.Element(
+ "test_element",
+ {
+ "a": "A",
+ "b": "B",
+ }
+ )
+
+ def assert_xml_equal(self, expected):
+ assert_xml_equal(expected, etree.tostring(self.el).decode())
+
+ def test_set_new_attr(self):
+ lib.update_attribute_remove_empty(self.el, "c", "C")
+ self.assert_xml_equal('<test_element a="A" b="B" c="C" />')
+
+ def test_change_existing_attr(self):
+ lib.update_attribute_remove_empty(self.el, "b", "b1")
+ self.assert_xml_equal('<test_element a="A" b="b1" />')
+
+ def test_remove_existing_attr(self):
+ lib.update_attribute_remove_empty(self.el, "b", "")
+ self.assert_xml_equal('<test_element a="A" />')
+
+ def test_zero_does_not_remove(self):
+ lib.update_attribute_remove_empty(self.el, "b", "0")
+ self.assert_xml_equal('<test_element a="A" b="0" />')
+
+ def test_remove_missing_attr(self):
+ lib.update_attribute_remove_empty(self.el, "c", "")
+ self.assert_xml_equal('<test_element a="A" b="B" />')
+
+ def test_more(self):
+ lib.update_attributes_remove_empty(self.el, {
+ "a": "X",
+ "b": "",
+ "c": "C",
+ "d": "",
+ })
+ self.assert_xml_equal('<test_element a="X" c="C" />')
+
+
+class EtreeElementAttributesToDictTest(TestCase):
+ def setUp(self):
+ self.el = etree.Element(
+ "test_element",
+ {
+ "id": "test_id",
+ "description": "some description",
+ "attribute": "value",
+ }
+ )
+
+ def test_only_existing(self):
+ self.assertEqual(
+ {
+ "id": "test_id",
+ "attribute": "value",
+ },
+ lib.etree_element_attibutes_to_dict(self.el, ["id", "attribute"])
+ )
+
+ def test_only_not_existing(self):
+ self.assertEqual(
+ {
+ "_id": None,
+ "not_existing": None,
+ },
+ lib.etree_element_attibutes_to_dict(
+ self.el, ["_id", "not_existing"]
+ )
+ )
+
+ def test_mix(self):
+ self.assertEqual(
+ {
+ "id": "test_id",
+ "attribute": "value",
+ "not_existing": None,
+ },
+ lib.etree_element_attibutes_to_dict(
+ self.el, ["id", "not_existing", "attribute"]
+ )
+ )
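
The new test_xml_tools.py above pins down the behaviour of two small helpers. For readers who do not want to reverse-engineer it from the assertions, a rough sketch of the semantics the tests imply (this is not the pcs.lib.xml_tools implementation, only an equivalent reading of the tests, assuming lxml etree):

    from lxml import etree

    def get_sub_element_sketch(parent, tag, new_id=None, new_index=None):
        # return an existing child with the given tag, or create one;
        # new_index=None appends, an integer inserts at that position
        element = parent.find(tag)
        if element is None:
            element = etree.Element(tag)
            if new_id:
                element.set("id", new_id)
            if new_index is None:
                parent.append(element)
            else:
                parent.insert(new_index, element)
        return element

    def update_attribute_remove_empty_sketch(element, name, value):
        # an empty value removes the attribute, anything else (even "0") sets it
        if value == "":
            if name in element.attrib:
                del element.attrib[name]
        else:
            element.set(name, value)
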
diff --git a/pcs/test/tools/assertions.py b/pcs/test/tools/assertions.py
index 4c8f8df..97e2472 100644
--- a/pcs/test/tools/assertions.py
+++ b/pcs/test/tools/assertions.py
@@ -7,10 +7,21 @@ from __future__ import (
import doctest
from lxml.doctestcompare import LXMLOutputChecker
+from lxml.etree import LXML_VERSION
from pcs.lib.errors import LibraryError
from pcs.test.tools.misc import prepare_diff
+def start_tag_error_text():
+ """lxml 3.7+ gives a longer 'start tag expected' error message,
+ handle it here so multiple tests can just get the appropriate
+ string from this function.
+ """
+ msg = "Start tag expected, '<' not found, line 1, column 1"
+ if LXML_VERSION >= (3, 7, 0, 0):
+ msg += " (<string>, line 1)"
+ return msg
+
def console_report(*lines):
#after lines append last new line
return "\n".join(lines + ("",))
@@ -18,6 +29,18 @@ def console_report(*lines):
class AssertPcsMixin(object):
"""Run pcs command and assert its result"""
+ def assert_pcs_success_all(self, command_list):
+ for command in command_list:
+ stdout, pcs_returncode = self.pcs_runner.run(command)
+ if pcs_returncode != 0:
+ raise AssertionError(
+ (
+ "Command '{0}' does not succeed.\n"
+ "return_code: {1}\n"
+ "stdout:\n{2}"
+ ).format(command, pcs_returncode, stdout)
+ )
+
def assert_pcs_success(self, command, stdout_full=None, stdout_start=None):
full = stdout_full
if stdout_start is None and stdout_full is None:
@@ -36,6 +59,12 @@ class AssertPcsMixin(object):
returncode=1
)
+ def assert_pcs_fail_regardless_of_force(
+ self, command, stdout_full=None, stdout_start=None
+ ):
+ self.assert_pcs_fail(command, stdout_full, stdout_start)
+ self.assert_pcs_fail(command+" --force", stdout_full, stdout_start)
+
def assert_pcs_result(
self, command, stdout_full=None, stdout_start=None, returncode=0
):
@@ -120,14 +149,22 @@ class ExtendedAssertionsMixin(object):
)
-def assert_xml_equal(expected_xml, got_xml):
+def assert_xml_equal(expected_xml, got_xml, context_explanation=""):
checker = LXMLOutputChecker()
if not checker.check_output(expected_xml, got_xml, 0):
- raise AssertionError(checker.output_difference(
- doctest.Example("", expected_xml),
- got_xml,
- 0
- ))
+ raise AssertionError(
+ "{context_explanation}{xml_diff}".format(
+ context_explanation=(
+ "" if not context_explanation
+ else "\n{0}\n".format(context_explanation)
+ ),
+ xml_diff=checker.output_difference(
+ doctest.Example("", expected_xml),
+ got_xml,
+ 0
+ )
+ )
+ )
def assert_report_item_equal(real_report_item, report_item_info):
if not __report_item_equal(real_report_item, report_item_info):
@@ -189,7 +226,8 @@ def __find_report_info(report_info_list, report_item):
report_item.info,
report_item.forceable
)),
- "\n".join(map(repr, report_info_list))
+ "\n".join(map(repr, report_info_list)) if report_info_list
+ else " No report is expected!"
)
)
diff --git a/pcs/test/tools/test/__init__.py b/pcs/test/tools/check/__init__.py
similarity index 100%
rename from pcs/test/tools/test/__init__.py
rename to pcs/test/tools/check/__init__.py
diff --git a/pcs/test/tools/test/test_misc.py b/pcs/test/tools/check/test_misc.py
similarity index 100%
rename from pcs/test/tools/test/test_misc.py
rename to pcs/test/tools/check/test_misc.py
diff --git a/pcs/test/tools/cib.py b/pcs/test/tools/cib.py
new file mode 100644
index 0000000..23ff869
--- /dev/null
+++ b/pcs/test/tools/cib.py
@@ -0,0 +1,64 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from pcs.test.tools.assertions import AssertPcsMixin, assert_xml_equal
+
+def xml_format(xml_string):
+ line_list = xml_string.splitlines()
+ reindented_lines = [line_list[0]]
+ for line in line_list[1:]:
+ leading_spaces = len(line) - len(line.lstrip()) - 4
+ #current indent is 2 spaces desired is 4 spaces
+ indent = " " * 2 * leading_spaces
+ new_line = indent + line.strip()
+ max_line_len = 80 - 12 #12 is indent in this file ;)
+ if new_line.endswith(">") and len(new_line) > max_line_len:
+ last_space = new_line[:max_line_len].rfind(" ")
+ if last_space:
+ closing = "/>" if new_line.endswith("/>") else ">"
+ splited_line = [
+ new_line[:last_space],
+ indent + " " + new_line[last_space : -1 * len(closing)],
+ indent + closing
+ ]
+ reindented_lines.extend(splited_line)
+ continue
+ #append not splited line
+ reindented_lines.append(new_line)
+
+ return "\n".join(reindented_lines)
+
+def get_assert_pcs_effect_mixin(get_cib_part):
+ class AssertPcsEffectMixin(AssertPcsMixin):
+ def assert_resources_xml_in_cib(self, expected_xml_resources):
+ xml = get_cib_part(self.temp_cib)
+ try:
+ assert_xml_equal(expected_xml_resources, xml.decode())
+ except AssertionError as e:
+ raise AssertionError(
+ "{0}\n\nCopy format ;)\n{1}".format(
+ e.args[0],
+ xml_format(xml.decode())
+ )
+ )
+
+ def assert_effect_single(self, command, expected_xml, output=""):
+ self.assert_pcs_success(command, output)
+ self.assert_resources_xml_in_cib(expected_xml)
+
+ def assert_effect(self, alternative_cmds, expected_xml, output=""):
+ alternative_list = (
+ alternative_cmds if isinstance(alternative_cmds, list)
+ else [alternative_cmds]
+ )
+ cib_content = open(self.temp_cib).read()
+ for alternative in alternative_list[:-1]:
+ self.assert_effect_single(alternative, expected_xml, output)
+ open(self.temp_cib, "w").write(cib_content)
+
+ self.assert_effect_single(alternative_list[-1], expected_xml, output)
+ return AssertPcsEffectMixin
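
get_assert_pcs_effect_mixin() above is a small class factory: the callable passed in tells the resulting mixin which part of the temporary CIB to extract, and assert_effect()/assert_effect_single() then run a pcs command and compare that part against expected XML. A hypothetical sketch of how a test module might wire it up; the extractor, file names and test body below are illustrative, not taken from this patch:

    import shutil
    from lxml import etree

    from pcs.test.tools.cib import get_assert_pcs_effect_mixin
    from pcs.test.tools.misc import get_test_resource as rc
    from pcs.test.tools.pcs_runner import PcsRunner  # assumed helper location
    from pcs.test.tools.pcs_unittest import TestCase

    temp_cib = rc("temp-cib.xml")  # illustrative scratch CIB path

    def get_resources_section(cib_file):
        # serialize the <resources> element of the temporary CIB
        return etree.tostring(etree.parse(cib_file).findall(".//resources")[0])

    class ResourcesStartEmpty(
        TestCase, get_assert_pcs_effect_mixin(get_resources_section)
    ):
        def setUp(self):
            shutil.copy(rc("cib-empty.xml"), temp_cib)
            self.temp_cib = temp_cib           # read by assert_resources_xml_in_cib
            self.pcs_runner = PcsRunner(temp_cib)

        def test_nothing_configured_yet(self):
            # real tests would call self.assert_effect("<pcs command>", "<xml>");
            # here we only check that the freshly copied CIB has an empty section
            self.assert_resources_xml_in_cib("<resources/>")
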
diff --git a/pcs/test/tools/color_text_runner/format.py b/pcs/test/tools/color_text_runner/format.py
index b54ad8c..7fb957b 100644
--- a/pcs/test/tools/color_text_runner/format.py
+++ b/pcs/test/tools/color_text_runner/format.py
@@ -116,7 +116,6 @@ def format_error_list(flavour, errors, descriptions, traceback_highlight):
"%s: %s" % (red(flavour), get_description(test, descriptions)),
lightgrey(separator2),
"%s" % format_traceback(err) if traceback_highlight else err,
- "",
])
return line_list
@@ -148,7 +147,7 @@ def format_traceback(err):
else:
formated_err.append(line)
was_prev_path = False
- return "\n".join(formated_err)
+ return "\n".join(formated_err)+"\n"
def format_skips(skip_map):
return [blue("Some tests have been skipped:")] + [
diff --git a/pcs/test/tools/color_text_runner/result.py b/pcs/test/tools/color_text_runner/result.py
index 600b7a3..4fda261 100644
--- a/pcs/test/tools/color_text_runner/result.py
+++ b/pcs/test/tools/color_text_runner/result.py
@@ -23,6 +23,7 @@ def get_text_test_result_class(
slash_last_fail_in_overview=False,
traditional_verbose=False,
traceback_highlight=False,
+ fast_info=False,
):
#TextTestResult is neede here. Direct inheriting from TestResult does not
#work in python 2.6
@@ -40,6 +41,8 @@ def get_text_test_result_class(
self.reportWriter = self.__chooseWriter()(
self.stream,
self.descriptions,
+ traceback_highlight,
+ fast_info,
)
self.skip_map = {}
@@ -53,11 +56,19 @@ def get_text_test_result_class(
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
- self.reportWriter.addError(test, err)
+ self.reportWriter.addError(
+ test,
+ err,
+ traceback=self.errors[-1][1]
+ )
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
- self.reportWriter.addFailure(test, err)
+ self.reportWriter.addFailure(
+ test,
+ err,
+ traceback=self.failures[-1][1]
+ )
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
diff --git a/pcs/test/tools/color_text_runner/writer.py b/pcs/test/tools/color_text_runner/writer.py
index 80e7374..d927cd1 100644
--- a/pcs/test/tools/color_text_runner/writer.py
+++ b/pcs/test/tools/color_text_runner/writer.py
@@ -13,21 +13,26 @@ from pcs.test.tools.color_text_runner.format import (
get_description,
format_module,
format_test_method_name,
+ format_traceback,
)
class Writer(object):
- def __init__(self, stream, descriptions):
+ def __init__(
+ self, stream, descriptions, traceback_highlight=False, fast_info=False,
+ ):
self.stream = stream
self.descriptions = descriptions
+ self.traceback_highlight = traceback_highlight
+ self.fast_info = fast_info
def addSuccess(self, test):
pass
- def addError(self, test, err):
+ def addError(self, test, err, traceback):
pass
- def addFailure(self, test, err):
+ def addFailure(self, test, err, traceback):
pass
def addSkip(self, test, reason):
@@ -42,18 +47,28 @@ class Writer(object):
def addUnexpectedSuccess(self, test):
pass
+ def show_fast_info(self, traceback):
+ if self.fast_info:
+ self.stream.writeln()
+ self.stream.writeln(
+ format_traceback(traceback) if self.traceback_highlight
+ else traceback
+ )
+
class DotWriter(Writer):
def addSuccess(self, test):
self.stream.write(green("."))
self.stream.flush()
- def addError(self, test, err):
+ def addError(self, test, err, traceback):
self.stream.write(red('E'))
self.stream.flush()
+ self.show_fast_info(traceback)
- def addFailure(self, test, err):
+ def addFailure(self, test, err, traceback):
self.stream.write(red('F'))
self.stream.flush()
+ self.show_fast_info(traceback)
def addSkip(self, test, reason):
self.stream.write(blue('s'))
@@ -71,11 +86,13 @@ class StandardVerboseWriter(Writer):
def addSuccess(self, test):
self.stream.writeln(green("OK"))
- def addError(self, test, err):
+ def addError(self, test, err, traceback):
self.stream.writeln(red("ERROR"))
+ self.show_fast_info(traceback)
- def addFailure(self, test, err):
+ def addFailure(self, test, err, traceback):
self.stream.writeln(red("FAIL"))
+ self.show_fast_info(traceback)
def addSkip(self, test, reason):
self.stream.writeln(
@@ -94,8 +111,15 @@ class StandardVerboseWriter(Writer):
self.stream.writeln(red("unexpected success"))
class ImprovedVerboseWriter(StandardVerboseWriter):
- def __init__(self, stream, descriptions):
- super(ImprovedVerboseWriter, self).__init__(stream, descriptions)
+ def __init__(
+ self, stream, descriptions, traceback_highlight=False, fast_info=False,
+ ):
+ super(ImprovedVerboseWriter, self).__init__(
+ stream,
+ descriptions,
+ traceback_highlight,
+ fast_info
+ )
self.last_test = None
def __is_new_module(self, test):
diff --git a/pcs/test/tools/custom_mock.py b/pcs/test/tools/custom_mock.py
index c038d28..86f862b 100644
--- a/pcs/test/tools/custom_mock.py
+++ b/pcs/test/tools/custom_mock.py
@@ -5,20 +5,73 @@ from __future__ import (
unicode_literals,
)
+from pcs.cli.common.reports import LibraryReportProcessorToConsole
+import pcs.common.pcs_pycurl as pycurl
from pcs.lib.errors import LibraryError, ReportItemSeverity
+from pcs.test.tools.assertions import assert_report_item_list_equal
-class MockLibraryReportProcessor(object):
- def __init__(self):
- self.report_item_list = []
- def process(self, report_item):
- self.process_list([report_item])
+class MockLibraryReportProcessor(LibraryReportProcessorToConsole):
+ def __init__(self, debug=False, raise_on_errors=True):
+ super(MockLibraryReportProcessor, self).__init__(debug)
+ self.raise_on_errors = raise_on_errors
- def process_list(self, report_item_list):
- self.report_item_list.extend(report_item_list)
- errors = [
- item for item in report_item_list
- if item.severity == ReportItemSeverity.ERROR
- ]
- if errors:
+ @property
+ def report_item_list(self):
+ return self.items
+
+ def send(self):
+ errors = []
+ for report_item in self.items:
+ if report_item.severity == ReportItemSeverity.ERROR:
+ errors.append(report_item)
+ if errors and self.raise_on_errors:
raise LibraryError(*errors)
+
+ def assert_reports(self, report_info_list):
+ assert_report_item_list_equal(self.report_item_list, report_info_list)
+
+
+class MockCurl(object):
+ def __init__(self, info, output="", debug_output_list=None, exception=None):
+ self._opts = {}
+ self._info = info if info else {}
+ self._output = output
+ self._debug_output_list = debug_output_list
+ self._exception = exception
+
+ @property
+ def opts(self):
+ return self._opts
+
+ def reset(self):
+ self._opts = {}
+
+ def setopt(self, opt, val):
+ if val is None:
+ self.unsetopt(opt)
+ else:
+ self._opts[opt] = val
+
+ def unsetopt(self, opt):
+ try:
+ del self._opts[opt]
+ except KeyError:
+ pass
+
+ def getinfo(self, opt):
+ try:
+ return self._info[opt]
+ except KeyError:
+ AssertionError("info '#{0}' not defined".format(opt))
+
+ def perform(self):
+ if self._exception:
+ #pylint: disable=raising-bad-type
+ raise self._exception
+ if pycurl.WRITEFUNCTION in self._opts:
+ self._opts[pycurl.WRITEFUNCTION](self._output)
+ if pycurl.DEBUGFUNCTION in self._opts:
+ for msg_type, msg in self._debug_output_list:
+ self._opts[pycurl.DEBUGFUNCTION](msg_type, msg)
+
diff --git a/pcs/test/tools/integration_lib.py b/pcs/test/tools/integration_lib.py
new file mode 100644
index 0000000..3336d51
--- /dev/null
+++ b/pcs/test/tools/integration_lib.py
@@ -0,0 +1,127 @@
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from os import path
+
+from pcs.test.tools.assertions import assert_xml_equal
+
+from pcs import settings
+
+class Call(object):
+ command_completions = {
+ "crm_resource": path.join(settings.pacemaker_binaries, "crm_resource"),
+ "cibadmin": path.join(settings.pacemaker_binaries, "cibadmin"),
+ "crm_mon": path.join(settings.pacemaker_binaries, "crm_mon"),
+ "sbd": settings.sbd_binary,
+ }
+
+ @staticmethod
+ def create_check_stdin_xml(expected_stdin):
+ def stdin_xml_check(stdin, command, order_num):
+ assert_xml_equal(
+ expected_stdin,
+ stdin,
+ (
+ "Trying to run command no. {0}"
+ "\n\n '{1}'\n\nwith expected xml stdin.\n"
+ ).format(order_num, command)
+ )
+ return stdin_xml_check
+
+ def __init__(
+ self, command, stdout="", stderr="", returncode=0, check_stdin=None
+ ):
+ """
+ callable check_stdin raises AssertionError when given stdin doesn't match
+ """
+ self.command = self.__complete_command(command)
+ self.stdout = stdout
+ self.stderr = stderr
+ self.returncode = returncode
+ self.check_stdin = check_stdin if check_stdin else self.__check_no_stdin
+
+ def __complete_command(self, command):
+ for shortcut, full_path in self.command_completions.items():
+ if command.startswith("{0} ".format(shortcut)):
+ return full_path + command[len(shortcut):]
+ return command
+
+ def __check_no_stdin(self, stdin, command, order_num):
+ if stdin:
+ raise AssertionError(
+ (
+ "With command\n\n '{0}'\n\nno stdin expected but was"
+ "\n\n'{1}'"
+ )
+ .format(command, stdin)
+ )
+
+ @property
+ def result(self):
+ return self.stdout, self.stderr, self.returncode
+
+
+class Runner(object):
+ def __init__(self):
+ self.set_runs([])
+
+ def assert_can_take_next_run(self, command, stdin_string):
+ if not self.run_list:
+ raise AssertionError(
+ (
+ "No next run expected, but was:\n '{command}'{stdin}\n"
+ "already launched:\n{already_launched}"
+ ).format(
+ command=command,
+ stdin=(
+ "" if not stdin_string else "\nwith stdin:\n\n{0}\n"
+ .format(stdin_string)
+ ),
+ already_launched=" " + "\n ".join([
+ "'{0}'".format(run.command)
+ for run in self.already_launched_list
+ ])
+ )
+ )
+ return self.run_list.pop(0)
+
+ def assert_command_match(self, expected_command, entered_command):
+ if entered_command != expected_command:
+ raise AssertionError(
+ "As {0}. command expected\n\n '{1}'\n\nbut was\n\n '{2}'"
+ .format(
+ self.current_order_num,
+ expected_command,
+ entered_command
+ )
+ )
+
+ def assert_everything_launched(self):
+ if self.run_list:
+ raise AssertionError(
+ "There are remaining expected commands: \n '{0}'".format(
+ "'\n '".join([call.command for call in self.run_list])
+ )
+ )
+
+ @property
+ def current_order_num(self):
+ return len(self.already_launched_list) + 1
+
+ def run(
+ self, args, stdin_string=None, env_extend=None, binary_output=False
+ ):
+ command = " ".join(args)
+ next_run = self.assert_can_take_next_run(command, stdin_string)
+ self.assert_command_match(next_run.command, command)
+ next_run.check_stdin(stdin_string, command, self.current_order_num)
+ self.already_launched_list.append(next_run)
+ return next_run.result
+
+ def set_runs(self, run_list):
+ self.run_list = run_list
+ self.already_launched_list = []
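A hypothetical sketch of how Call and Runner from the new integration_lib are
meant to cooperate in a test; the command and its output are invented for the
example, and the runner would be injected in place of the real
external-command runner:

    runner = Runner()
    runner.set_runs([
        Call("stonith_admin --list-registered", stdout="fence_dev_1\n"),
    ])
    # the code under test would call this runner instead of the real one
    stdout, stderr, returncode = runner.run(
        ["stonith_admin", "--list-registered"]
    )
    assert (stdout, stderr, returncode) == ("fence_dev_1\n", "", 0)
    runner.assert_everything_launched()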
diff --git a/pcs/test/tools/misc.py b/pcs/test/tools/misc.py
index 4696497..8423cf4 100644
--- a/pcs/test/tools/misc.py
+++ b/pcs/test/tools/misc.py
@@ -10,7 +10,11 @@ import os.path
import re
from pcs import utils
-from pcs.test.tools.pcs_unittest import mock
+from pcs.common.tools import is_string
+from pcs.test.tools.pcs_unittest import (
+ mock,
+ skipUnless,
+)
testdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -36,6 +40,18 @@ def get_test_resource(name):
"""Return full path to a test resource file specified by name"""
return os.path.join(testdir, "resources", name)
+def cmp3(a, b):
+ # python3 doesn't have the cmp function, this is an official workaround
+ # https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons
+ return (a > b) - (a < b)
+
+def compare_version(a, b):
+ if a[0] == b[0]:
+ if a[1] == b[1]:
+ return cmp3(a[2], b[2])
+ return cmp3(a[1], b[1])
+ return cmp3(a[0], b[0])
+
def is_minimum_pacemaker_version(cmajor, cminor, crev):
output, dummy_retval = utils.run(["crm_mon", "--version"])
pacemaker_version = output.split("\n")[0]
@@ -44,25 +60,65 @@ def is_minimum_pacemaker_version(cmajor, cminor, crev):
major = int(m.group(1))
minor = int(m.group(2))
rev = int(m.group(3))
- return (
- major > cmajor
- or
- (major == cmajor and minor > cminor)
- or
- (major == cmajor and minor == cminor and rev >= crev)
+ return compare_version((major, minor, rev), (cmajor, cminor, crev)) > -1
+
+def is_minimum_pacemaker_features(cmajor, cminor, crev):
+ output, dummy_retval = utils.run(["pacemakerd", "--features"])
+ features_version = output.split("\n")[1]
+ r = re.compile(r"Supporting v(\d+)\.(\d+)\.(\d+):")
+ m = r.search(features_version)
+ major = int(m.group(1))
+ minor = int(m.group(2))
+ rev = int(m.group(3))
+ return compare_version((major, minor, rev), (cmajor, cminor, crev)) > -1
+
+def skip_unless_pacemaker_version(version_tuple, feature):
+ return skipUnless(
+ is_minimum_pacemaker_version(*version_tuple),
+ "Pacemaker version is too old (must be >= {version}) to test {feature}"
+ .format(
+ version=".".join([str(x) for x in version_tuple]),
+ feature=feature
+ )
+ )
+
+def skip_unless_pacemaker_features(version_tuple, feature):
+ return skipUnless(
+ is_minimum_pacemaker_features(*version_tuple),
+ "Pacemaker must support feature set version {version} to test {feature}"
+ .format(
+ version=".".join([str(x) for x in version_tuple]),
+ feature=feature
+ )
)
-def create_patcher(target_prefix):
+skip_unless_pacemaker_supports_bundle = skip_unless_pacemaker_features(
+ (3, 0, 12),
+ "bundle resources"
+)
+
+def skip_unless_pacemaker_supports_systemd():
+ output, dummy_retval = utils.run(["pacemakerd", "--features"])
+ return skipUnless(
+ "systemd" in output,
+ "Pacemaker does not support systemd resources"
+ )
+
+def create_patcher(target_prefix_or_module):
"""
Return function for patching tests with preconfigured target prefix
- string target_prefix is prefix for patched names. Typicaly tested module
- like for example "pcs.lib.commands.booth". Between target_prefix and target
- is "." (dot)
+ string|module target_prefix_or_module can be:
+ * a prefix for patched names, typically the tested module:
+ "pcs.lib.commands.booth"
+ * an (imported) module: pcs.lib.cib
+ The prefix and the target are joined with "." (dot)
"""
+ prefix = target_prefix_or_module
+ if not is_string(target_prefix_or_module):
+ prefix = target_prefix_or_module.__name__
+
def patch(target, *args, **kwargs):
- return mock.patch(
- "{0}.{1}".format(target_prefix, target), *args, **kwargs
- )
+ return mock.patch("{0}.{1}".format(prefix, target), *args, **kwargs)
return patch
def outdent(text):
@@ -72,3 +128,35 @@ def outdent(text):
for line in line_list if line
])
return "\n".join([line[smallest_indentation:] for line in line_list])
+
+def create_setup_patch_mixin(module_specification_or_patcher):
+ """
+ Configure and return SetupPatchMixin
+
+ SetupPatchMixin adds the method 'setup_patch' to a test case.
+
+ Method setup_patch takes a name to be patched in the destination module
+ (see module_specification_or_patcher) and registers cleanup after the test.
+ It is expected to be used in the 'setUp' method but should work inside a
+ test as well.
+
+ string|callable module_specification_or_patcher can be
+ * a callable patcher created via create_patcher:
+ create_patcher("pcs.lib.cib")
+ * the name of a module: "pcs.lib.cib"
+ * an (imported) module: pcs.lib.cib
+ Note that the module must not be callable (which can happen via
+ sys.modules[__name__] = something_callable); if it is callable, use the
+ name of the module instead.
+ """
+ if callable(module_specification_or_patcher):
+ patch_module = module_specification_or_patcher
+ else:
+ patch_module = create_patcher(module_specification_or_patcher)
+
+ class SetupPatchMixin(object):
+ def setup_patch(self, target_suffix, *args, **kwargs):
+ patcher = patch_module(target_suffix, *args, **kwargs)
+ self.addCleanup(patcher.stop)
+ return patcher.start()
+ return SetupPatchMixin
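To illustrate the mixin described in the docstring above, here is a
hypothetical test case; patching pcs.lib.external.is_systemctl is only an
example target chosen because that function appears elsewhere in this patch:

    from unittest import TestCase

    from pcs.test.tools.misc import create_setup_patch_mixin

    class PatchingExample(TestCase, create_setup_patch_mixin("pcs.lib.external")):
        def setUp(self):
            # patch pcs.lib.external.is_systemctl for this test only;
            # cleanup is registered via addCleanup inside setup_patch
            self.mock_is_systemctl = self.setup_patch(
                "is_systemctl", return_value=True
            )

        def test_reports_systemctl(self):
            from pcs.lib import external
            self.assertTrue(external.is_systemctl())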
diff --git a/pcs/test/tools/xml.py b/pcs/test/tools/xml.py
index e4a160d..27418b6 100644
--- a/pcs/test/tools/xml.py
+++ b/pcs/test/tools/xml.py
@@ -15,6 +15,12 @@ def dom_get_child_elements(element):
if child.nodeType == xml.dom.minidom.Node.ELEMENT_NODE
]
+def etree_to_str(tree):
+ #etree returns string in bytes: b'xml'
+ #python 3 removed .encode() from byte strings
+ #run(...) calls subprocess.Popen.communicate which calls encode...
+ #so there is bytes to str conversion
+ return etree.tostring(tree).decode()
class XmlManipulation(object):
@classmethod
@@ -39,11 +45,7 @@ class XmlManipulation(object):
return self
def __str__(self):
- #etree returns string in bytes: b'xml'
- #python 3 removed .encode() from byte strings
- #run(...) calls subprocess.Popen.communicate which calls encode...
- #so there is bytes to str conversion
- return etree.tostring(self.tree).decode()
+ return etree_to_str(self.tree)
def get_xml_manipulation_creator_from_file(file_name):
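A tiny example of the bytes-to-str conversion etree_to_str performs, assuming
lxml.etree as the underlying parser used by the pcs test tools:

    from lxml import etree

    from pcs.test.tools.xml import etree_to_str

    tree = etree.fromstring("<cib><configuration/></cib>")
    assert etree.tostring(tree) == b"<cib><configuration/></cib>"  # bytes
    assert etree_to_str(tree) == "<cib><configuration/></cib>"     # str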
diff --git a/pcs/usage.py b/pcs/usage.py
index 35fc1be..c73a103 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -155,26 +155,28 @@ Usage: pcs [-f file] [-h] [commands]...
Control and configure pacemaker and corosync.
Options:
- -h, --help Display usage and exit.
- -f file Perform actions on file instead of active CIB.
- --debug Print all network traffic and external commands run.
- --version Print pcs version information.
+ -h, --help Display usage and exit.
+ -f file Perform actions on file instead of active CIB.
+ --debug Print all network traffic and external commands run.
+ --version Print pcs version information.
+ --request-timeout Timeout for each outgoing request to another node in
+ seconds. Default is 60s.
Commands:
cluster Configure cluster options and nodes.
resource Manage cluster resources.
- stonith Configure fence devices.
- constraint Set resource constraints.
- property Set pacemaker properties.
- acl Set pacemaker access control lists.
- qdevice Manage quorum device provider.
+ stonith Manage fence devices.
+ constraint Manage resource constraints.
+ property Manage pacemaker properties.
+ acl Manage pacemaker access control lists.
+ qdevice Manage quorum device provider on the local host.
quorum Manage cluster quorum settings.
booth Manage booth (cluster ticket manager).
status View cluster status.
config View and manage cluster configuration.
pcsd Manage pcs daemon.
node Manage cluster nodes.
- alert Set pacemaker alerts.
+ alert Manage pacemaker alerts.
"""
# Advanced usage to possibly add later
# --corosync_conf=<corosync file> Specify alternative corosync.conf file
@@ -202,25 +204,27 @@ Commands:
only resource agents matching the filter will be shown). If --nodesc is
used then descriptions of resource agents are not printed.
- describe [<standard>:[<provider>:]]<type>
- Show options for the specified resource.
+ describe [<standard>:[<provider>:]]<type> [--full]
+ Show options for the specified resource. If --full is specified, all
+ options including advanced ones are shown.
create <resource id> [<standard>:[<provider>:]]<type> [resource options]
[op <operation action> <operation options> [<operation action>
<operation options>]...] [meta <meta options>...]
- [--clone <clone options> | --master <master options> |
+ [clone [<clone options>] | master [<master options>] |
--group <group id> [--before <resource id> | --after <resource id>]
- ] [--disabled] [--wait[=n]]
- Create specified resource. If --clone is used a clone resource is
- created. If --master is specified a master/slave resource is created.
- If --group is specified the resource is added to the group named. You
+ | bundle <bundle id>] [--disabled] [--wait[=n]]
+ Create specified resource. If clone is used a clone resource is
+ created. If master is specified a master/slave resource is created.
+ If --group is specified the resource is added to the group named. You
can use --before or --after to specify the position of the added
resource relatively to some resource already existing in the group.
- If --disabled is specified the resource is not started automatically.
- If --wait is specified, pcs will wait up to 'n' seconds for the resource
- to start and then return 0 if the resource is started, or 1 if
- the resource has not yet started. If 'n' is not specified it defaults
- to 60 minutes.
+ If bundle is used, the resource will be created inside of the specified
+ bundle. If --disabled is specified the resource is not started
+ automatically. If --wait is specified, pcs will wait up to 'n' seconds
+ for the resource to start and then return 0 if the resource is started,
+ or 1 if the resource has not yet started. If 'n' is not specified it
+ defaults to 60 minutes.
Example: Create a new resource called 'VirtualIP' with IP address
192.168.0.99, netmask of 32, monitored every 30 seconds,
on eth2:
@@ -232,22 +236,22 @@ Commands:
Deletes the resource, group, master or clone (and all resources within
the group/master/clone).
- enable <resource id> [--wait[=n]]
- Allow the cluster to start the resource. Depending on the rest of the
- configuration (constraints, options, failures, etc), the resource may
- remain stopped. If --wait is specified, pcs will wait up to 'n' seconds
- for the resource to start and then return 0 if the resource is started,
- or 1 if the resource has not yet started. If 'n' is not specified it
- defaults to 60 minutes.
+ enable <resource id>... [--wait[=n]]
+ Allow the cluster to start the resources. Depending on the rest of the
+ configuration (constraints, options, failures, etc), the resources may
+ remain stopped. If --wait is specified, pcs will wait up to 'n' seconds
+ for the resources to start and then return 0 if the resources are
+ started, or 1 if the resources have not yet started. If 'n' is not
+ specified it defaults to 60 minutes.
- disable <resource id> [--wait[=n]]
- Attempt to stop the resource if it is running and forbid the cluster
- from starting it again. Depending on the rest of the configuration
- (constraints, options, failures, etc), the resource may remain
- started. If --wait is specified, pcs will wait up to 'n' seconds for
- the resource to stop and then return 0 if the resource is stopped or 1
- if the resource has not stopped. If 'n' is not specified it defaults
- to 60 minutes.
+ disable <resource id>... [--wait[=n]]
+ Attempt to stop the resources if they are running and forbid the
+ cluster from starting them again. Depending on the rest of the
+ configuration (constraints, options, failures, etc), the resources may
+ remain started. If --wait is specified, pcs will wait up to 'n' seconds
+ for the resources to stop and then return 0 if the resources are
+ stopped or 1 if the resources have not stopped. If 'n' is not specified
+ it defaults to 60 minutes.
restart <resource id> [node] [--wait=n]
Restart the resource specified. If a node is specified and if the
@@ -281,7 +285,7 @@ Commands:
This is mainly used for debugging resources that fail to demote.
debug-monitor <resource id> [--full]
- This command will force the specified resource to be moniored on this
+ This command will force the specified resource to be monitored on this
node ignoring the cluster recommendations and print the output from
monitoring the resource. Using --full will give more detailed output.
This is mainly used for debugging resources that fail to be monitored.
@@ -389,9 +393,9 @@ Commands:
group remove <group id> <resource id> [resource id] ... [resource id]
[--wait[=n]]
Remove the specified resource(s) from the group, removing the group if
- it no resources remain. If --wait is specified, pcs will wait up to 'n'
- seconds for the operation to finish (including moving resources if
- appropriate) and then return 0 on success or 1 on error. If 'n' is not
+ no resources remain in it. If --wait is specified, pcs will wait up to
+ 'n' seconds for the operation to finish (including moving resources if
+ appropriate) and then return 0 on success or 1 on error. If 'n' is not
specified it defaults to 60 minutes.
ungroup <group id> [resource id] ... [resource id] [--wait[=n]]
@@ -403,10 +407,10 @@ Commands:
defaults to 60 minutes.
clone <resource id | group id> [clone options]... [--wait[=n]]
- Setup up the specified resource or group as a clone. If --wait is
+ Set up the specified resource or group as a clone. If --wait is
specified, pcs will wait up to 'n' seconds for the operation to finish
(including starting clone instances if appropriate) and then return 0
- on success or 1 on error. If 'n' is not specified it defaults to 60
+ on success or 1 on error. If 'n' is not specified it defaults to 60
minutes.
unclone <resource id | group id> [--wait[=n]]
@@ -418,30 +422,55 @@ Commands:
master [<master/slave id>] <resource id | group id> [options] [--wait[=n]]
Configure a resource or group as a multi-state (master/slave) resource.
- If --wait is specified, pcs will wait up to 'n' seconds for the operation
- to finish (including starting and promoting resource instances if
- appropriate) and then return 0 on success or 1 on error. If 'n' is not
- specified it defaults to 60 minutes.
+ If --wait is specified, pcs will wait up to 'n' seconds for the
+ operation to finish (including starting and promoting resource
+ instances if appropriate) and then return 0 on success or 1 on error.
+ If 'n' is not specified it defaults to 60 minutes.
Note: to remove a master you must remove the resource/group it contains.
- manage <resource id> ... [resource n]
- Set resources listed to managed mode (default).
+ bundle create <bundle id> [container [<container type>] <container options>]
+ [network <network options>] [port-map <port options>]...
+ [storage-map <storage options>]... [--wait[=n]]
+ Create a new bundle encapsulating no resources. The bundle can be used
+ either as it is or a resource may be put into it at any time.
+ If the container type is not specified, it defaults to 'docker'.
+ If --wait is specified, pcs will wait up to 'n' seconds for the bundle
+ to start and then return 0 on success or 1 on error. If 'n' is not
+ specified it defaults to 60 minutes.
- unmanage <resource id> ... [resource n]
- Set resources listed to unmanaged mode.
+ bundle update <bundle id> [container <container options>]
+ [network <network options>]
+ [port-map (add <port options>) | (remove <id>...)]...
+ [storage-map (add <storage options>) | (remove <id>...)]...
+ [--wait[=n]]
+ Add, remove or change options of the specified bundle. If you wish to update
+ a resource encapsulated in the bundle, use the 'pcs resource update'
+ command instead and specify the resource id. If --wait is specified,
+ pcs will wait up to 'n' seconds for the operation to finish (including
+ moving resources if appropriate) and then return 0 on success or 1 on
+ error. If 'n' is not specified it defaults to 60 minutes.
+
+ manage <resource id>... [--monitor]
+ Set resources listed to managed mode (default). If --monitor is
+ specified, enable all monitor operations of the resources.
+
+ unmanage <resource id>... [--monitor]
+ Set resources listed to unmanaged mode. When a resource is in unmanaged
+ mode, the cluster is not allowed to start nor stop the resource. If
+ --monitor is specified, disable all monitor operations of the
+ resources.
defaults [options]
Set default values for resources, if no options are passed, lists
currently configured defaults.
cleanup [<resource id>] [--node <node>]
- Cleans up the resource in the lrmd (useful to reset the resource status
- and failcount). This tells the cluster to forget the operation history
- of a resource and re-detect its current state. This can be useful to
- purge knowledge of past failures that have since been resolved. If a
- resource id is not specified then all resources/stonith devices will be
- cleaned up. If a node is not specified then resources on all nodes
- will be cleaned up.
+ Make the cluster forget the operation history of the resource and
+ re-detect its current state. This can be useful to purge knowledge of
+ past failures that have since been resolved. If a resource id is not
+ specified then all resources/stonith devices will be cleaned up. If a
+ node is not specified then resources/stonith devices on all nodes will
+ be cleaned up.
failcount show <resource id> [node]
Show current failcount for specified resource from all nodes or
@@ -528,11 +557,11 @@ Configure cluster for use with pacemaker
Commands:
auth [node] [...] [-u username] [-p password] [--force] [--local]
Authenticate pcs to pcsd on nodes specified, or on all nodes
- configured in corosync.conf if no nodes are specified (authorization
+ configured in the local cluster if no nodes are specified (authorization
tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root).
By default all nodes are also authenticated to each other, using
--local only authenticates the local node (and does not authenticate
- the remote nodes with each other). Using --force forces
+ the remote nodes with each other). Using --force forces
re-authentication to occur.
setup [--start [--wait[=<n>]]] [--local] [--enable] --name <cluster name>
@@ -545,8 +574,8 @@ Commands:
[--wait_for_all=<0|1>] [--auto_tie_breaker=<0|1>]
[--last_man_standing=<0|1> [--last_man_standing_window=<time in ms>]]
[--ipv6] [--token <timeout>] [--token_coefficient <timeout>]
- [--join <timeout>] [--consensus <timeout>] [--miss_count_const <count>]
- [--fail_recv_const <failures>]
+ [--join <timeout>] [--consensus <timeout>]
+ [--miss_count_const <count>] [--fail_recv_const <failures>]
Configure corosync and sync configuration out to listed nodes.
--local will only perform changes on the local node,
--start will also start the cluster on the specified nodes,
@@ -565,21 +594,23 @@ Commands:
option is not supported on CMAN clusters.
--token <timeout> sets time in milliseconds until a token loss is
declared after not receiving a token (default 1000 ms)
- --token_coefficient <timeout> sets time in milliseconds used for clusters
- with at least 3 nodes as a coefficient for real token timeout calculation
+ --token_coefficient <timeout> sets time in milliseconds used for
+ clusters with at least 3 nodes as a coefficient for real token
+ timeout calculation
(token + (number_of_nodes - 2) * token_coefficient) (default 650 ms)
This option is not supported on CMAN clusters.
--join <timeout> sets time in milliseconds to wait for join messages
(default 50 ms)
--consensus <timeout> sets time in milliseconds to wait for consensus
- to be achieved before starting a new round of membership configuration
- (default 1200 ms)
+ to be achieved before starting a new round of membership
+ configuration (default 1200 ms)
--miss_count_const <count> sets the maximum number of times on
receipt of a token a message is checked for retransmission before
a retransmission occurs (default 5 messages)
--fail_recv_const <failures> specifies how many rotations of the token
without receiving any messages when messages should be received
- may occur before a new configuration is formed (default 2500 failures)
+ may occur before a new configuration is formed
+ (default 2500 failures)
Configuring Redundant Ring Protocol (RRP)
@@ -596,18 +627,20 @@ Commands:
ttl defaults to 1. If --broadcast is specified, --mcast0/1,
--mcastport0/1 & --ttl0/1 are ignored.
- start [--all] [node] [...] [--wait[=<n>]]
+ start [--all | <node>... ] [--wait[=<n>]]
Start corosync & pacemaker on specified node(s), if a node is not
specified then corosync & pacemaker are started on the local node.
If --all is specified then corosync & pacemaker are started on all
nodes. If --wait is specified, wait up to 'n' seconds for nodes
to start.
- stop [--all] [node] [...]
+ stop [--all | <node>... ] [--request-timeout=<seconds>]
Stop corosync & pacemaker on specified node(s), if a node is not
- specified then corosync & pacemaker are stopped on the local node.
- If --all is specified then corosync & pacemaker are stopped on all
- nodes.
+ specified then corosync & pacemaker are stopped on the local node. If
+ --all is specified then corosync & pacemaker are stopped on all nodes.
+ If the cluster is running resources which take long time to stop, the
+ request may time out before the cluster actually stops. In that case you
+ should consider setting --request-timeout to a suitable value.
kill
Force corosync and pacemaker daemons to stop on the local node
@@ -615,33 +648,22 @@ Commands:
cluster is not running and start it again. If you want to stop cluster
on a node, run pcs cluster stop on that node.
- enable [--all] [node] [...]
- Configure corosync & pacemaker to run on node boot on specified
- node(s), if node is not specified then corosync & pacemaker are
- enabled on the local node. If --all is specified then corosync &
- pacemaker are enabled on all nodes.
+ enable [--all | <node>... ]
+ Configure cluster to run on node boot on specified node(s). If node is
+ not specified then cluster is enabled on the local node. If --all is
+ specified then cluster is enabled on all nodes.
- disable [--all] [node] [...]
- Configure corosync & pacemaker to not run on node boot on specified
- node(s), if node is not specified then corosync & pacemaker are
- disabled on the local node. If --all is specified then corosync &
- pacemaker are disabled on all nodes. Note: this is the default after
- installation.
-
- remote-node add <hostname> <resource id> [options]
- Enables the specified resource as a remote-node resource on the
- specified hostname (hostname should be the same as 'uname -n').
-
- remote-node remove <hostname>
- Disables any resources configured to be remote-node resource on the
- specified hostname (hostname should be the same as 'uname -n').
+ disable [--all | <node>... ]
+ Configure cluster to not run on node boot on specified node(s). If node
+ is not specified then cluster is disabled on the local node. If --all
+ is specified then cluster is disabled on all nodes.
status
View current cluster status (an alias of 'pcs status cluster').
- pcsd-status [node] [...]
- Get current status of pcsd on nodes specified, or on all nodes
- configured in corosync.conf if no nodes are specified.
+ pcsd-status [<node>]...
+ Show current status of pcsd on nodes specified, or on all nodes
+ configured in the local cluster if no nodes are specified.
sync
Sync corosync configuration to all nodes found from current
@@ -656,20 +678,28 @@ Commands:
scope=configuration. Do not specify a scope if you want to edit
the saved CIB using pcs (pcs -f <command>).
- cib-push <filename> [scope=<scope> | --config] [--wait[=<n>]]
+ cib-push <filename> [--wait[=<n>]]
+ [diff-against=<filename_original> | scope=<scope> | --config]
Push the raw xml from <filename> to the CIB (Cluster Information Base).
You can obtain the CIB by running the 'pcs cluster cib' command, which
is the recommended first step when you want to perform the desired
modifications (pcs -f <command>) for the one-off push.
+ If diff-against is specified, pcs diffs contents of filename against
+ contents of filename_original and pushes the result to the CIB.
Specify scope to push a specific section of the CIB. Valid values
of the scope are: configuration, nodes, resources, constraints,
crm_config, rsc_defaults, op_defaults. --config is the same as
scope=configuration. Use of --config is recommended. Do not specify
a scope if you need to push the whole CIB or be warned in the case
- of outdated CIB. If --wait is specified wait up to 'n' seconds for
- changes to be applied.
+ of outdated CIB.
+ If --wait is specified wait up to 'n' seconds for changes to be applied.
WARNING: the selected scope of the CIB will be overwritten by the
current content of the specified file.
+ Example:
+ pcs cluster cib > original.xml
+ cp original.xml new.xml
+ pcs -f new.xml constraint location apache prefers node2
+ pcs cluster cib-push new.xml diff-against=original.xml
cib-upgrade
Upgrade the CIB to conform to the latest version of the document schema.
@@ -684,20 +714,53 @@ Commands:
the whole CIB or be warned in the case of outdated CIB.
node add <node[,node-altaddr]> [--start [--wait[=<n>]]] [--enable]
- [--watchdog=<watchdog-path>]
- Add the node to corosync.conf and corosync on all nodes in the cluster
- and sync the new corosync.conf to the new node. If --start is
- specified also start corosync/pacemaker on the new node, if --wait is
- sepcified wait up to 'n' seconds for the new node to start. If --enable
- is specified enable corosync/pacemaker on new node.
+ [--watchdog=<watchdog-path>] [--device=<path>] ...
+ Add the node to the cluster and sync all relevant configuration files
+ to the new node. If --start is specified also start cluster on the new
+ node, if --wait is specified wait up to 'n' seconds for the new node to
+ start. If --enable is specified configure cluster to start on the new
+ node on boot.
When using Redundant Ring Protocol (RRP) with udpu transport, specify
the ring 0 address first followed by a ',' and then the ring 1 address.
- Use --watchdog to specify path to watchdog on newly added node, when SBD
- is enabled in cluster.
+ Use --watchdog to specify path to watchdog on newly added node, when
+ SBD is enabled in cluster. If SBD is configured with shared storage,
+ use --device to specify path to shared device on new node.
+ This command can only be run on an existing cluster node.
node remove <node>
- Shutdown specified node and remove it from pacemaker and corosync on
- all other nodes in the cluster.
+ Shutdown specified node and remove it from the cluster.
+
+ node add-remote <node host> [<node name>] [options]
+ [op <operation action> <operation options> [<operation action>
+ <operation options>]...] [meta <meta options>...] [--wait[=<n>]]
+ Add the node to the cluster as a remote node. Sync all relevant
+ configuration files to the new node. Start the node and configure it to
+ start the cluster on boot.
+ Options are port and reconnect_interval. Operations and meta
+ belong to an underlying connection resource (ocf:pacemaker:remote).
+ If --wait is specified, wait up to 'n' seconds for the node to start.
+
+ node remove-remote <node identifier>
+ Shutdown specified remote node and remove it from the cluster.
+ The node-identifier can be the name of the node or the address of the
+ node.
+
+ node add-guest <node host> <resource id> [options] [--wait[=<n>]]
+ Make the specified resource a guest node resource. Sync all relevant
+ configuration files to the new node. Start the node and configure it to
+ start the cluster on boot.
+ Options are remote-addr, remote-port and remote-connect-timeout.
+ If --wait is specified, wait up to 'n' seconds for the node to start.
+
+ node remove-guest <node identifier>
+ Shutdown specified guest node and remove it from the cluster.
+ The node-identifier can be the name of the node or the address of the
+ node or id of the resource that is used as the guest node.
+
+ node clear <node name>
+ Remove specified node from various cluster caches. Use this if a
+ removed node is still considered by the cluster to be a member of the
+ cluster.
uidgid
List the current configured uids and gids of users allowed to connect
@@ -720,10 +783,10 @@ Commands:
destroy [--all]
Permanently destroy the cluster on the current node, killing all
- corosync/pacemaker processes removing all cib files and the
- corosync.conf file. Using --all will attempt to destroy the
- cluster on all nodes configure in the corosync.conf file.
- WARNING: This command permantly removes any cluster configuration that
+ cluster processes and removing all cluster configuration files. Using
+ --all will attempt to destroy the cluster on all nodes in the local
+ cluster.
+ WARNING: This command permanently removes any cluster configuration that
has been created. It is recommended to run 'pcs cluster stop' before
destroying the cluster.
@@ -733,7 +796,7 @@ Commands:
performed on the currently running cluster. If -V is used
more verbose output will be printed.
- report [--from "YYYY-M-D H:M:S" [--to "YYYY-M-D" H:M:S"]] dest
+ report [--from "YYYY-M-D H:M:S" [--to "YYYY-M-D H:M:S"]] dest
Create a tarball containing everything needed when reporting cluster
problems. If --from and --to are not used, the report will include
the past 24 hours.
@@ -759,13 +822,25 @@ Commands:
only stonith agents matching the filter will be shown). If --nodesc is
used then descriptions of stonith agents are not printed.
- describe <stonith agent>
- Show options for specified stonith agent.
+ describe <stonith agent> [--full]
+ Show options for specified stonith agent. If --full is specified, all
+ options including advanced ones are shown.
create <stonith id> <stonith device type> [stonith device options]
[op <operation action> <operation options> [<operation action>
<operation options>]...] [meta <meta options>...]
+ [--group <group id> [--before <stonith id> | --after <stonith id>]]
+ [--disabled] [--wait[=n]]
Create stonith device with specified type and options.
+ If --group is specified the stonith device is added to the group named.
+ You can use --before or --after to specify the position of the added
+ stonith device relatively to some stonith device already existing in the
+ group.
+ If --disabled is specified the stonith device is not used.
+ If --wait is specified, pcs will wait up to 'n' seconds for the stonith
+ device to start and then return 0 if the stonith device is started, or 1
+ if the stonith device has not yet started. If 'n' is not specified it
+ defaults to 60 minutes.
update <stonith id> [stonith device options]
Add/Change options to specified stonith id.
@@ -773,35 +848,56 @@ Commands:
delete <stonith id>
Remove stonith id from configuration.
- cleanup [<stonith id>] [--node <node>]
- Cleans up the stonith device in the lrmd (useful to reset the status
- and failcount). This tells the cluster to forget the operation history
- of a stonith device and re-detect its current state. This can be
- useful to purge knowledge of past failures that have since been
- resolved. If a stonith id is not specified then all resources/stonith
- devices will be cleaned up. If a node is not specified then resources
- on all nodes will be cleaned up.
-
- level
- Lists all of the fencing levels currently configured.
+ enable <stonith id> [--wait[=n]]
+ Allow the cluster to use the stonith device. If --wait is specified, pcs
+ will wait up to 'n' seconds for the stonith device to start and then
+ return 0 if the stonith device is started, or 1 if the stonith device
+ has not yet started. If 'n' is not specified it defaults to 60 minutes.
- level add <level> <node> <devices>
- Add the fencing level for the specified node with a comma separated
- list of devices (stonith ids) to attempt for that node at that level.
- Fence levels are attempted in numerical order (starting with 1) if
- a level succeeds (meaning all devices are successfully fenced in that
- level) then no other levels are tried, and the node is considered
- fenced.
+ disable <stonith id> [--wait[=n]]
+ Attempt to stop the stonith device if it is running and disallow the
+ cluster to use it. If --wait is specified, pcs will wait up to 'n'
+ seconds for the stonith device to stop and then return 0 if the stonith
+ device is stopped or 1 if the stonith device has not stopped. If 'n' is
+ not specified it defaults to 60 minutes.
- level remove <level> [node id] [stonith id] ... [stonith id]
- Removes the fence level for the level, node and/or devices specified.
- If no nodes or devices are specified then the fence level is removed.
+ cleanup [<stonith id>] [--node <node>]
+ Make the cluster forget the operation history of the stonith device and
+ re-detect its current state. This can be useful to purge knowledge of
+ past failures that have since been resolved. If a stonith id is not
+ specified then all resources/stonith devices will be cleaned up. If a
+ node is not specified then resources/stonith devices on all nodes will
+ be cleaned up.
+
+ level [config]
+ Lists all of the fencing levels currently configured.
- level clear [node|stonith id(s)]
- Clears the fence levels on the node (or stonith id) specified or clears
- all fence levels if a node/stonith id is not specified. If more than
- one stonith id is specified they must be separated by a comma and no
- spaces. Example: pcs stonith level clear dev_a,dev_b
+ level add <level> <target> <stonith id> [stonith id]...
+ Add the fencing level for the specified target with the list of stonith
+ devices to attempt for that target at that level. Fence levels are
+ attempted in numerical order (starting with 1). If a level succeeds
+ (meaning all devices are successfully fenced in that level) then no
+ other levels are tried, and the target is considered fenced.
+ Target may be a node name <node_name> or %<node_name> or
+ node%<node_name>, a node name regular expression regexp%<node_pattern>
+ or a node attribute value attrib%<name>=<value>.
+
+ level remove <level> [target] [stonith id]...
+ Removes the fence level for the level, target and/or devices specified.
+ If no target or devices are specified then the fence level is removed.
+ Target may be a node name <node_name> or %<node_name> or
+ node%<node_name>, a node name regular expression regexp%<node_pattern>
+ or a node attribute value attrib%<name>=<value>.
+
+ level clear [target|stonith id(s)]
+ Clears the fence levels on the target (or stonith id) specified or
+ clears all fence levels if a target/stonith id is not specified. If
+ more than one stonith id is specified they must be separated by a comma
+ and no spaces.
+ Target may be a node name <node_name> or %<node_name> or
+ node%<node_name>, a node name regular expression regexp%<node_pattern>
+ or a node attribute value attrib%<name>=<value>.
+ Example: pcs stonith level clear dev_a,dev_b
level verify
Verifies all fence devices and nodes specified in fence levels exist.
@@ -824,20 +920,25 @@ Commands:
in order to be able to work with nodes not visible from the local
cluster partition.
- sbd enable [--watchdog=<path>[@<node>]] ... [<SBD_OPTION>=<value>] ...
+ sbd enable [--watchdog=<path>[@<node>]] ... [--device=<path>[@<node>]] ...
+ [<SBD_OPTION>=<value>] ...
Enable SBD in cluster. Default path for watchdog device is
/dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5),
- SBD_DELAY_START (default: no) and SBD_STARTMODE (default: clean).
+ SBD_DELAY_START (default: no) and SBD_STARTMODE (default: always). It is
+ possible to specify up to 3 devices per node.
WARNING: Cluster has to be restarted in order to apply these changes.
Example of enabling SBD in cluster with watchdogs on node1 will be
/dev/watchdog2, on node2 /dev/watchdog1, /dev/watchdog0 on all other
- nodes and watchdog timeout will bet set to 10 seconds:
+ nodes, device /dev/sdb on node1, device /dev/sda on all other nodes and
+ watchdog timeout will be set to 10 seconds:
pcs stonith sbd enable \\
--watchdog=/dev/watchdog2@node1 \\
--watchdog=/dev/watchdog1@node2 \\
--watchdog=/dev/watchdog0 \\
+ --device=/dev/sdb@node1 \\
+ --device=/dev/sda \\
SBD_WATCHDOG_TIMEOUT=10
sbd disable
@@ -845,8 +946,22 @@ Commands:
WARNING: Cluster has to be restarted in order to apply these changes.
- sbd status
- Show status of SBD services in cluster.
+ sbd device setup --device=<path> [--device=<path>]...
+ [watchdog-timeout=<integer>] [allocate-timeout=<integer>]
+ [loop-timeout=<integer>] [msgwait-timeout=<integer>]
+ Initialize SBD structures on device(s) with specified timeouts.
+
+ WARNING: All content on device(s) will be overwritten.
+
+ sbd device message <device-path> <node> <message-type>
+ Manually set a message of the specified type on the device for the node.
+ Possible message types (they are documented in sbd(8) man page): test,
+ reset, off, crashdump, exit, clear
+
+ sbd status [--full]
+ Show status of SBD services in cluster and local device(s) configured.
+ If --full is specified, a dump of the SBD headers on the device(s)
+ will be shown as well.
sbd config
Show SBD configuration in cluster.
@@ -901,20 +1016,24 @@ Manage resource constraints
Commands:
[list|show] --full
- List all current location, order and colocation constraints, if --full
- is specified also list the constraint ids.
-
- location <resource id> prefers <node[=score]>...
- Create a location constraint on a resource to prefer the specified
- node and score (default score: INFINITY).
-
- location <resource id> avoids <node[=score]>...
- Create a location constraint on a resource to avoid the specified
- node and score (default score: INFINITY).
-
- location <resource id> rule [id=<rule id>] [resource-discovery=<option>]
+ List all current constraints. If --full is specified also list the
+ constraint ids.
+
+ location <resource> prefers <node>[=<score>] [<node>[=<score>]]...
+ Create a location constraint on a resource to prefer the specified node
+ with score (default score: INFINITY). Resource may be either a resource
+ id <resource_id> or %<resource_id> or resource%<resource_id>, or a
+ resource name regular expression regexp%<resource_pattern>.
+
+ location <resource> avoids <node>[=<score>] [<node>[=<score>]]...
+ Create a location constraint on a resource to avoid the specified node
+ with score (default score: INFINITY). Resource may be either a resource
+ id <resource_id> or %<resource_id> or resource%<resource_id>, or a
+ resource name regular expression regexp%<resource_pattern>.
+
+ location <resource> rule [id=<rule id>] [resource-discovery=<option>]
[role=master|slave] [constraint-id=<id>]
- [score=<score>|score-attribute=<attribute>] <expression>
+ [score=<score> | score-attribute=<attribute>] <expression>
Creates a location rule on the specified resource where the expression
looks like one of the following:
defined|not_defined <attribute>
@@ -927,24 +1046,30 @@ Commands:
( <expression> )
where duration options and date spec options are: hours, monthdays,
weekdays, yeardays, months, weeks, years, weekyears, moon.
- If score is omitted it defaults to INFINITY. If id is omitted one is
- generated from the resource id. If resource-discovery is omitted it
- defaults to 'always'.
-
- location [show [resources|nodes [node id|resource id]...] [--full]]
- List all the current location constraints, if 'resources' is specified
- location constraints are displayed per resource (default), if 'nodes'
- is specified location constraints are displayed per node. If specific
+ Resource may be either a resource id <resource_id> or %<resource_id> or
+ resource%<resource_id>, or a resource name regular expression
+ regexp%<resource_pattern>. If score is omitted it defaults to INFINITY.
+ If id is omitted one is generated from the resource id. If
+ resource-discovery is omitted it defaults to 'always'.
+
+ location [show [resources|nodes [<node> | <resource>]...] [--full]]
+ List all the current location constraints. If 'resources' is specified,
+ location constraints are displayed per resource (default). If 'nodes'
+ is specified, location constraints are displayed per node. If specific
nodes or resources are specified then we only show information about
- them. If --full is specified show the internal constraint id's as well.
+ them. Resource may be either a resource id <resource_id> or
+ %<resource_id> or resource%<resource_id>, or a resource name regular
+ expression regexp%<resource_pattern>. If --full is specified show the
+ internal constraint id's as well.
- location add <id> <resource id> <node> <score> [resource-discovery=<option>]
- Add a location constraint with the appropriate id, resource id,
- node name and score. (For more advanced pacemaker usage.)
+ location add <id> <resource> <node> <score> [resource-discovery=<option>]
+ Add a location constraint with the appropriate id for the specified
+ resource, node name and score. Resource may be either a resource id
+ <resource_id> or %<resource_id> or resource%<resource_id>, or a
+ resource name regular expression regexp%<resource_pattern>.
- location remove <id> [<resource id> <node> <score>]
- Remove a location constraint with the appropriate id, resource id,
- node name and score. (For more advanced pacemaker usage.)
+ location remove <id>
+ Remove a location constraint with the appropriate id.
order [show] [--full]
List all current ordering constraints (if --full is specified show
@@ -961,10 +1086,10 @@ Commands:
<resourceX> ... [options]]
[setoptions [constraint_options]]
Create an ordered set of resources.
- Available options are sequential=true/false, require-all=true/false,
- action=start/promote/demote/stop and role=Stopped/Started/Master/Slave.
- Available constraint_options are id=<constraint-id>,
- kind=Optional/Mandatory/Serialize and symmetrical=true/false.
+ Available options are sequential=true/false, require-all=true/false and
+ action=start/promote/demote/stop. Available constraint_options are
+ id=<constraint-id>, kind=Optional/Mandatory/Serialize and
+ symmetrical=true/false.
order remove <resource1> [resourceN]...
Remove resource from any ordering constraint
@@ -988,10 +1113,9 @@ Commands:
[set <resourceX> ... [options]]
[setoptions [constraint_options]]
Create a colocation constraint with a resource set.
- Available options are sequential=true/false, require-all=true/false,
- action=start/promote/demote/stop and role=Stopped/Started/Master/Slave.
- Available constraint_options are id, score, score-attribute and
- score-attribute-mangle.
+ Available options are sequential=true/false and
+ role=Stopped/Started/Master/Slave. Available constraint_options are id
+ and either of: score, score-attribute, score-attribute-mangle.
colocation remove <source resource id> <target resource id>
Remove colocation constraints with specified resources.
@@ -1010,15 +1134,14 @@ Commands:
[set <resourceX> ... [<options>]]
setoptions <constraint_options>
Create a ticket constraint with a resource set.
- Available options are sequential=true/false, require-all=true/false,
- action=start/promote/demote/stop and role=Stopped/Started/Master/Slave.
- Required constraint option is ticket=<ticket>. Optional constraint
- options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote.
+ Available options are role=Stopped/Started/Master/Slave. Required
+ constraint option is ticket=<ticket>. Optional constraint options are
+ id=<constraint-id> and loss-policy=fence/stop/freeze/demote.
ticket remove <ticket> <resource id>
Remove all ticket constraints with <ticket> from <resource id>.
- remove [constraint id]...
+ remove <constraint id>...
Remove constraint(s) or constraint rules with the specified id(s).
ref <resource>...
@@ -1037,8 +1160,8 @@ Commands:
<expression> and|or <expression>
( <expression> )
where duration options and date spec options are: hours, monthdays,
- weekdays, yeardays, months, weeks, years, weekyears, moon
- If score is ommited it defaults to INFINITY. If id is ommited one is
+ weekdays, yeardays, months, weeks, years, weekyears, moon.
+ If score is omitted it defaults to INFINITY. If id is omitted one is
generated from the constraint id.
rule remove <rule id>
@@ -1148,15 +1271,16 @@ Commands:
--full will give more detailed output. If <cluster name> is specified,
only information about the specified cluster will be displayed.
- nodes [corosync|both|config]
+ nodes [corosync | both | config]
View current status of nodes from pacemaker. If 'corosync' is
- specified, print nodes currently configured in corosync, if 'both'
- is specified, print nodes from both corosync & pacemaker. If 'config'
- is specified, print nodes from corosync & pacemaker configuration.
+ specified, view current status of nodes from corosync instead. If
+ 'both' is specified, view current status of nodes from both corosync &
+ pacemaker. If 'config' is specified, print nodes from corosync &
+ pacemaker configuration.
- pcsd [<node>] ...
- Show the current status of pcsd on the specified nodes.
- When no nodes are specified, status of all nodes is displayed.
+ pcsd [<node>]...
+ Show current status of pcsd on nodes specified, or on all nodes
+ configured in the local cluster if no nodes are specified.
xml
View xml version of status (output from crm_mon -r -1 -X).
@@ -1243,9 +1367,8 @@ Commands:
Load custom certificate and key files for use in pcsd.
sync-certificates
- Sync pcsd certificates to all nodes found from current corosync.conf
- file (cluster.conf on systems running Corosync 1.x). WARNING: This will
- restart pcsd daemon on the nodes.
+ Sync pcsd certificates to all nodes in the local cluster.
+ WARNING: This will restart pcsd daemon on the nodes.
clear-auth [--local] [--remote]
Removes all system tokens which allow pcs/pcsd on the current system to
@@ -1275,34 +1398,42 @@ Commands:
of specified node. Attributes can be removed by setting an attribute
without a value.
- maintenance [--all] | [<node>]...
- Put specified node(s) into maintenance mode, if no node or options are
+ maintenance [--all | <node>...] [--wait[=n]]
+ Put specified node(s) into maintenance mode, if no nodes or options are
specified the current node will be put into maintenance mode, if --all
- is specified all nodes will be put into maintenace mode.
+ is specified all nodes will be put into maintenance mode.
+ If --wait is specified, pcs will wait up to 'n' seconds for the node(s)
+ to be put into maintenance mode and then return 0 on success or 1 if
+ the operation has not succeeded yet. If 'n' is not specified it defaults
+ to 60 minutes.
- unmaintenance [--all] | [<node>]...
- Remove node(s) from maintenance mode, if no node or options are
+ unmaintenance [--all | <node>...] [--wait[=n]]
+ Remove node(s) from maintenance mode, if no nodes or options are
specified the current node will be removed from maintenance mode,
if --all is specified all nodes will be removed from maintenance mode.
+ If --wait is specified, pcs will wait up to 'n' seconds for the node(s)
+ to be removed from maintenance mode and then return 0 on success or 1 if
+ the operation has not succeeded yet. If 'n' is not specified it defaults
+ to 60 minutes.
- standby [--all | <node>] [--wait[=n]]
- Put specified node into standby mode (the node specified will no longer
- be able to host resources), if no node or options are specified the
- current node will be put into standby mode, if --all is specified all
- nodes will be put into standby mode.
+ standby [--all | <node>...] [--wait[=n]]
+ Put specified node(s) into standby mode (the node specified will no
+ longer be able to host resources), if no nodes or options are specified
+ the current node will be put into standby mode, if --all is specified
+ all nodes will be put into standby mode.
If --wait is specified, pcs will wait up to 'n' seconds for the node(s)
to be put into standby mode and then return 0 on success or 1 if
- the operation not succeeded yet. If 'n' is not specified it defaults
+ the operation has not succeeded yet. If 'n' is not specified it defaults
to 60 minutes.
- unstandby [--all | <node>] [--wait[=n]]
- Remove node from standby mode (the node specified will now be able to
- host resources), if no node or options are specified the current node
+ unstandby [--all | <node>...] [--wait[=n]]
+ Remove node(s) from standby mode (the node specified will now be able to
+ host resources), if no nodes or options are specified the current node
will be removed from standby mode, if --all is specified all nodes will
be removed from standby mode.
If --wait is specified, pcs will wait up to 'n' seconds for the node(s)
to be removed from standby mode and then return 0 on success or 1 if
- the operation not succeeded yet. If 'n' is not specified it defaults
+ the operation has not succeeded yet. If 'n' is not specified it defaults
to 60 minutes.
utilization [[<node>] [--name <name>] | <node> <name>=<value> ...]
@@ -1378,11 +1509,11 @@ Commands:
Show quorum runtime status.
device add [<generic options>] model <device model> [<model options>]
- Add a quorum device to the cluster. Quorum device needs to be created
- first by "pcs qdevice setup" command. It is not possible to use more
- than one quorum device in a cluster simultaneously. Generic options,
- model and model options are all documented in corosync's
- corosync-qdevice(8) man page.
+ Add a quorum device to the cluster. Quorum device needs to be created
+ first by "pcs qdevice setup" command. It is not possible to use more
+ than one quorum device in a cluster simultaneously. Generic options,
+ model and model options are all documented in corosync-qdevice(8) man
+ page.
device remove
Remove a quorum device from the cluster.
@@ -1392,9 +1523,9 @@ Commands:
output.
device update [<generic options>] [model <model options>]
- Add/Change quorum device options. Generic options and model options are
- all documented in corosync's corosync-qdevice(8) man page. Requires
- the cluster to be stopped.
+ Add/Change quorum device options. Generic options and model options are
+ all documented in corosync-qdevice(8) man page. Requires the cluster to
+ be stopped.
WARNING: If you want to change "host" option of qdevice model net, use
"pcs quorum device remove" and "pcs quorum device add" commands
@@ -1522,7 +1653,7 @@ Commands:
update <alert-id> [path=<path>] [description=<description>]
[options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
- Update existing alert handler with specified id.
+ Update an existing alert handler with specified id.
remove <alert-id> ...
Remove alert handlers with specified ids.
@@ -1535,7 +1666,7 @@ Commands:
recipient update <recipient-id> [value=<recipient-value>]
[description=<description>] [options [<option>=<value>]...]
[meta [<meta-option>=<value>]...]
- Update existing recipient identified by it's id.
+ Update an existing recipient identified by its id.
recipient remove <recipient-id> ...
Remove specified recipients.
diff --git a/pcs/utils.py b/pcs/utils.py
index 50f00bc..4753b87 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -8,8 +8,6 @@ from __future__ import (
import os
import sys
import subprocess
-import ssl
-import inspect
import xml.dom.minidom
from xml.dom.minidom import parseString, parse
import xml.etree.ElementTree as ET
@@ -25,19 +23,34 @@ import base64
import threading
import logging
-
from pcs import settings, usage
-from pcs.cli.common.reports import (
- process_library_reports,
- LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
+
+from pcs.common import (
+ pcs_pycurl as pycurl,
+ report_codes,
)
from pcs.common.tools import (
join_multilines,
simple_cache,
)
+
+from pcs.cli.common import (
+ console_report,
+ middleware,
+)
+from pcs.cli.common.env_cli import Env
+from pcs.cli.common.lib_wrapper import Library
+from pcs.cli.common.reports import (
+ build_report_message,
+ process_library_reports,
+ LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
+)
+from pcs.cli.booth.command import DEFAULT_BOOTH_NAME
+import pcs.cli.booth.env
+
from pcs.lib import reports, sbd
from pcs.lib.env import LibraryEnvironment
-from pcs.lib.errors import LibraryError
+from pcs.lib.errors import LibraryError, ReportListAnalyzer
from pcs.lib.external import (
CommandRunner,
disable_service,
@@ -45,30 +58,24 @@ from pcs.lib.external import (
enable_service,
EnableServiceError,
is_cman_cluster as lib_is_cman_cluster,
+ is_proxy_set,
is_service_enabled,
is_service_running,
is_systemctl,
_service,
_systemctl,
)
-import pcs.lib.resource_agent as lib_ra
import pcs.lib.corosync.config_parser as corosync_conf_parser
from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade
-from pcs.lib.pacemaker import has_resource_wait_support
-from pcs.lib.pacemaker_state import ClusterState
-from pcs.lib.pacemaker_values import(
- validate_id,
+from pcs.lib.nodes_task import check_can_add_node_to_cluster
+from pcs.lib.pacemaker.live import has_wait_for_idle_support
+from pcs.lib.pacemaker.state import ClusterState
+from pcs.lib.pacemaker.values import(
is_boolean,
+ is_score as is_score_value,
timeout_to_seconds as get_timeout_seconds,
- is_score_value,
+ validate_id,
)
-from pcs.cli.common import middleware
-from pcs.cli.common.env import Env
-from pcs.cli.common.lib_wrapper import Library
-from pcs.cli.common.reports import build_report_message
-from pcs.cli.booth.command import DEFAULT_BOOTH_NAME
-import pcs.cli.booth.env
-
try:
# python2
@@ -76,36 +83,10 @@ try:
except ImportError:
# python3
from urllib.parse import urlencode as urllib_urlencode
-try:
- # python2
- from urllib2 import (
- build_opener as urllib_build_opener,
- install_opener as urllib_install_opener,
- HTTPCookieProcessor as urllib_HTTPCookieProcessor,
- HTTPSHandler as urllib_HTTPSHandler,
- HTTPError as urllib_HTTPError,
- URLError as urllib_URLError
- )
-except ImportError:
- # python3
- from urllib.request import (
- build_opener as urllib_build_opener,
- install_opener as urllib_install_opener,
- HTTPCookieProcessor as urllib_HTTPCookieProcessor,
- HTTPSHandler as urllib_HTTPSHandler
- )
- from urllib.error import (
- HTTPError as urllib_HTTPError,
- URLError as urllib_URLError
- )
-
-
PYTHON2 = sys.version[0] == "2"
-DEFAULT_RESOURCE_ACTIONS = ["monitor", "start", "stop", "promote", "demote"]
-
# usefile & filename variables are set in pcs module
usefile = False
filename = ""
@@ -146,6 +127,20 @@ def cluster_upgrade():
err("unable to upgrade cluster: %s" % output)
print("Cluster CIB has been upgraded to latest version")
+def cluster_upgrade_to_version(required_version):
+ checkAndUpgradeCIB(*required_version)
+ dom = get_cib_dom()
+ current_version = getValidateWithVersion(dom)
+ if current_version < required_version:
+ err(
+ console_report.CODE_TO_MESSAGE_BUILDER_MAP[
+ report_codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION
+ ]({
+ "required_version": ".".join([str(x) for x in required_version]),
+ "current_version": ".".join([str(x) for x in current_version]),
+ })
+ )
+ return dom
# Check status of node
def checkStatus(node):
@@ -155,6 +150,11 @@ def checkStatus(node):
def checkAuthorization(node):
return sendHTTPRequest(node, 'remote/check_auth', None, False, False)
+def getPcsdInstanceSignature(node):
+ return sendHTTPRequest(
+ node, 'remote/pcsd_instance_signature', None, False, False
+ )
+
def get_uid_gid_file_name(uid, gid):
return "pcs-uidgid-%s-%s" % (uid, gid)
@@ -236,6 +236,23 @@ def readTokens():
tokens = output['data']
return tokens
+def repeat_if_timeout(send_http_request_function, repeat_count=15):
+ def repeater(node, *args, **kwargs):
+ repeats_left = repeat_count
+ while True:
+ retval, output = send_http_request_function(node, *args, **kwargs)
+ if (
+ retval != 2 or "Operation timed out" not in output
+ or
+ repeats_left < 1
+ ):
+ # did not time out OR repeat limit exceeded
+ return retval, output
+ repeats_left = repeats_left - 1
+ if "--debug" in pcs_options:
+ print("{0}: {1}, trying again...". format(node, output))
+ return repeater
+
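
A usage sketch (illustrative only, the wrapper variable name is made up):
wrapping one of the request helpers defined below so that pcsd timeouts are
retried up to the default 15 times.

    getCorosyncConfigRetried = repeat_if_timeout(getCorosyncConfig)
    retval, output = getCorosyncConfigRetried("node1.example.com")
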
# Set the corosync.conf file on the specified node
def getCorosyncConfig(node):
return sendHTTPRequest(node, 'remote/get_corosync_conf', None, False, False)
@@ -272,14 +289,23 @@ def stopCorosync(node, quiet=False, force=True):
def stopCluster(node, quiet=False, pacemaker=True, corosync=True, force=True):
data = dict()
+ timeout = None
if pacemaker and not corosync:
data["component"] = "pacemaker"
+ timeout = 2 * 60
elif corosync and not pacemaker:
data["component"] = "corosync"
if force:
data["force"] = 1
data = urllib_urlencode(data)
- return sendHTTPRequest(node, 'remote/cluster_stop', data, False, not quiet)
+ return sendHTTPRequest(
+ node,
+ 'remote/cluster_stop',
+ data,
+ printResult=False,
+ printSuccess=not quiet,
+ timeout=timeout
+ )
def enableCluster(node):
return sendHTTPRequest(node, 'remote/cluster_enable', None, False, True)
@@ -302,24 +328,46 @@ def resumeConfigSyncing(node):
data = urllib_urlencode({"sync_thread_resume": 1})
return sendHTTPRequest(node, "remote/set_sync_options", data, False, False)
-def canAddNodeToCluster(node):
- retval, output = sendHTTPRequest(
- node, 'remote/node_available', None, False, False
+def canAddNodeToCluster(node_communicator, node):
+ """
+ Return a two-part tuple. The first part says whether the node can be added
+ to a cluster. The second part is a relevant explanation of the first part.
+
+ NodeCommunicator node_communicator provides a connection to the node
+ NodeAddresses node contains the destination for the request
+ """
+ report_list = []
+ check_can_add_node_to_cluster(node_communicator, node, report_list)
+
+ analyzer = ReportListAnalyzer(report_list)
+ if not analyzer.error_list:
+ return True, ""
+
+ first_problem = analyzer.error_list[0]
+
+ report_message_map = {
+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED:
+ "unable to authenticate to node"
+ ,
+ report_codes.CANNOT_ADD_NODE_IS_IN_CLUSTER:
+ "node is already in a cluster"
+ ,
+ report_codes.INVALID_RESPONSE_FORMAT:
+ "response parsing error"
+ ,
+ report_codes.CANNOT_ADD_NODE_IS_RUNNING_SERVICE:
+ "node is running pacemaker_remote"
+ ,
+ }
+
+ if first_problem.code in report_message_map:
+ return False, report_message_map[first_problem.code]
+
+ return False, "error checking node availability{0}".format(
+ ": {0}".format(first_problem.info["reason"])
+ if "reason" in first_problem.info else ""
)
- if retval == 0:
- try:
- myout = json.loads(output)
- if "notauthorized" in myout and myout["notauthorized"] == "true":
- return (False, "unable to authenticate to node")
- if "node_available" in myout and myout["node_available"] == True:
- return (True, "")
- elif myout.get("pacemaker_remote", False):
- return (False, "node is running pacemaker_remote")
- else:
- return (False, "node is already in a cluster")
- except ValueError:
- return (False, "response parsing error")
- return (False, "error checking node availability: {0}".format(output))
def addLocalNode(node, node_to_add, ring1_addr=None):
options = {'new_nodename': node_to_add}
@@ -350,6 +398,7 @@ def removeLocalNode(node, node_to_remove, pacemaker_remove=False):
else:
return 1, output
+
# Send an HTTP request to a node return a tuple with status, data
# If status is 0 then data contains server response
# Otherwise if non-zero then data contains error message
@@ -359,101 +408,133 @@ def removeLocalNode(node, node_to_remove, pacemaker_remove=False):
# 2 = No response,
# 3 = Auth Error
# 4 = Permission denied
-def sendHTTPRequest(host, request, data = None, printResult = True, printSuccess = True):
- url = 'https://' + host + ':2224/' + request
- # enable self-signed certificates
- # https://www.python.org/dev/peps/pep-0476/
- # http://bugs.python.org/issue21308
- if (
- hasattr(ssl, "_create_unverified_context")
- and
- "context" in inspect.getargspec(urllib_HTTPSHandler.__init__).args
- ):
- opener = urllib_build_opener(
- urllib_HTTPSHandler(context=ssl._create_unverified_context()),
- urllib_HTTPCookieProcessor()
- )
- else:
- opener = urllib_build_opener(urllib_HTTPCookieProcessor())
-
- tokens = readTokens()
+def sendHTTPRequest(
+ host, request, data=None, printResult=True, printSuccess=True, timeout=None
+):
+ url = "https://{host}:2224/{request}".format(host=host, request=request)
if "--debug" in pcs_options:
print("Sending HTTP Request to: " + url)
print("Data: {0}".format(data))
- # python3 requires data to by bytes not str
- if data:
- data = data.encode("utf-8")
- # cookies
- cookies = []
- if host in tokens:
- cookies.append("token=" + tokens[host])
- if os.geteuid() == 0:
- for name in ("CIB_user", "CIB_user_groups"):
- if name in os.environ and os.environ[name].strip():
- value = os.environ[name].strip()
- # Let's be safe about characters in env variables and do base64.
- # We cannot do it for CIB_user however to be backward compatible
- # so we at least remove disallowed characters.
- if "CIB_user" == name:
- value = re.sub(r"[^!-~]", "", value).replace(";", "")
- else:
- # python3 requires the value to be bytes not str
- value = base64.b64encode(value.encode("utf8"))
- cookies.append("{0}={1}".format(name, value))
+ def __debug_callback(data_type, debug_data):
+ prefixes = {
+ pycurl.DEBUG_TEXT: b"* ",
+ pycurl.DEBUG_HEADER_IN: b"< ",
+ pycurl.DEBUG_HEADER_OUT: b"> ",
+ pycurl.DEBUG_DATA_IN: b"<< ",
+ pycurl.DEBUG_DATA_OUT: b">> ",
+ }
+ if data_type in prefixes:
+ debug_output.write(prefixes[data_type])
+ debug_output.write(debug_data)
+ if not debug_data.endswith(b"\n"):
+ debug_output.write(b"\n")
+
+ output = BytesIO()
+ debug_output = BytesIO()
+ cookies = __get_cookie_list(host, readTokens())
+ if not timeout:
+ timeout = settings.default_request_timeout
+ if "--request-timeout" in pcs_options:
+ timeout = pcs_options["--request-timeout"]
+
+ handler = pycurl.Curl()
+ handler.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTPS)
+ handler.setopt(pycurl.URL, url.encode("utf-8"))
+ handler.setopt(pycurl.WRITEFUNCTION, output.write)
+ handler.setopt(pycurl.VERBOSE, 1)
+ handler.setopt(pycurl.NOSIGNAL, 1) # required for multi-threading
+ handler.setopt(pycurl.DEBUGFUNCTION, __debug_callback)
+ handler.setopt(pycurl.TIMEOUT_MS, int(timeout * 1000))
+ handler.setopt(pycurl.SSL_VERIFYHOST, 0)
+ handler.setopt(pycurl.SSL_VERIFYPEER, 0)
if cookies:
- opener.addheaders.append(('Cookie', ";".join(cookies)))
-
- # send the request
- urllib_install_opener(opener)
+ handler.setopt(pycurl.COOKIE, ";".join(cookies).encode("utf-8"))
+ if data:
+ handler.setopt(pycurl.COPYPOSTFIELDS, data.encode("utf-8"))
try:
- result = opener.open(url,data)
- # python3 returns bytes not str
- html = result.read().decode("utf-8")
+ handler.perform()
+ response_data = output.getvalue().decode("utf-8")
+ response_code = handler.getinfo(pycurl.RESPONSE_CODE)
if printResult or printSuccess:
- print(host + ": " + html.strip())
+ print(host + ": " + response_data.strip())
if "--debug" in pcs_options:
- print("Response Code: 0")
- print("--Debug Response Start--\n{0}".format(html), end="")
+ print("Response Code: {0}".format(response_code))
+ print("--Debug Response Start--\n{0}".format(response_data))
print("--Debug Response End--")
+ print("Communication debug info for calling: {0}".format(url))
+ print("--Debug Communication Output Start--")
+ print(debug_output.getvalue().decode("utf-8", "ignore"))
+ print("--Debug Communication Output End--")
print()
- return (0,html)
- except urllib_HTTPError as e:
- if "--debug" in pcs_options:
- print("Response Code: " + str(e.code))
- html = e.read().decode("utf-8")
- print("--Debug Response Start--\n{0}".format(html), end="")
- print("--Debug Response End--")
- if e.code == 401:
+
+ if response_code == 401:
output = (
3,
- "Unable to authenticate to {node} - (HTTP error: {code}), try running 'pcs cluster auth'".format(
- node=host, code=e.code
- )
+ (
+ "Unable to authenticate to {node} - (HTTP error: {code}), "
+ "try running 'pcs cluster auth'"
+ ).format(node=host, code=response_code)
)
- elif e.code == 403:
+ elif response_code == 403:
output = (
4,
"{node}: Permission denied - (HTTP error: {code})".format(
- node=host, code=e.code
+ node=host, code=response_code
)
)
- else:
+ elif response_code >= 400:
output = (
1,
"Error connecting to {node} - (HTTP error: {code})".format(
- node=host, code=e.code
+ node=host, code=response_code
)
)
- if printResult:
+ else:
+ output = (0, response_data)
+
+ if printResult and output[0] != 0:
print(output[1])
+
return output
- except urllib_URLError as e:
+ except pycurl.error as e:
+ if is_proxy_set(os.environ):
+ print(
+ "Warning: Proxy is set in environment variables, try "
+ "disabling it"
+ )
+ dummy_errno, reason = e.args
if "--debug" in pcs_options:
- print("Response Reason: " + str(e.reason))
+ print("Response Reason: {0}".format(reason))
+ msg = "Unable to connect to {host} ({reason})".format(
+ host=host, reason=reason
+ )
if printResult:
- print("Unable to connect to %s (%s)" % (host, e.reason))
- return (2,"Unable to connect to %s (%s)" % (host, e.reason))
+ print(msg)
+ return (2, msg)
+
+
+def __get_cookie_list(host, tokens):
+ cookies = []
+ if host in tokens:
+ cookies.append("token=" + tokens[host])
+ if os.geteuid() == 0:
+ for name in ("CIB_user", "CIB_user_groups"):
+ if name in os.environ and os.environ[name].strip():
+ value = os.environ[name].strip()
+ # Let's be safe about characters in env variables and do base64.
+ # We cannot do it for CIB_user however to be backward compatible
+ # so we at least remove disallowed characters.
+ if "CIB_user" == name:
+ value = re.sub(r"[^!-~]", "", value).replace(";", "")
+ else:
+ # python3 requires the value to be bytes not str
+ value = base64.b64encode(
+ value.encode("utf8")
+ ).decode("utf-8")
+ cookies.append("{0}={1}".format(name, value))
+ return cookies
+
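
Illustrative only: for a root user with a stored token and CIB_user set in the
environment, the resulting Cookie header value would look roughly like this
(the token and values are made up).

    cookies = __get_cookie_list("node1.example.com", readTokens())
    # e.g. ["token=abc123", "CIB_user=hacluster", "CIB_user_groups=aGFjbGllbnQ="]
    cookie_header = ";".join(cookies)
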
def getNodesFromCorosyncConf(conf_text=None):
if is_rhel6():
@@ -950,6 +1031,10 @@ def run_pcsdcli(command, data=None):
env_var = dict()
if "--debug" in pcs_options:
env_var["PCSD_DEBUG"] = "true"
+ if "--request-timeout" in pcs_options:
+ env_var["PCSD_NETWORK_TIMEOUT"] = str(pcs_options["--request-timeout"])
+ else:
+ env_var["PCSD_NETWORK_TIMEOUT"] = str(settings.default_request_timeout)
pcs_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
if pcs_dir == "/usr/sbin":
pcsd_dir_path = settings.pcsd_exec_location
@@ -958,20 +1043,33 @@ def run_pcsdcli(command, data=None):
pcsdcli_path = os.path.join(pcsd_dir_path, 'pcsd-cli.rb')
gem_home = os.path.join(pcsd_dir_path, 'vendor/bundle/ruby')
env_var["GEM_HOME"] = gem_home
- output, retval = run(
+ stdout, dummy_stderr, retval = cmd_runner().run(
["/usr/bin/ruby", "-I" + pcsd_dir_path, pcsdcli_path, command],
- string_for_stdin=json.dumps(data),
- env_extend=env_var
+ json.dumps(data),
+ env_var
)
try:
- output_json = json.loads(output)
+ output_json = json.loads(stdout)
for key in ['status', 'text', 'data']:
if key not in output_json:
output_json[key] = None
+
+ output = "".join(output_json['log'])
+ # check if some requests timed out, if so print message about it
+ if "error: operation_timedout" in output:
+ print("Error: Operation timed out")
+ # check if there are any connection failures due to proxy in pcsd and
+ # print warning if so
+ proxy_msg = (
+ 'Proxy is set in environment variables, try disabling it'
+ )
+ if proxy_msg in output:
+ print("Warning: {0}".format(proxy_msg))
+
except ValueError:
output_json = {
'status': 'bad_json_output',
- 'text': output,
+ 'text': stdout,
'data': None,
}
return output_json, retval
@@ -1196,16 +1294,18 @@ def dom_get_resource_clone_ms_parent(dom, resource_id):
or
dom_get_group(dom, resource_id)
)
- return dom_elem_get_resource_clone_ms_parent(resource)
+ if resource:
+ return dom_elem_get_resource_clone_ms_parent(resource)
+ return None
def dom_elem_get_resource_clone_ms_parent(resource):
- clone = resource
- while True:
- if not isinstance(clone, xml.dom.minidom.Element):
- return None
- if clone.tagName in ["clone", "master"]:
- return clone
- clone = clone.parentNode
+ return dom_get_parent_by_tag_names(resource, ["clone", "master"])
+
+def dom_get_resource_bundle_parent(dom, resource_id):
+ resource = dom_get_resource(dom, resource_id)
+ if resource:
+ return dom_get_parent_by_tag_names(resource, ["bundle"])
+ return None
def dom_get_master(dom, master_id):
for master in dom.getElementsByTagName("master"):
@@ -1225,6 +1325,22 @@ def dom_get_group(dom, group_id):
return group
return None
+def dom_get_bundle(dom, bundle_id):
+ for bundle in dom.getElementsByTagName("bundle"):
+ if bundle.getAttribute("id") == bundle_id:
+ return bundle
+ return None
+
+def dom_get_resource_bundle(bundle_el):
+ for child in bundle_el.childNodes:
+ if (
+ child.nodeType == xml.dom.minidom.Node.ELEMENT_NODE
+ and
+ child.tagName == "primitive"
+ ):
+ return child
+ return None
+
def dom_get_group_clone(dom, group_id):
for clone in dom.getElementsByTagName("clone"):
group = dom_get_group(clone, group_id)
@@ -1281,9 +1397,11 @@ def validate_constraint_resource(dom, resource_id):
dom_get_clone(dom, resource_id)
or
dom_get_master(dom, resource_id)
+ or
+ dom_get_bundle(dom, resource_id)
)
if resource_el:
- # clone and master is always valid
+ # clones, masters and bundles are always valid
return True, "", resource_id
resource_el = (
@@ -1294,9 +1412,14 @@ def validate_constraint_resource(dom, resource_id):
if not resource_el:
return False, "Resource '%s' does not exist" % resource_id, None
- clone_el = dom_get_resource_clone_ms_parent(dom, resource_id)
+ clone_el = (
+ dom_get_resource_clone_ms_parent(dom, resource_id)
+ or
+ dom_get_resource_bundle_parent(dom, resource_id)
+ )
if not clone_el:
- # primitive and group is valid if not in clone nor master
+ # a primitive and a group are valid if not in a clone, a master or a
+ # bundle
return True, "", resource_id
if "--force" in pcs_options:
@@ -1322,6 +1445,14 @@ def validate_constraint_resource(dom, resource_id):
% (resource_id, clone_el.getAttribute("id")),
clone_el.getAttribute("id")
)
+ if clone_el.tagName == "bundle":
+ return (
+ False,
+ "%s is a bundle resource, you should use the bundle id: %s "
+ "when adding constraints. Use --force to override."
+ % (resource_id, clone_el.getAttribute("id")),
+ clone_el.getAttribute("id")
+ )
return True, "", resource_id
@@ -1371,12 +1502,12 @@ def dom_get_child_by_tag_name(dom_el, tag_name):
return children[0]
return None
-def dom_get_parent_by_tag_name(dom_el, tag_name):
+def dom_get_parent_by_tag_names(dom_el, tag_names):
parent = dom_el.parentNode
while parent:
if not isinstance(parent, xml.dom.minidom.Element):
return None
- if parent.tagName == tag_name:
+ if parent.tagName in tag_names:
return parent
parent = parent.parentNode
return None
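
A small self-contained sketch (hypothetical snippet, not part of the patch) of
the generalized helper walking up from a primitive to its clone parent:

    import xml.dom.minidom
    dom = xml.dom.minidom.parseString(
        '<clone id="c"><primitive id="p"/></clone>'
    )
    primitive = dom.getElementsByTagName("primitive")[0]
    parent = dom_get_parent_by_tag_names(primitive, ["clone", "master"])
    # parent.getAttribute("id") == "c"
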
@@ -1390,6 +1521,7 @@ def dom_attrs_to_list(dom_el, with_id=False):
attributes.append("(id:%s)" % (dom_el.getAttribute("id")))
return attributes
+# moved to pcs.lib.pacemaker.state
def get_resource_for_running_check(cluster_state, resource_id, stopped=False):
for clone in cluster_state.getElementsByTagName("clone"):
if clone.getAttribute("id") == resource_id:
@@ -1420,6 +1552,8 @@ def get_resource_for_running_check(cluster_state, resource_id, stopped=False):
resource_id = elem.getAttribute("id")
return resource_id
+# moved to pcs.lib.pacemaker.state
+# see pcs.lib.commands.resource for usage
def resource_running_on(resource, passed_state=None, stopped=False):
nodes_started = []
nodes_master = []
@@ -1477,56 +1611,15 @@ def resource_running_on(resource, passed_state=None, stopped=False):
"nodes_slave": nodes_slave,
}
-def filter_default_op_from_actions(resource_actions):
- filtered = []
- for action in resource_actions:
- if action.get("name", "") not in DEFAULT_RESOURCE_ACTIONS:
- continue
- new_action = dict([
- (name, value)
- for name, value in action.items()
- if name != "depth"
- ])
- filtered.append(new_action)
- return filtered
-
-# Given a resource agent (ocf:heartbeat:XXX) return an list of default
-# operations or an empty list if unable to find any default operations
-def get_default_op_values(full_agent_name):
- default_ops = []
- try:
- if full_agent_name.startswith("stonith:"):
- metadata = lib_ra.StonithAgent(
- cmd_runner(),
- full_agent_name[len("stonith:"):]
- )
- else:
- metadata = lib_ra.ResourceAgent(
- cmd_runner(),
- full_agent_name
- )
- actions = filter_default_op_from_actions(metadata.get_actions())
-
- for action in actions:
- op = [action["name"]]
- for key in action.keys():
- if key != "name" and action[key] != "0":
- op.append("{0}={1}".format(key, action[key]))
- default_ops.append(op)
- except lib_ra.UnableToGetAgentMetadata:
- return []
- except lib_ra.ResourceAgentError as e:
- process_library_reports(
- [lib_ra.resource_agent_error_to_report_item(e)]
- )
- except LibraryError as e:
- process_library_reports(e.args)
-
- return default_ops
-
+def agent_action_to_cmdline_format(action):
+ op = [action["name"]]
+ for key in action.keys():
+ if key != "name" and action[key] != "0":
+ op.append("{0}={1}".format(key, action[key]))
+ return op
def check_pacemaker_supports_resource_wait():
- if not has_resource_wait_support(cmd_runner()):
+ if not has_wait_for_idle_support(cmd_runner()):
err("crm_resource does not support --wait, please upgrade pacemaker")
def validate_wait_get_timeout(need_cib_support=True):
@@ -1850,7 +1943,7 @@ def get_terminal_password(message="Password: "):
def getClusterState():
return parseString(getClusterStateXml())
-# DEPRECATED, please use lib.pacemaker.get_cluster_status_xml in new code
+# DEPRECATED, please use lib.pacemaker.live.get_cluster_status_xml in new code
def getClusterStateXml():
xml, returncode = run(["crm_mon", "--one-shot", "--as-xml", "--inactive"])
if returncode != 0:
@@ -2724,6 +2817,7 @@ def get_lib_env():
cib_data,
corosync_conf_data,
auth_tokens_getter=readTokens,
+ request_timeout=pcs_options.get("--request-timeout"),
)
def get_cli_env():
@@ -2743,6 +2837,7 @@ def get_cli_env():
env.groups = groups
env.auth_tokens_getter = readTokens
env.debug = "--debug" in pcs_options
+ env.request_timeout = pcs_options.get("--request-timeout")
return env
def get_middleware_factory():
@@ -2773,16 +2868,25 @@ def get_modificators():
#there is possible create class extending dict, so dict like access in
#commands is not an issue
return {
+ "after": pcs_options.get("--after", None),
+ "all": "--all" in pcs_options,
"autocorrect": "--autocorrect" in pcs_options,
"autodelete": "--autodelete" in pcs_options,
+ "before": pcs_options.get("--before", None),
"corosync_conf": pcs_options.get("--corosync_conf", None),
"describe": "--nodesc" not in pcs_options,
+ "device": pcs_options.get("--device", []),
+ "disabled": "--disabled" in pcs_options,
"enable": "--enable" in pcs_options,
"force": "--force" in pcs_options,
"full": "--full" in pcs_options,
+ "group": pcs_options.get("--group", None),
+ "monitor": "--monitor" in pcs_options,
"name": pcs_options.get("--name", None),
+ "no-default-ops": "--no-default-ops" in pcs_options,
"skip_offline_nodes": "--skip-offline" in pcs_options,
"start": "--start" in pcs_options,
+ "wait": pcs_options.get("--wait", False),
"watchdog": pcs_options.get("--watchdog", []),
}
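
A hedged sketch of how a CLI command might consume these modifiers (the
function name and body are illustrative only, not part of this patch):

    def resource_create_cli(lib, argv, modifiers):
        # "disabled" and "wait" come straight from the dictionary above
        if modifiers["disabled"]:
            print("resource will be created in a stopped state")
        if modifiers["wait"] is not False:
            print("will wait for the cluster to settle")
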
diff --git a/pcsd/Gemfile b/pcsd/Gemfile
index ded32ae..e01b31c 100644
--- a/pcsd/Gemfile
+++ b/pcsd/Gemfile
@@ -16,3 +16,5 @@ gem 'json'
gem 'multi_json'
gem 'open4'
gem 'orderedhash'
+gem 'ffi'
+gem 'ethon'
diff --git a/pcsd/Gemfile.lock b/pcsd/Gemfile.lock
index e56c76a..a3fab96 100644
--- a/pcsd/Gemfile.lock
+++ b/pcsd/Gemfile.lock
@@ -3,8 +3,10 @@ GEM
remote: https://tojeline.fedorapeople.org/rubygems/
specs:
backports (3.6.8)
- json (1.8.3)
- multi_json (1.12.0)
+ ethon (0.10.1)
+ ffi (1.9.17)
+ json (2.0.3)
+ multi_json (1.12.1)
open4 (1.3.4)
orderedhash (0.0.6)
rack (1.6.4)
@@ -13,7 +15,7 @@ GEM
rack-test (0.6.3)
rack (>= 1.0)
rpam-ruby19 (1.2.1)
- sinatra (1.4.7)
+ sinatra (1.4.8)
rack (~> 1.4)
rack-protection (~> 1.4)
tilt (>= 1.3, < 3)
@@ -24,13 +26,15 @@ GEM
rack-test
sinatra (~> 1.4.0)
tilt (>= 1.3, < 3)
- tilt (2.0.3)
+ tilt (2.0.6)
PLATFORMS
ruby
DEPENDENCIES
backports
+ ethon
+ ffi
json
multi_json
open4
diff --git a/pcsd/Makefile b/pcsd/Makefile
index e5ee6de..2ecd4de 100644
--- a/pcsd/Makefile
+++ b/pcsd/Makefile
@@ -1,5 +1,16 @@
+FFI_VERSION="1.9.17"
+FFI_C_DIR=vendor/bundle/ruby/gems/ffi-${FFI_VERSION}/ext/ffi_c
+
build_gems: get_gems
bundle install --local --deployment
#ffi makes a symlink with an absolute path. Let's change it to a relative path.
+ for fname in `ls ${FFI_C_DIR}/libffi-*/include/ffitarget.h`; do \
+ if [[ -L "$$fname" ]]; then \
+ target=$$(readlink $$fname | sed "s~.*/${FFI_C_DIR}\(/libffi/src/[^/]\+/ffitarget.h\)~../..\1~"); \
+ rm $$fname; \
+ ln -s $$target $$fname; \
+ fi; \
+ done;
# RHEL6 needs special rpam-ruby19 gem to work with 1.8.7
# also bundler is not available on RHEL6 in rpm
@@ -7,17 +18,19 @@ build_gems_rhel6:
mkdir -p vendor/bundle/ruby
gem install --verbose --no-rdoc --no-ri -l -i vendor/bundle/ruby \
vendor/cache/backports-3.6.8.gem \
- vendor/cache/json-1.8.3.gem \
- vendor/cache/multi_json-1.12.0.gem \
+ vendor/cache/ethon-0.9.1.gem \
+ vendor/cache/ffi-${FFI_VERSION}.gem \
+ vendor/cache/json-2.0.3.gem \
+ vendor/cache/multi_json-1.12.1.gem \
vendor/cache/open4-1.3.4.gem \
vendor/cache/orderedhash-0.0.6.gem \
vendor/cache/rack-1.6.4.gem \
vendor/cache/rack-protection-1.5.3.gem \
vendor/cache/rack-test-0.6.3.gem \
vendor/cache/rpam-ruby19-feist-1.2.1.1.gem \
- vendor/cache/tilt-2.0.3.gem \
- vendor/cache/sinatra-1.4.7.gem \
+ vendor/cache/sinatra-1.4.8.gem \
vendor/cache/sinatra-contrib-1.4.7.gem \
+ vendor/cache/tilt-2.0.6.gem \
-- '--with-ldflags="-Wl,-z,now -Wl,-z,relro"'
get_gems:
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
index f944d20..78bd87f 100644
--- a/pcsd/bootstrap.rb
+++ b/pcsd/bootstrap.rb
@@ -1,6 +1,7 @@
+require 'digest/sha2'
require 'logger'
-require 'pathname'
require 'open4'
+require 'pathname'
require 'settings.rb'
@@ -35,16 +36,20 @@ def is_systemctl()
return false
end
-def get_pcs_path(pcsd_path)
- real_path = Pathname.new(pcsd_path).realpath.to_s
- if PCSD_EXEC_LOCATION == real_path or PCSD_EXEC_LOCATION == (real_path + '/')
- return '/usr/sbin/pcs'
+def get_pcs_path()
+ pcsd_path = Pathname.new(
+ File.expand_path(File.dirname(__FILE__))
+ ).realpath.to_s
+ if PCSD_EXEC_LOCATION == pcsd_path or PCSD_EXEC_LOCATION == (pcsd_path + '/')
+ return PCS_EXEC
else
- return '../pcs/pcs'
+ return pcsd_path + '/../pcs/pcs'
end
end
-PCS_VERSION = '0.9.155'
+PCS_VERSION = '0.9.158'
+# unique instance signature, allows detection of daemon restarts
+DAEMON_INSTANCE_SIGNATURE = Digest::SHA2.hexdigest("#{Time.now} #{rand()}")
COROSYNC = COROSYNC_BINARIES + "corosync"
ISRHEL6 = is_rhel6
ISSYSTEMCTL = is_systemctl
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 11d3b2b..930b4a0 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -1,14 +1,17 @@
# Wrapper for PCS command
-#
+
+require 'etc'
require 'open4'
require 'shellwords'
require 'cgi'
require 'net/http'
require 'net/https'
+require 'uri'
require 'json'
require 'fileutils'
require 'backports'
require 'base64'
+require 'ethon'
require 'config.rb'
require 'cfgsync.rb'
@@ -58,8 +61,14 @@ def add_node_attr(auth_user, node, key, value)
end
def add_meta_attr(auth_user, resource, key, value)
+ # --force is a workaround for:
+ # 1) Error: this command is not sufficient for create guest node, use 'pcs
+ # cluster node add-guest', use --force to override
+ # 2) Error: this command is not sufficient for remove guest node, use 'pcs
+ # cluster node remove-guest', use --force to override
stdout, stderr, retval = run_cmd(
- auth_user, PCS, "resource", "meta", resource, key.to_s + "=" + value.to_s
+ auth_user, PCS, "resource", "meta", resource, key.to_s + "=" + value.to_s,
+ "--force"
)
return retval
end
@@ -391,7 +400,7 @@ def send_nodes_request_with_token(auth_user, nodes, request, post=false, data={}
return code, out
end
-def send_request_with_token(auth_user, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30, additional_tokens={})
+def send_request_with_token(auth_user, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=nil, additional_tokens={})
token = additional_tokens[node] || get_node_token(node)
$logger.info "SRWT Node: #{node} Request: #{request}"
if not token
@@ -406,74 +415,138 @@ def send_request_with_token(auth_user, node, request, post=false, data={}, remot
)
end
-def send_request(auth_user, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30, cookies_data=nil)
+def _get_cookie_list(auth_user, cookies_data)
+ cookie_list = []
+ cookies_data_default = {}
+ # Let's be safe about characters in cookie variables and do base64.
+ # We cannot do it for CIB_user however to be backward compatible
+ # so we at least remove disallowed characters.
+ cookies_data_default['CIB_user'] = PCSAuth.cookieUserSafe(
+ auth_user[:username].to_s
+ )
+ cookies_data_default['CIB_user_groups'] = PCSAuth.cookieUserEncode(
+ (auth_user[:usergroups] || []).join(' ')
+ )
+
+ cookies_data_default.update(cookies_data)
+ cookies_data_default.each { |name, value|
+ cookie_list << CGI::Cookie.new('name' => name, 'value' => value).to_s
+ }
+ return cookie_list
+end
+
+def _transform_data(data)
+ # Converts data in a way that the URI.encode_www_form method will encode it
+ # correctly. If an array is passed as a value to encode_www_form, the WEBrick
+ # parser will use only the last value.
+ new_data = []
+ data.each { |key, val|
+ if val.kind_of?(Array)
+ val.each { |value|
+ new_data << ["#{key.to_s}[]", value]
+ }
+ else
+ new_data << [key, val]
+ end
+ }
+ return new_data
+end
+
+def send_request(
+ auth_user, node, request, post=false, data={}, remote=true, raw_data=nil,
+ timeout=nil, cookies_data=nil
+)
cookies_data = {} if not cookies_data
- request = "/#{request}" if not request.start_with?("/")
+ if request.start_with?("/")
+ request.slice!(0)
+ end
- # fix ipv6 address for URI.parse
node6 = node
if (node.include?(":") and ! node.start_with?("["))
node6 = "[#{node}]"
end
if remote
- uri = URI.parse("https://#{node6}:2224/remote" + request)
+ url = "https://#{node6}:2224/remote/#{request}"
else
- uri = URI.parse("https://#{node6}:2224" + request)
+ url = "https://#{node6}:2224/#{request}"
end
+ data = _transform_data(data)
+
if post
- req = Net::HTTP::Post.new(uri.path)
- raw_data ? req.body = raw_data : req.set_form_data(data)
+ encoded_data = (raw_data) ? raw_data : URI.encode_www_form(data)
else
- req = Net::HTTP::Get.new(uri.path)
- req.set_form_data(data)
+ url_data = (raw_data) ? raw_data : URI.encode_www_form(data)
+ prefix = request.include?('?') ? '&' : '?'
+ url += "#{prefix}#{url_data}"
end
- cookies_to_send = []
- cookies_data_default = {}
- # Let's be safe about characters in cookie variables and do base64.
- # We cannot do it for CIB_user however to be backward compatible
- # so we at least remove disallowed characters.
- cookies_data_default['CIB_user'] = PCSAuth.cookieUserSafe(
- auth_user[:username].to_s
- )
- cookies_data_default['CIB_user_groups'] = PCSAuth.cookieUserEncode(
- (auth_user[:usergroups] || []).join(' ')
- )
-
- cookies_data_default.update(cookies_data)
- cookies_data_default.each { |name, value|
- cookies_to_send << CGI::Cookie.new('name' => name, 'value' => value).to_s
- }
- req.add_field('Cookie', cookies_to_send.join(';'))
+ timeout_ms = 30000
begin
- # uri.host returns "[addr]" for ipv6 addresses, which is wrong
- # uri.hostname returns "addr" for ipv6 addresses, which is correct, but it
- # is not available in older ruby versions
- # There is a bug in Net::HTTP.new in some versions of ruby which prevents
- # ipv6 addresses being used here at all.
- myhttp = Net::HTTP.new(node, uri.port)
- myhttp.use_ssl = true
- myhttp.verify_mode = OpenSSL::SSL::VERIFY_NONE
- res = myhttp.start do |http|
- http.read_timeout = timeout
- http.request(req)
- end
- return res.code.to_i, res.body
- rescue Exception => e
- $logger.info "No response from: #{node} request: #{request}, exception: #{e}"
+ if timeout
+ timeout_ms = (Float(timeout) * 1000).to_i
+ elsif ENV['PCSD_NETWORK_TIMEOUT']
+ timeout_ms = (Float(ENV['PCSD_NETWORK_TIMEOUT']) * 1000).to_i
+ end
+ rescue
+ end
+
+ req = Ethon::Easy.new()
+ req.set_attributes({
+ :url => url,
+ :timeout_ms => timeout_ms,
+ :cookie => _get_cookie_list(auth_user, cookies_data).join(';'),
+ :ssl_verifyhost => 0,
+ :ssl_verifypeer => 0,
+ :postfields => (encoded_data) ? encoded_data : nil,
+ :httpget => (post ? 0 : 1),
+ :nosignal => 1, # required for multi-threading
+ })
+ return_code = req.perform
+ if return_code == :ok
+ return req.response_code, req.response_body
+ else
+ if is_proxy_set(ENV)
+ $logger.warn(
+ 'Proxy is set in environment variables, try disabling it'
+ )
+ end
+ $logger.info(
+ "No response from: #{node} request: #{request}, error: #{return_code}"
+ )
return 400,'{"noresponse":true}'
end
end
-def add_node(auth_user, new_nodename, all=false, auto_start=true, watchdog=nil)
+def is_proxy_set(env_var_hash)
+ proxy_list = ["https_proxy", "all_proxy"]
+ proxy_list += proxy_list.map {|item| item.upcase}
+ proxy_list.each { |var|
+ if env_var_hash[var] and env_var_hash[var] != ''
+ return true
+ end
+ }
+ return false
+end
+
+def add_node(
+ auth_user, new_nodename, all=false, auto_start=true, watchdog=nil,
+ device_list=nil
+)
if all
command = [PCS, "cluster", "node", "add", new_nodename]
if watchdog and not watchdog.strip.empty?
command << "--watchdog=#{watchdog.strip}"
end
+ if device_list
+ device_list.each { |device|
+ if device and not device.strip.empty?
+ command << "--device=#{device.strip}"
+ end
+ }
+ end
if auto_start
command << '--start'
command << '--enable'
@@ -686,16 +759,16 @@ def get_resource_agents_avail(auth_user, params)
code, result = send_cluster_request_with_token(
auth_user, params[:cluster], 'get_avail_resource_agents'
)
- return {} if 200 != code
+ return [] if 200 != code
begin
ra = JSON.parse(result)
if (ra["noresponse"] == true) or (ra["notauthorized"] == "true") or (ra["notoken"] == true) or (ra["pacemaker_not_running"] == true)
- return {}
+ return []
else
- return ra
+ return ra.keys
end
rescue JSON::ParserError
- return {}
+ return []
end
end
@@ -815,11 +888,13 @@ def get_fence_levels(auth_user, cib_dom=nil)
'/cib/configuration/fencing-topology/fencing-level'
) { |e|
target = e.attributes['target']
- fence_levels[target] ||= []
- fence_levels[target] << {
- 'level' => e.attributes['index'],
- 'devices' => e.attributes['devices']
- }
+ if target
+ fence_levels[target] ||= []
+ fence_levels[target] << {
+ 'level' => e.attributes['index'],
+ 'devices' => e.attributes['devices']
+ }
+ end
}
fence_levels.each { |_, val| val.sort_by! { |obj| obj['level'].to_i }}
@@ -955,8 +1030,11 @@ def get_rhel_version()
end
def pcsd_restart()
+ # restart in a separate process so we can send a response to the restart
+ # request
fork {
- sleep(10)
+ # let us send the response to the restart request
+ sleep(3)
if ISSYSTEMCTL
exec("systemctl", "restart", "pcsd")
else
@@ -1337,10 +1415,18 @@ def pcsd_restart_nodes(auth_user, nodes)
node_status = {}
node_response.each { |node, response|
if response[0] == 200
- node_status[node] = {
+ my_status = {
'status' => 'ok',
'text' => 'Success',
}
+ begin
+ parsed_response = JSON.parse(response[1], {:symbolize_names => true})
+ if parsed_response[:instance_signature]
+ my_status["instance_signature"] = parsed_response[:instance_signature]
+ end
+ rescue JSON::ParserError
+ end
+ node_status[node] = my_status
else
text = response[1]
if response[0] == 401
@@ -1371,11 +1457,22 @@ def pcsd_restart_nodes(auth_user, nodes)
}
end
-def write_file_lock(path, perm, data, binary=false)
+def get_uid(username)
+ return Etc.getpwnam(username).uid
+end
+
+def get_gid(groupname)
+ return Etc.getgrnam(groupname).gid
+end
+
+def write_file_lock(path, perm, data, binary=false, user=nil, group=nil)
file = nil
begin
file = File.open(path, binary ? 'wb' : 'w', perm)
file.flock(File::LOCK_EX)
+ if user or group
+ File.chown(get_uid(user), get_gid(group), path)
+ end
file.write(data)
rescue => e
$logger.error("Cannot save file '#{path}': #{e.message}")
@@ -1999,6 +2096,9 @@ def is_service_installed?(service)
return false
end
+ # currently we are not using systemd instances (service_name@instance) in pcsd
+ # for a proper implementation of is_service_installed see
+ # pcs/lib/external.py:is_service_installed
stdout, _, retcode = run_cmd(
PCSAuth.getSuperuserAuth(), 'systemctl', 'list-unit-files', '--full'
)
@@ -2095,13 +2195,6 @@ def get_sbd_service_name()
end
end
-def write_booth_config(config, data)
- if config.include?('/')
- raise InvalidFileNameException.new(config)
- end
- write_file_lock(File.join(BOOTH_CONFIG_DIR, config), nil, data)
-end
-
def read_booth_config(config)
if config.include?('/')
raise InvalidFileNameException.new(config)
@@ -2113,15 +2206,6 @@ def read_booth_config(config)
return read_file_lock(config_path)
end
-def write_booth_authfile(filename, data)
- if filename.include?('/')
- raise InvalidFileNameException.new(filename)
- end
- write_file_lock(
- File.join(BOOTH_CONFIG_DIR, filename), 0600, Base64.decode64(data), true
- )
-end
-
def read_booth_authfile(filename)
if filename.include?('/')
raise InvalidFileNameException.new(filename)
diff --git a/pcsd/pcsd-cli.rb b/pcsd/pcsd-cli.rb
index 06578e5..db56c91 100755
--- a/pcsd/pcsd-cli.rb
+++ b/pcsd/pcsd-cli.rb
@@ -27,7 +27,7 @@ end
# bootstrap, emulate environment created by pcsd http server
auth_user = {}
-PCS = get_pcs_path(File.expand_path(File.dirname(__FILE__)))
+PCS = get_pcs_path()
$logger_device = StringIO.new
$logger = configure_logger($logger_device)
diff --git a/pcsd/pcsd.8 b/pcsd/pcsd.8
new file mode 100644
index 0000000..2d8b8df
--- /dev/null
+++ b/pcsd/pcsd.8
@@ -0,0 +1,100 @@
+.TH PCSD "8" "May 2017" "pcs 0.9.158" "System Administration Utilities"
+.SH NAME
+pcsd \- pacemaker/corosync configuration system daemon
+
+.SH DESCRIPTION
+Daemon for controlling and configuring pacemaker/corosync clusters via pcs.
+
+.SH ENVIRONMENT
+
+.SS Network and SSL/TLS Settings
+.TP
+.B PCSD_BIND_ADDR=<string>
+List of IP addresses pcsd should bind to, delimited by the ',' character.
+.TP
+.B PCSD_SSL_OPTIONS=<string>
+SSL/TLS options delimited by the ',' character. This is usually used to set SSL/TLS protocols accepted by pcsd. The list of valid options can be obtained by running: ruby -e 'require "openssl"; puts OpenSSL::SSL.constants.grep /^OP_/'
+.TP
+.B PCSD_SSL_CIPHERS=<string>
+SSL/TLS ciphers accepted by pcsd.
+
+.SS Web UI Settings
+.TP
+.B PCSD_DISABLE_GUI=<boolean>
+Set to \fBtrue\fR to disable web UI frontend in pcsd.
+.TP
+.B PCSD_SESSION_LIFETIME=<integer>
+Web UI session lifetime in seconds.
+
+.SS Proxy Settings
+See ENVIRONMENT section in curl(1) man page for more details.
+.TP
+.B https_proxy=<string>, HTTPS_PROXY=<string>
+Proxy server address for node to node communication.
+.TP
+.B all_proxy=<string>, ALL_PROXY=<string>
+Proxy server address for node to node communication.
+.TP
+.B no_proxy=<string>, NO_PROXY=<string>
+List of hostnames for which proxy is not used.
+
+.SS Miscellaneous Settings
+.TP
+.B PCSD_DEBUG=<boolean>
+Set to \fBtrue\fR for advanced pcsd debugging information.
+.TP
+.B RACK_ENV=<string>
+Value of this variable has to be \fBproduction\fR, otherwise pcsd will not work properly.
+
+.SH FILES
+All files described in this section are located in \fB/var/lib/pcsd/\fR. They are not meant to be edited manually unless stated otherwise.
+
+.SS cfgsync_ctl
+This JSON file controls the synchronization of pcsd and cluster configuration files across cluster nodes, which pcsd performs automatically in the background. This file can be safely edited manually.
+.br
+Example:
+.br
+{
+.br
+ "file_backup_count": 50,
+.br
+ "thread_disabled": false,
+.br
+ "thread_interval": 60,
+.br
+ "thread_paused_until": 1487780453,
+.br
+}
+
+.TP
+.B file_backup_count
+How many backup files should be kept for each synchronized file.
+.TP
+.B thread_disabled
+Set this to \fBtrue\fR to completely disable the synchronization.
+.TP
+.B thread_interval
+How often, in seconds, pcsd asks other nodes whether the synchronized files have changed.
+.TP
+.B thread_paused_until
+Disable the synchronization until the set unix timestamp.
+
+.SS pcsd_settings.conf
+This JSON file contains web UI and cluster configuration such as clusters managed from web UI and cluster permissions.
+
+.SS pcs_users.conf
+This JSON file stores authentication tokens accepted by the local instance of pcsd.
+
+.SS pcsd.cookiesecret
+Web UI cookie secret.
+
+.SS pcsd.crt, pcsd.key
+These files contain pcsd server certificate and private key.
+
+.SS tokens
+This JSON file stores authentication tokens which are used to log in to remote instances of pcsd.
+
+.SH SEE ALSO
+.BR pcs (8)
+
+.BR curl (1)
diff --git a/pcsd/pcsd.conf b/pcsd/pcsd.conf
index 0ac5eec..b3433ac 100644
--- a/pcsd/pcsd.conf
+++ b/pcsd/pcsd.conf
@@ -15,7 +15,14 @@ PCSD_SESSION_LIFETIME=3600
# ruby -e 'require "openssl"; puts OpenSSL::SSL.constants.grep /^OP_/'
#PCSD_SSL_OPTIONS='OP_NO_SSLv2,OP_NO_SSLv3,OP_NO_TLSv1,OP_NO_TLSv1_1'
# set SSL ciphers
-#PCSD_SSL_CIPHERS='DEFAULT:!RC4:!3DES:@STRENGTH!'
+#PCSD_SSL_CIPHERS='DEFAULT:!RC4:!3DES:@STRENGTH'
+
+# Proxy settings for pcsd node to node communication
+# See ENVIRONMENT section in curl(1) man page for more details.
+# Proxy address
+#HTTPS_PROXY=
+# Do not use proxy for specified hostnames
+#NO_PROXY=
# Do not change
RACK_ENV=production
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index dcfd5a0..33d999d 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -107,7 +107,7 @@ configure do
DISABLE_GUI = (
ENV['PCSD_DISABLE_GUI'] and ENV['PCSD_DISABLE_GUI'].downcase == 'true'
)
- PCS = get_pcs_path(File.expand_path(File.dirname(__FILE__)))
+ PCS = get_pcs_path()
logger = File.open("/var/log/pcsd/pcsd.log", "a+", 0600)
STDOUT.reopen(logger)
STDERR.reopen(logger)
@@ -778,7 +778,9 @@ already been added to pcsd. You may not add two clusters with the same name int
if @nodes == []
redirect '/manage/'
end
- @resource_agents = get_resource_agents_avail(auth_user, params)
+ @resource_agent_structures = get_resource_agents_avail(auth_user, params) \
+ .map{|agent_name| get_resource_agent_name_structure(agent_name)} \
+ .select{|structure| structure != nil}
@stonith_agents = get_stonith_agents_avail(auth_user, params)
erb :nodes, :layout => :main
end
@@ -967,7 +969,7 @@ already been added to pcsd. You may not add two clusters with the same name int
else
definition = JSON.parse(out)
end
-
+
definition.each { |name, prop|
prop['value'] = properties[name]
}
diff --git a/pcsd/pcsd_action_command.rb b/pcsd/pcsd_action_command.rb
new file mode 100644
index 0000000..4e642c0
--- /dev/null
+++ b/pcsd/pcsd_action_command.rb
@@ -0,0 +1,92 @@
+require 'pcsd_exchange_format.rb'
+require 'settings.rb'
+require 'pcs.rb' #(enable|disable|start|stop)_service,
+
+module PcsdActionCommand
+ class ActionType
+ def initialize(id, action)
+ @id = id
+ @action = action
+ end
+
+ def validate()
+ end
+
+ def process()
+ end
+
+ end
+
+ class ServiceCommand < ActionType
+ @@services = {
+ "pacemaker_remote" => [
+ "enable",
+ "disable",
+ "start",
+ "stop",
+ ]
+ }
+
+ def error(message)
+ return PcsdExchangeFormat::Error.for_item("action", @id, message)
+ end
+
+ def validate()
+ unless @action.has_key?(:service)
+ raise self.error("'service' is missing")
+ end
+
+ unless @action.has_key?(:command)
+ raise self.error("'command' is missing")
+ end
+
+ unless @@services.key?(@action[:service])
+ raise self.error(
+ "unsupported 'service' ('#{@action[:service]}')"+
+ " supported are #{@@services.keys.sort}"
+ )
+ end
+
+ unless @@services[@action[:service]].include?(@action[:command])
+ raise self.error(
+ "unsupported 'command' ('#{@action[:command]}') for service "+
+ "'#{@action[:service]}',"+
+ " supported are #{@@services[@action[:service]].sort}"
+ )
+ end
+ end
+
+ def run_service_command()
+ #validation is required here, otherwise a disallowed @action[:service]
+ #could slip through
+ self.validate()
+
+ case @action[:command]
+ when "enable"
+ return enable_service(@action[:service])
+ when "disable"
+ return disable_service(@action[:service])
+ when "start"
+ return start_service(@action[:service])
+ when "stop"
+ return stop_service(@action[:service])
+ else
+ #a mistake in @@services?
+ raise self.error(
+ "unsupported 'command' ('#{@action[:command]}') for service "+
+ "'#{@action[:service]}'"
+ )
+ end
+ end
+
+ def process()
+ return PcsdExchangeFormat::result(
+ self.run_service_command() ? :success : :fail
+ )
+ end
+ end
+
+ TYPES = {
+ "service_command" => ServiceCommand,
+ }
+end
diff --git a/pcsd/pcsd_exchange_format.rb b/pcsd/pcsd_exchange_format.rb
new file mode 100644
index 0000000..6870b16
--- /dev/null
+++ b/pcsd/pcsd_exchange_format.rb
@@ -0,0 +1,52 @@
+module PcsdExchangeFormat
+ class Error < StandardError
+ def self.for_item(item_name, id, message)
+ new "#{item_name} (key: #{id}): #{message}"
+ end
+ end
+end
+
+def PcsdExchangeFormat::result(code, message="")
+ return {
+ :code => code,
+ :message => message,
+ }
+end
+
+def PcsdExchangeFormat.no_hash_message(no_hash)
+ return "should be 'Hash'. "+
+ "But it is '#{no_hash.class}': #{JSON.generate(no_hash)}"
+end
+
+def PcsdExchangeFormat.validate_item_map_is_Hash(items_name, item_map)
+ unless item_map.is_a? Hash
+ raise PcsdExchangeFormat::Error.new(
+ "#{items_name} #{self.no_hash_message(item_map)}"
+ )
+ end
+end
+
+def PcsdExchangeFormat.validate_item_is_Hash(item_name, id, file_data)
+ unless file_data.is_a? Hash
+ raise PcsdExchangeFormat::Error.for_item(
+ item_name, id, PcsdExchangeFormat::no_hash_message(file_data)
+ )
+ end
+end
+
+def PcsdExchangeFormat.run_action(action_types, item_name, id, action_hash)
+ unless action_hash.has_key?(:type)
+ raise PcsdExchangeFormat::Error.for_item(item_name, id, "'type' is missing")
+ end
+
+ unless action_types.key?(action_hash[:type])
+ raise PcsdExchangeFormat::Error.for_item(
+ item_name,
+ id,
+ "unsupported 'type' ('#{action_hash[:type]}')"+
+ " supported are #{action_types.keys}"
+ )
+ end
+
+ return action_types[action_hash[:type]].new(id, action_hash).process()
+end
diff --git a/pcsd/pcsd_file.rb b/pcsd/pcsd_file.rb
new file mode 100644
index 0000000..de7d355
--- /dev/null
+++ b/pcsd/pcsd_file.rb
@@ -0,0 +1,189 @@
+require 'base64'
+require 'pcs.rb' #write_file_lock, read_file_lock
+require 'settings.rb'
+require 'pcsd_exchange_format.rb'
+
+
+module PcsdFile
+ class PutFile
+ def initialize(id, file)
+ @id = id
+ @file = file
+ end
+
+ def validate()
+ PcsdFile::validate_file_key_with_string(@id, @file, :data)
+ end
+
+ def rewrite_existing()
+ return @file[:rewrite_existing]
+ end
+
+ def full_file_name()
+ raise NotImplementedError.new(
+ "'#{__method__}' is not implemented in '#{self.class}'"
+ )
+ end
+
+ def binary?()
+ return true
+ end
+
+ def exists?()
+ return @exists if defined? @exists
+ @exists ||= File.file?(self.full_file_name)
+ end
+
+ def exists_with_same_content()
+ unless self.exists?
+ return false
+ end
+
+ if self.binary?
+ return Base64.strict_encode64(self.read()) == @file[:data]
+ end
+
+ return self.read() == @file[:data]
+ end
+
+ def write()
+ write_file_lock(
+ self.full_file_name,
+ self.permissions,
+ self.binary? ? Base64.decode64(@file[:data]) : @file[:data],
+ self.binary?,
+ self.user,
+ self.group
+ )
+ end
+
+ def permissions()
+ return nil
+ end
+
+ def user()
+ return nil
+ end
+
+ def group()
+ return nil
+ end
+
+ def read()
+ return read_file_lock(self.full_file_name, self.binary?)
+ end
+
+ def process()
+ self.validate()
+ begin
+ unless self.exists?
+ self.write()
+ return PcsdExchangeFormat::result(:written)
+ end
+
+ if self.rewrite_existing
+ self.write()
+ return PcsdExchangeFormat::result(:rewritten)
+ end
+
+ if self.exists_with_same_content()
+ return PcsdExchangeFormat::result(:same_content)
+ end
+
+ return PcsdExchangeFormat::result(:conflict)
+ rescue => e
+ return PcsdExchangeFormat::result(:unexpected, e.message)
+ end
+ end
+ end
+
+ class PutFileBooth < PutFile
+ def validate()
+ super
+ PcsdFile::validate_file_key_with_string(@id, @file, :name)
+ if @file[:name].empty?
+ raise PcsdExchangeFormat::Error.for_item('file', @id, "'name' is empty")
+ end
+ end
+
+ def dir()
+ return BOOTH_CONFIG_DIR
+ end
+
+ def full_file_name()
+ @full_file_name ||= File.join(self.dir, @file[:name])
+ end
+ end
+
+ class PutFileBoothAuthfile < PutFileBooth
+ def permissions()
+ return 0600
+ end
+ end
+
+ class PutFileBoothConfig < PutFileBooth
+ def binary?()
+ return false
+ end
+ end
+
+ class PutFilePcmkRemoteAuthkey < PutFile
+ def full_file_name
+ #TODO determine the file name from the system
+ @full_file_name ||= PACEMAKER_AUTHKEY
+ end
+
+ def permissions()
+ return 0400
+ end
+
+ def user()
+ return 'hacluster'
+ end
+
+ def group()
+ return 'haclient'
+ end
+
+ def write()
+ pacemaker_config_dir = File.dirname(PACEMAKER_AUTHKEY)
+ if not File.directory?(pacemaker_config_dir)
+ Dir.mkdir(pacemaker_config_dir)
+ end
+ super
+ end
+ end
+
+ class PutFileCorosyncAuthkey < PutFile
+ def full_file_name
+ @full_file_name ||= COROSYNC_AUTHKEY
+ end
+
+ def permissions()
+ return 0400
+ end
+ end
+
+ TYPES = {
+ "booth_authfile" => PutFileBoothAuthfile,
+ "booth_config" => PutFileBoothConfig,
+ "pcmk_remote_authkey" => PutFilePcmkRemoteAuthkey,
+ "corosync_authkey" => PutFileCorosyncAuthkey,
+ }
+end
+
+def PcsdFile.validate_file_key_with_string(id, file_hash, key_name)
+ unless file_hash.has_key?(key_name)
+ raise PcsdExchangeFormat::Error.for_item(
+ 'file', id, "'#{key_name}' is missing"
+ )
+ end
+
+ unless file_hash[key_name].is_a? String
+ raise PcsdExchangeFormat::Error.for_item(
+ 'file',
+ id,
+ "'#{key_name}' is not String: '#{file_hash[key_name].class}'"
+ )
+ end
+end
diff --git a/pcsd/pcsd_remove_file.rb b/pcsd/pcsd_remove_file.rb
new file mode 100644
index 0000000..892c92b
--- /dev/null
+++ b/pcsd/pcsd_remove_file.rb
@@ -0,0 +1,29 @@
+require 'settings.rb'
+
+module PcsdRemoveFile
+ class RemovePcmkRemoteAuthkey
+ def initialize(id, action)
+ @id = id
+ @action = action
+ end
+
+ def validate()
+ end
+
+ def process()
+ unless File.exists? PACEMAKER_AUTHKEY
+ return PcsdExchangeFormat::result(:not_found)
+ end
+ begin
+ File.delete(PACEMAKER_AUTHKEY)
+ return PcsdExchangeFormat::result(:deleted)
+ rescue => e
+ return PcsdExchangeFormat::result(:unexpected, e.message)
+ end
+ end
+ end
+
+ TYPES = {
+ "pcmk_remote_authkey" => RemovePcmkRemoteAuthkey,
+ }
+end
diff --git a/pcsd/public/css/liberation.css b/pcsd/public/css/liberation.css
old mode 100755
new mode 100644
index 53335ca..9178b81
--- a/pcsd/public/css/liberation.css
+++ b/pcsd/public/css/liberation.css
@@ -12,4 +12,3 @@
font-style: normal;
}
-
diff --git a/pcsd/public/css/overpass.css b/pcsd/public/css/overpass.css
old mode 100755
new mode 100644
index df713cd..34cac8f
--- a/pcsd/public/css/overpass.css
+++ b/pcsd/public/css/overpass.css
@@ -1,27 +1,14 @@
-
@font-face {
font-family: 'Overpass';
- src: url('overpass_regular-web.eot');
- src: local('Overpass'),
- url('overpass_regular-web.eot?#iefix') format('eot'),
- url('overpass_regular-web.woff') format('woff'),
- url('overpass_regular-web.ttf') format('truetype'),
- url('overpass_regular-web.svg#webfontLTZe4IYH') format('svg');
+ src: url('Overpass-Regular.ttf') format('truetype');
font-weight: normal;
font-style: normal;
-
}
@font-face {
font-family: 'Overpass';
- src: url('overpass_bold-web.eot');
- src: local('Overpass Bold'), local('Overpass-Bold'),
- url('overpass_bold-web.eot?#iefix') format('eot'),
- url('overpass_bold-web.woff') format('woff'),
- url('overpass_bold-web.ttf') format('truetype'),
- url('overpass_bold-web.svg#webfontzAU82Ltw') format('svg');
+ src: url('Overpass-Bold.ttf') format('truetype');
font-weight: bold;
font-style: normal;
-
}
diff --git a/pcsd/public/css/overpass_bold-web.eot b/pcsd/public/css/overpass_bold-web.eot
deleted file mode 100755
index 2a2784e..0000000
Binary files a/pcsd/public/css/overpass_bold-web.eot and /dev/null differ
diff --git a/pcsd/public/css/overpass_bold-web.svg b/pcsd/public/css/overpass_bold-web.svg
deleted file mode 100755
index c475685..0000000
--- a/pcsd/public/css/overpass_bold-web.svg
+++ /dev/null
@@ -1,470 +0,0 @@
-<?xml version="1.0" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
-<svg xmlns="http://www.w3.org/2000/svg">
-<metadata>
-This is a custom SVG webfont generated by Font Squirrel.
-Copyright : Copyright c 2011 by Red Hat Inc All rights reserved
-Designer : Delve Withrington
-Foundry : Delve Fonts
-Foundry URL : httpwwwredhatcom
-</metadata>
-<defs>
-<font id="webfontzAU82Ltw" horiz-adv-x="1210" >
-<font-face units-per-em="2048" ascent="1536" descent="-512" />
-<missing-glyph horiz-adv-x="471" />
-<glyph unicode="fi" horiz-adv-x="1157" d="M453 813v-813h-266v813h-145v234h145v159q0 78 22.5 127t59 76.5t82.5 38t94 10.5q63 0 120 -11.5t114 -39.5v-217q-31 12 -54 18.5t-41 10.5t-31 5t-24 1q-34 0 -55 -17.5t-21 -54.5v-106h226v-234h-226zM742 0v1047h266v-1047h-266zM875 1169q-32 0 -60.5 12.5 t-49.5 33.5t-33.5 49.5t-12.5 60.5t12.5 60t33.5 49t49.5 33t60.5 12t60.5 -12t49.5 -33t33 -49t12 -60t-12 -60.5t-33 -49.5t-49.5 -33.5t-60.5 -12.5z" />
-<glyph unicode="fl" horiz-adv-x="1145" d="M453 813v-813h-266v813h-145v234h145v159q0 78 22.5 127t59 76.5t82.5 38t94 10.5q63 0 120 -11.5t114 -39.5v-217q-31 12 -54 18.5t-41 10.5t-31 5t-24 1q-34 0 -55 -17.5t-21 -54.5v-106h226v-234h-226zM735 0v1354l266 125v-1479h-266z" />
-<glyph unicode=" " horiz-adv-x="471" />
-<glyph unicode="	" horiz-adv-x="471" />
-<glyph unicode=" " horiz-adv-x="471" />
-<glyph unicode="!" horiz-adv-x="614" d="M229 395l-57 600v463h266v-463l-57 -600h-152zM305 -25q-38 0 -70 13.5t-55.5 37.5t-37 56t-13.5 68t13.5 68t37 56t55.5 38t70 14t71 -14t57 -38t38 -56t14 -68t-14 -68t-38 -56t-57 -37.5t-71 -13.5z" />
-<glyph unicode=""" horiz-adv-x="918" d="M92 1434h320l-50 -596h-221zM506 1434h319l-49 -596h-221z" />
-<glyph unicode="#" horiz-adv-x="1434" d="M248 0l55 360h-217l33 230h219l43 289h-217l33 229h221l47 326h233l-49 -326h248l51 326h232l-50 -326h218l-33 -229h-219l-43 -289h217l-35 -230h-217l-55 -360h-234l55 360h-247l-56 -360h-233zM571 590h248l43 289h-248z" />
-<glyph unicode="$" horiz-adv-x="1231" d="M807 1047q-12 30 -31.5 57.5t-45.5 47.5t-59.5 32t-74.5 12q-71 0 -112.5 -33t-41.5 -92q0 -44 28.5 -74t75.5 -54t107 -45.5t122 -48t122 -61.5t107 -85.5t75.5 -121t28.5 -167.5q0 -90 -28 -162t-76.5 -125.5t-114 -88.5t-141.5 -50v-176h-269v182 q-139 34 -234.5 130.5t-142.5 240.5l252 92q21 -48 49.5 -88.5t64 -69.5t76.5 -45t87 -16q88 0 141 45t53 129q0 54 -28.5 90.5t-75.5 64.5t-106.5 50.5l-122 48t-122 57.5t-106.5 79t-75.5 112t-28.5 157q0 66 21 126t61.5 109.5t100 8 [...]
-<glyph unicode="%" horiz-adv-x="1729" d="M440 709q-95 0 -161.5 33.5t-108 87.5t-60 120.5t-18.5 132.5q0 41 8 84.5t25.5 85.5t44.5 79t66 65t89.5 44.5t114.5 16.5q95 0 161.5 -33.5t108 -87.5t60 -121t18.5 -133q0 -61 -18 -127t-58.5 -121t-107.5 -90.5t-164 -35.5zM440 930q31 0 53.5 12.5t37 33.5 t21.5 48.5t7 58.5q0 69 -29.5 111.5t-89.5 42.5q-31 0 -53 -12.5t-36.5 -33.5t-21.5 -49t-7 -59q0 -68 29 -110.5t89 -42.5zM508 -25h-264l977 1483h264zM1288 -25q-96 0 -162.5 34t-107.5 87.5t-59.5 120.5t-18.5 133q0 41 [...]
-<glyph unicode="&" horiz-adv-x="1473" d="M1079 0l-110 137q-79 -77 -179.5 -119.5t-230.5 -42.5q-99 0 -179.5 29.5t-137.5 83.5t-88 130.5t-31 170.5q0 78 24.5 141.5t66.5 114.5t97 90.5t117 71.5q-38 45 -63.5 84t-41.5 76.5t-23 75t-7 79.5q0 82 28 145t77.5 105.5t115.5 64t143 21.5q78 0 143.5 -23.5 t113 -66.5t74 -104.5t26.5 -137.5q0 -65 -18 -118t-49 -96.5t-72.5 -79.5t-88.5 -66l158 -188q8 32 12.5 68t4.5 71h262q-1 -49 -7.5 -99t-18 -97t-28.5 -90t-39 -77l295 -354h-346zM588 231q66 0 123 24.5t106 68.5 [...]
-<glyph unicode="'" horiz-adv-x="504" d="M92 1434h320l-50 -596h-221z" />
-<glyph unicode="(" horiz-adv-x="762" d="M393 -266q-47 61 -92 145.5t-80 190.5t-56.5 233.5t-21.5 276.5t23.5 281.5t61.5 244t87.5 201t100.5 151.5h264q-33 -41 -79 -122t-88 -195t-71.5 -256t-29.5 -305q0 -161 27.5 -298.5t66.5 -246t81 -185.5t70 -116h-264z" />
-<glyph unicode=")" horiz-adv-x="762" d="M104 -266q19 26 44.5 69t53 101.5t54 131t47.5 158t34 182.5t13 204q0 163 -29.5 305t-71.5 256t-88 195t-79 122h264q51 -62 100 -151.5t87.5 -201t61.5 -244t23 -281.5t-21.5 -276.5t-56.5 -233.5t-79.5 -190.5t-91.5 -145.5h-265z" />
-<glyph unicode="*" horiz-adv-x="1126" d="M588 776l-25 96l-24 -96l-152 -203l-199 144l152 205l80 39l-96 2l-222 73l76 238l225 -74l68 -51l-33 82v244h250v-244l-33 -82l68 51l225 74l76 -238l-221 -73l-96 -2l79 -39l152 -205l-199 -144z" />
-<glyph unicode="+" horiz-adv-x="1290" d="M778 573v-352h-266v352h-348v258h348v353h266v-353h348v-258h-348z" />
-<glyph unicode="," horiz-adv-x="506" d="M72 -205q14 12 33 34t38.5 48t36 54t25.5 53q-50 15 -83.5 55.5t-33.5 103.5q0 39 14 70t38 53.5t55 34.5t65 12q36 0 67.5 -13t55.5 -37.5t37.5 -61t13.5 -84.5q0 -60 -20.5 -119t-55 -112.5t-79.5 -98.5t-95 -76z" />
-<glyph unicode="-" horiz-adv-x="844" d="M154 477v256h536v-256h-536z" />
-<glyph unicode="." horiz-adv-x="494" d="M246 -25q-37 0 -68.5 13t-54.5 35.5t-36 53.5t-13 66q0 36 13 67t36 54t54.5 36t68.5 13t68.5 -13t55 -36t37 -54t13.5 -67q0 -35 -13.5 -66t-37 -53.5t-55 -35.5t-68.5 -13z" />
-<glyph unicode="/" horiz-adv-x="1069" d="M-31 -266l805 1724h275l-805 -1724h-275z" />
-<glyph unicode="0" horiz-adv-x="1364" d="M682 -25q-103 0 -182 30.5t-138 83.5t-98.5 125t-64 154.5t-35 171.5t-10.5 177t10.5 177.5t35 172t64 154t98.5 124t138 83t182 30.5t182 -30t138 -83t98.5 -124t64 -153.5t35 -172t10.5 -178.5q0 -88 -10.5 -177t-35 -171.5t-64 -154.5t-98.5 -125t-138 -83.5 t-182 -30.5zM682 238q73 0 121.5 41t77.5 108.5t41 153.5t12 176t-12 176t-41 153.5t-77.5 108.5t-121.5 41t-121.5 -41t-77.5 -108.5t-41 -153.5t-12 -176t12 -176t41 -153.5t77.5 -108.5t121.5 -41z" />
-<glyph unicode="1" horiz-adv-x="854" d="M291 0v1061h-219v219q51 0 94.5 7.5t79 25.5t61 47.5t39.5 73.5h221v-1434h-276z" />
-<glyph unicode="2" horiz-adv-x="1266" d="M133 0q0 159 39.5 282.5t107.5 218.5t159 166.5t194 127.5q39 21 76 44t65.5 51t46 62t17.5 76q0 79 -51 123.5t-136 44.5q-90 0 -151 -43t-97 -119l-247 113q31 72 81 129.5t113 98t138 62t157 21.5q111 0 198 -33t147 -90t92 -134.5t32 -166.5q0 -98 -27 -169t-75 -125 t-115 -97.5t-147 -87.5q-112 -62 -186 -131.5t-103 -157.5h665v-266h-993z" />
-<glyph unicode="3" horiz-adv-x="1241" d="M606 -25q-92 0 -170.5 23.5t-142.5 67.5t-112.5 106t-80.5 139l250 101q24 -48 54 -81.5t64 -54t71 -29.5t75 -9q48 0 87.5 12.5t68.5 36t45 57.5t16 78q0 40 -12.5 74.5t-39.5 60t-69 40.5t-100 15h-139v265h139q78 0 125 42.5t47 116.5q0 34 -11.5 63t-33.5 50.5 t-55 34t-76 12.5q-38 0 -69 -10t-57 -27.5t-46.5 -41.5t-36.5 -52l-250 96q31 67 76 122t103.5 94t129.5 60t154 21q100 0 182.5 -30.5t142 -83.5t92 -126t32.5 -157q0 -103 -47 -182t-127 -125q46 -18 86.5 -49.5t71 -74 [...]
-<glyph unicode="4" horiz-adv-x="1348" d="M780 0v287h-749v176l733 971h287v-891h174v-256h-174v-287h-271zM780 543v526l-389 -526h389z" />
-<glyph unicode="5" horiz-adv-x="1249" d="M272 330q26 -17 56 -33.5t65.5 -29.5t78.5 -21t95 -8q61 0 111.5 16.5t87 48t56.5 76.5t20 102q0 99 -53.5 155t-149.5 56q-74 0 -125.5 -27t-93.5 -73l-256 121l37 721h827v-267h-571l-17 -260q45 20 96.5 33.5t114.5 13.5q117 0 204.5 -38t146 -102.5t87.5 -150.5 t29 -182q0 -117 -41.5 -211t-115 -159.5t-174.5 -100.5t-220 -35q-81 0 -149 12.5t-123.5 32t-98 44t-73.5 49.5z" />
-<glyph unicode="6" horiz-adv-x="1270" d="M655 -25q-129 0 -226.5 42.5t-163.5 116.5t-99 175.5t-33 218.5q0 174 48 328t150 276t261 206.5t381 119.5v-262q-79 -12 -157 -38t-146 -69.5t-122 -105.5t-85 -145q38 29 98 50t137 21q102 0 188.5 -34t148.5 -96t97 -148t35 -191q0 -104 -37.5 -189.5t-105.5 -146.5 t-162 -95t-207 -34zM655 238q51 0 94 13.5t74.5 39.5t49.5 63.5t18 85.5q0 49 -18 87.5t-49.5 65t-74.5 40.5t-94 14t-94.5 -14t-75 -41t-49.5 -65.5t-18 -86.5t18 -85.5t49.5 -63.5t75 -39.5t94.5 -13.5z" />
-<glyph unicode="7" horiz-adv-x="1120" d="M348 0q1 175 24.5 331t69.5 299t115 276t162 261h-668v267h987v-244q-75 -86 -134.5 -186.5t-105 -207t-78.5 -215.5t-53.5 -212.5t-31 -197.5t-10.5 -171h-277z" />
-<glyph unicode="8" horiz-adv-x="1300" d="M649 -25q-119 0 -214.5 33t-162.5 93.5t-103 144.5t-36 186q0 126 65 214t173 138q-35 18 -67.5 45.5t-57.5 64.5t-40.5 83.5t-15.5 101.5q0 84 31.5 153.5t90.5 120t144 78t193 27.5q107 0 192.5 -27.5t145 -78t91.5 -120t32 -153.5q0 -54 -15 -99.5t-40.5 -82.5 t-58.5 -64.5t-68 -46.5q54 -25 98 -60t75.5 -79.5t48.5 -98.5t17 -116q0 -102 -36 -186t-103.5 -144.5t-163.5 -93.5t-215 -33zM649 225q53 0 97.5 16t76.5 44.5t50 67t18 83.5q0 47 -18 86t-50 67.5t-76.5 44t-97.5 15.5q [...]
-<glyph unicode="9" horiz-adv-x="1270" d="M336 238q77 2 153 20t142 60t118 113t80 179q-38 -32 -100 -55t-143 -23q-105 0 -193.5 31.5t-153 91.5t-101 146t-36.5 196q0 105 38 190.5t106.5 145.5t164 92.5t210.5 32.5q134 0 231.5 -41.5t160.5 -115.5t93.5 -175.5t30.5 -222.5q0 -207 -48 -375.5t-147 -289 t-250 -189t-356 -74.5v263zM618 795q113 0 177.5 52.5t64.5 151.5q0 45 -17 82.5t-48.5 63.5t-75.5 40.5t-98 14.5q-56 0 -101 -14.5t-76.5 -40.5t-48 -63.5t-16.5 -82.5q0 -99 64 -151.5t175 -52.5z" />
-<glyph unicode=":" horiz-adv-x="494" d="M246 657q-37 0 -68.5 13t-54.5 35.5t-36 53.5t-13 66q0 36 13 67t36 54t54.5 36t68.5 13t68.5 -13t55 -36t37 -54t13.5 -67q0 -35 -13.5 -66t-37 -53.5t-55 -35.5t-68.5 -13zM246 -25q-37 0 -68.5 13t-54.5 35.5t-36 53.5t-13 66q0 36 13 67t36 54t54.5 36t68.5 13 t68.5 -13t55 -36t37 -54t13.5 -67q0 -35 -13.5 -66t-37 -53.5t-55 -35.5t-68.5 -13z" />
-<glyph unicode=";" horiz-adv-x="494" d="M254 657q-37 0 -68.5 13t-54.5 35.5t-36 53.5t-13 66q0 36 13 67t36 54t54.5 36t68.5 13t68.5 -13t55 -36t37 -54t13.5 -67q0 -35 -13.5 -66t-37 -53.5t-55 -35.5t-68.5 -13zM66 -205q13 12 32.5 32.5t39 45.5t36 53.5t25.5 57.5q-50 15 -83.5 55.5t-33.5 103.5 q0 39 14 70t38 53.5t55 34.5t65 12q36 0 67.5 -13t55.5 -37.5t37.5 -61t13.5 -84.5q0 -65 -20.5 -125t-55 -112t-79.5 -95t-95 -74z" />
-<glyph unicode="<" horiz-adv-x="1290" d="M1126 201l-962 393v217l962 393v-278l-565 -224l565 -225v-276z" />
-<glyph unicode="=" horiz-adv-x="1290" d="M164 346v258h962v-258h-962zM164 801v258h962v-258h-962z" />
-<glyph unicode=">" horiz-adv-x="1290" d="M1126 594l-962 -393v278l565 223l-565 226v276l962 -393v-217z" />
-<glyph unicode="?" horiz-adv-x="1034" d="M367 387v74q0 95 24.5 160t61 112t79 81.5t79 67.5t61 70t24.5 90q0 70 -45.5 115t-132.5 45q-102 0 -156.5 -63t-54.5 -178h-266q0 120 35 212.5t98.5 155.5t151 96t192.5 33q108 0 191.5 -32t139.5 -88t85 -132t29 -164q0 -63 -14 -111t-37.5 -85.5t-53 -67.5 t-60.5 -56.5l-60.5 -52.5t-53 -56.5t-37.5 -67.5t-14 -86v-72h-266zM502 -25q-37 0 -68.5 13t-54.5 35.5t-36 53.5t-13 66q0 36 13 67t36 54t54.5 36t68.5 13t68.5 -13t55 -36t37 -54t13.5 -67q0 -35 -13.5 -66t-37 -53.5t-5 [...]
-<glyph unicode="@" horiz-adv-x="1714" d="M872 -25q-167 0 -304.5 47.5t-236.5 139t-153.5 225t-54.5 305.5q0 112 27 212.5t77 185.5t119.5 153t155 116t184 73.5t205.5 25.5q160 0 289.5 -48.5t221 -132t140.5 -195t49 -238.5q0 -113 -33 -208.5t-91.5 -164.5t-138 -108t-173.5 -39q-23 0 -48.5 6t-49 18.5 t-41.5 32.5t-27 49q-34 -60 -87 -83t-116 -23q-65 0 -119 24.5t-93 70t-60.5 110t-21.5 145.5q0 78 27 154t76.5 136t119.5 97t157 37q62 0 104.5 -24.5t63.5 -59.5l11 57h202l-67 -364q-13 -62 -20 -105.5t-7 -58.5q0 - [...]
-<glyph unicode="A" horiz-adv-x="1520" d="M1147 0l-111 297h-553l-110 -297h-297l549 1434h270l549 -1434h-297zM805 922q-11 26 -23.5 62t-21.5 67q-8 -31 -21 -67t-24 -62l-142 -373h373z" />
-<glyph unicode="B" horiz-adv-x="1460" d="M184 1434h629q131 0 220 -33t143.5 -87t78 -123t23.5 -142q0 -46 -9.5 -89t-30.5 -80.5t-54 -69t-80 -54.5q61 -23 103.5 -59.5t69 -80.5t38.5 -93t12 -97q0 -76 -25.5 -152t-83 -137t-150 -99t-226.5 -38h-658v1434zM856 258q56 0 93 15.5t58.5 39.5t30.5 54t9 59 q0 35 -10.5 68.5t-38.5 59.5t-76 42t-123 16h-338v-354h395zM780 870q110 0 163.5 42.5t53.5 111.5q0 27 -7 54t-26.5 49t-54.5 35.5t-92 13.5h-356v-306h319z" />
-<glyph unicode="C" horiz-adv-x="1352" d="M735 -23q-112 0 -200 30t-155 83t-114 124t-76.5 153.5t-43 171.5t-13.5 178q0 82 14 168.5t43.5 168.5t77 155t114 128t154.5 87t199 32q103 0 187 -28t148 -73.5t108.5 -103.5t68.5 -118l-248 -113q-26 40 -53 72t-58 53.5t-68.5 33t-84.5 11.5q-60 0 -107 -20.5 t-82 -55.5t-59 -82t-39 -99.5t-21.5 -108t-6.5 -107.5q0 -84 18 -168t56 -152t97.5 -110.5t143.5 -42.5q44 0 81 13.5t68.5 37.5t59 58t51.5 75l254 -96q-34 -79 -82 -144.5t-111.5 -112t-143.5 -72.5t-177 -26z" />
-<glyph unicode="D" horiz-adv-x="1495" d="M184 1434h484q125 0 225 -28.5t177.5 -78.5t132.5 -118.5t90.5 -148t52 -167.5t16.5 -176q0 -82 -15.5 -167.5t-49.5 -165t-88.5 -149.5t-133 -122.5t-181.5 -82.5t-235 -30h-475v1434zM692 262q107 0 180 41.5t118 107t65 146.5t20 160q0 84 -21 165.5t-64.5 145.5 t-112 103.5t-162.5 39.5h-254v-909h231z" />
-<glyph unicode="E" horiz-adv-x="1319" d="M184 0v1434h979v-263h-702v-301h413v-262h-413v-346h745v-262h-1022z" />
-<glyph unicode="F" horiz-adv-x="1217" d="M184 0v1434h961v-263h-684v-301h434v-262h-434v-608h-277z" />
-<glyph unicode="G" horiz-adv-x="1419" d="M764 -25q-111 0 -201 29t-160 80t-121 121.5t-84 152.5t-49 173.5t-16 185.5q0 87 16.5 176t51 171.5t86 154.5t122 125t159 83.5t196.5 30.5q104 0 183.5 -26t141 -68.5t107 -98t82.5 -114.5l-244 -139q-24 36 -52 68.5t-60.5 57.5t-71.5 39.5t-86 14.5 q-67 0 -118 -20.5t-89.5 -56t-64.5 -82.5t-42 -100t-23 -108.5t-7 -107.5q0 -91 19 -176.5t60.5 -152t107 -106.5t157.5 -40q52 0 97.5 18.5t79.5 50t53.5 74.5t19.5 92v12h-244v263h526v-211q0 -138 -41 -243.5t-112.5 -177t-168.5 [...]
-<glyph unicode="H" horiz-adv-x="1522" d="M1061 0v606h-600v-606h-277v1434h277v-566h600v566h276v-1434h-276z" />
-<glyph unicode="I" horiz-adv-x="666" d="M195 0v1434h276v-1434h-276z" />
-<glyph unicode="J" horiz-adv-x="1198" d="M532 -25q-78 0 -149.5 21.5t-133 62t-112 99t-86.5 133.5l242 112q40 -77 99.5 -119t139.5 -42q42 0 81 11.5t69 42.5t48 85.5t18 140.5v912h276v-936q0 -102 -21 -181t-57 -137t-84 -97.5t-103 -63t-113 -34t-114 -10.5z" />
-<glyph unicode="K" horiz-adv-x="1501" d="M1096 0l-369 686l-266 -328v-358h-277v1434h277v-684l543 684h327l-418 -514l506 -920h-323z" />
-<glyph unicode="L" horiz-adv-x="1184" d="M184 0v1434h277v-1168h692v-266h-969z" />
-<glyph unicode="M" horiz-adv-x="1698" d="M1237 0v737q0 12 0.5 28l1 32.5t1 30.5t1.5 22q-11 -34 -21.5 -61t-21.5 -52l-348 -762l-350 762q-11 25 -21.5 52t-21.5 61q1 -8 1.5 -22t1 -30.5l1 -32.5t0.5 -28v-737h-277v1434h281l352 -791q11 -26 20 -49.5t17 -48.5q8 25 17 48.5t20 49.5l358 791h264v-1434h-276z " />
-<glyph unicode="N" horiz-adv-x="1511" d="M1081 0l-569 836q-14 21 -30.5 51.5t-28.5 56.5q3 -23 4.5 -54t1.5 -54v-836h-275v1434h260l555 -826q13 -20 30 -50t30 -58q-3 29 -4.5 58.5t-1.5 49.5v826h274v-1434h-246z" />
-<glyph unicode="O" horiz-adv-x="1530" d="M766 -25q-116 0 -208 30.5t-162.5 83t-120.5 124t-81.5 154t-46 172t-14.5 178.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5t207.5 -30.5t161.5 -83t120 -124t81.5 -154t46 -172t14.5 -177.5q0 -89 -14.5 -178.5t-46 -172t-81.5 -154t-120 -124 t-161.5 -83t-207.5 -30.5zM766 242q96 0 161.5 45t106 115t58.5 153.5t18 161.5q0 55 -7.5 111.5t-24.5 109t-44 99t-65 81t-88.5 54.5t-114.5 20q-65 0 -115.5 -21t-89 -56.5t-65.5 -82.5t-44 -100t-24.5 -108t-7.5 -10 [...]
-<glyph unicode="P" horiz-adv-x="1370" d="M184 0v1434h615q136 0 229 -39.5t151 -104t83.5 -145.5t25.5 -164q0 -52 -12 -105.5t-37 -104t-64 -94.5t-93.5 -77.5t-125 -52.5t-157.5 -19h-338v-528h-277zM811 791q53 0 89 17t58.5 44t32.5 61t10 68q0 31 -8.5 64.5t-30 61.5t-58.5 46t-93 18h-350v-380h350z" />
-<glyph unicode="Q" horiz-adv-x="1530" d="M766 -25q-116 0 -208 30.5t-162.5 83t-120.5 124t-81.5 154t-46 172t-14.5 178.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5t207.5 -30.5t161.5 -83t120 -124t81.5 -154t46 -172t14.5 -177.5q0 -80 -12 -160.5t-37.5 -156.5t-65 -144t-94.5 -123 l94 -139l-205 -135l-100 147q-48 -14 -100 -22.5t-111 -8.5zM766 242q12 0 25.5 1t25.5 3l-106 157l209 136l102 -156q24 35 40.5 76t27 84.5t15.5 87.5t5 86q0 55 -7.5 111.5t-24.5 109t-44 99t-65 81t-88.5 54.5t-114 [...]
-<glyph unicode="R" horiz-adv-x="1462" d="M184 0v1434h666q136 0 227.5 -37.5t147.5 -99.5t80 -143t24 -169q0 -61 -17.5 -123.5t-52.5 -118.5t-87 -101.5t-121 -70.5l278 -571h-311l-268 535h-289v-535h-277zM854 797q53 0 89 15.5t58 41.5t31.5 60t9.5 71q0 34 -8.5 67.5t-29.5 60t-57.5 42.5t-92.5 16h-393 v-374h393z" />
-<glyph unicode="S" horiz-adv-x="1300" d="M872 1044q-13 32 -35 60t-52.5 48.5t-70 32t-87.5 11.5q-90 0 -140.5 -36t-50.5 -101q0 -43 32 -72t84 -52.5t118.5 -44.5t136.5 -46.5t136.5 -60t118.5 -85t84 -120t32 -166.5q0 -105 -40 -186t-109.5 -137t-164.5 -85t-205 -29q-101 0 -189 27.5t-159.5 78.5 t-124.5 124t-84 164l252 94q23 -50 55.5 -91.5t72.5 -71t86.5 -46t99.5 -16.5q52 0 94.5 11t73 33t47 54.5t16.5 75.5q0 52 -32 87.5t-84 62t-118.5 48l-136 45.5t-136 56t-118.5 79t-84 113t-32 160q0 81 32.5 152.5t94.5 12 [...]
-<glyph unicode="T" horiz-adv-x="1280" d="M778 1167v-1167h-276v1167h-410v267h1096v-267h-410z" />
-<glyph unicode="U" horiz-adv-x="1503" d="M752 -25q-73 0 -144.5 12.5t-135.5 40.5t-118.5 72.5t-94.5 108.5t-62.5 148t-22.5 192v885h277v-885q0 -89 20.5 -148.5t59.5 -94.5t95 -49.5t126 -14.5q69 0 124.5 14.5t95 49.5t60.5 94.5t21 148.5v885h276v-885q0 -160 -48 -269.5t-128.5 -177t-184.5 -97.5t-216 -30 z" />
-<glyph unicode="V" horiz-adv-x="1444" d="M856 0h-266l-508 1434h299l315 -938q8 -23 15.5 -49.5l13.5 -49.5q5 22 12 48t15 51l315 938h295z" />
-<glyph unicode="W" horiz-adv-x="1786" d="M1694 1434l-310 -1434h-258l-217 844q-9 34 -13 60.5l-7 47.5q-3 -21 -8 -47.5t-13 -58.5l-211 -846h-258l-307 1434h285l160 -807q8 -37 11.5 -61l6.5 -46q3 22 7 46.5t13 62.5l197 805h240l200 -805q10 -38 14 -62.5t7 -46.5l6.5 46t11.5 61l160 807h283z" />
-<glyph unicode="X" horiz-adv-x="1458" d="M1032 0l-303 481l-303 -481h-324l465 739l-436 695h324l274 -439l275 439h323l-436 -695l465 -739h-324z" />
-<glyph unicode="Y" horiz-adv-x="1458" d="M592 0v559l-531 875h314l354 -586l354 586h314l-531 -875v-559h-274z" />
-<glyph unicode="Z" horiz-adv-x="1362" d="M113 0v211l749 960h-704v263h1071v-211l-756 -961h756v-262h-1116z" />
-<glyph unicode="[" horiz-adv-x="799" d="M205 -266v1724h512v-256h-246v-1212h246v-256h-512z" />
-<glyph unicode="\" horiz-adv-x="1069" d="M774 -266l-805 1724h275l805 -1724h-275z" />
-<glyph unicode="]" horiz-adv-x="799" d="M594 -266h-512v256h244v1212h-244v256h512v-1724z" />
-<glyph unicode="^" horiz-adv-x="1290" d="M895 696l-250 416l-250 -416h-276l426 762h200l426 -762h-276z" />
-<glyph unicode="_" horiz-adv-x="1024" d="M-10 -362v204h1044v-204h-1044z" />
-<glyph unicode="`" horiz-adv-x="973" d="M539 1180l-365 319h332l291 -319h-258z" />
-<glyph unicode="a" horiz-adv-x="1143" d="M748 0v76q-18 -19 -43.5 -37t-59.5 -32.5t-75.5 -23t-92.5 -8.5q-81 0 -151 22t-122 66t-82 108.5t-30 150.5q0 90 36 155.5t94.5 108t133 62.5t152.5 20q75 0 133.5 -13.5t106.5 -36.5v54q0 86 -49 126t-140 40q-93 0 -171 -27t-152 -78l-82 215q37 25 83.5 47.5 t100.5 39.5t115.5 26.5t128.5 9.5q47 0 97 -5t97.5 -20t90 -43t74.5 -72.5t50.5 -109t18.5 -153.5v-668h-262zM528 209q73 0 127 25.5t93 60.5v113q-41 15 -91.5 24.5t-103.5 9.5q-41 0 -77.5 -6.5t-64 -21t-43.5 -37.5t-1 [...]
-<glyph unicode="b" horiz-adv-x="1180" d="M633 -25q-76 0 -127.5 25.5t-85.5 75.5v-76h-266v1358l266 121v-508q34 45 90 72.5t133 27.5q94 0 173.5 -37.5t137.5 -108t90.5 -172t32.5 -229.5q0 -133 -31 -235.5t-89 -172t-140 -105.5t-184 -36zM602 231q51 0 90 20.5t65.5 59t40 92.5t13.5 121q0 141 -54.5 216 t-154.5 75q-34 0 -63 -7.5t-52 -20t-40 -28t-27 -30.5v-414q29 -40 73 -62t109 -22z" />
-<glyph unicode="c" horiz-adv-x="1120" d="M580 -25q-109 0 -197.5 37.5t-150.5 108t-96 172t-34 229.5t34 230t96 173t150.5 108.5t197.5 37.5q160 0 272.5 -73.5t163.5 -233.5l-254 -84q-23 60 -68 97.5t-108 37.5q-49 0 -88.5 -21t-67 -59t-42.5 -92t-15 -121t15 -120.5t42 -91.5t65 -58.5t85 -20.5q68 0 115 39 t69 113l254 -76q-47 -165 -157.5 -248.5t-280.5 -83.5z" />
-<glyph unicode="d" horiz-adv-x="1180" d="M535 -25q-98 0 -178 36t-136.5 105.5t-87.5 172t-31 235.5q0 128 33 229.5t91 172t137.5 108t173.5 37.5q77 0 133 -27.5t90 -72.5v387l266 121v-1479h-266v76q-32 -48 -88.5 -74.5t-136.5 -26.5zM578 231q65 0 109 22t73 62v414q-10 15 -27 30.5t-40.5 28t-52 20 t-62.5 7.5q-101 0 -155 -75t-54 -216q0 -67 13.5 -121t40 -92.5t65.5 -59t90 -20.5z" />
-<glyph unicode="e" horiz-adv-x="1147" d="M592 -25q-114 0 -204.5 37.5t-154 108.5t-97.5 173t-34 230t34 229.5t98 172t154.5 108t203.5 37.5q97 0 182 -30t147 -91t98 -153.5t36 -218.5q0 -31 -1 -73.5t-7 -84.5h-682q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 126 25.5t99 82.5l164 -165q-66 -81 -162 -129 t-227 -48zM592 838q-94 0 -153.5 -47t-71.5 -146h428q-13 93 -68 143t-135 50z" />
-<glyph unicode="f" horiz-adv-x="739" d="M483 813v-813h-266v813h-145v234h145v159q0 78 22.5 127t59 76.5t82.5 38t94 10.5q63 0 120 -11.5t114 -39.5v-217q-31 12 -54 18.5t-41 10.5t-31 5t-24 1q-34 0 -55 -17.5t-21 -54.5v-106h226v-234h-226z" />
-<glyph unicode="g" horiz-adv-x="1180" d="M385 -188q105 7 177 20t116 38t63 63t19 94v28q-47 -46 -105.5 -63t-119.5 -17q-95 0 -174 34.5t-137 103.5t-90 171.5t-32 239.5q0 128 31.5 229.5t89 172t137.5 108t177 37.5q140 0 223 -80v56h266v-916q0 -59 -5.5 -117.5t-18 -110.5t-32.5 -96t-50 -75 q-40 -40 -86 -65.5t-95.5 -41.5t-102 -23.5t-104.5 -11.5zM578 231q65 0 108 22t74 62v414q-12 15 -28 30.5t-38.5 28t-51 20t-64.5 7.5q-101 0 -155 -75t-54 -216q0 -64 12.5 -117.5t38.5 -92.5t65.5 -61t92.5 -22z" />
-<glyph unicode="h" d="M801 0v614q0 53 -14 91t-39 62.5t-58 36t-72 11.5q-41 0 -77 -10.5t-63 -34t-42.5 -61t-15.5 -91.5v-618h-266v1356l266 123v-531q48 59 118.5 91t161.5 32q67 0 133 -20t118 -68t84 -127t32 -197v-659h-266z" />
-<glyph unicode="i" horiz-adv-x="578" d="M156 0v1047h266v-1047h-266zM289 1169q-32 0 -60.5 12.5t-49.5 33.5t-33.5 49.5t-12.5 60.5t12.5 60t33.5 49t49.5 33t60.5 12t60.5 -12t49.5 -33t33 -49t12 -60t-12 -60.5t-33 -49.5t-49.5 -33.5t-60.5 -12.5z" />
-<glyph unicode="j" horiz-adv-x="578" d="M-137 -227q81 18 137 37.5t90.5 45t50 58.5t15.5 78v1055h266v-990q0 -45 -4 -89.5t-15 -87t-31 -81.5t-53 -73q-37 -39 -77.5 -66.5t-82 -46.5t-83 -31t-78.5 -20zM289 1169q-32 0 -60.5 12.5t-49.5 33.5t-33.5 49.5t-12.5 60.5t12.5 60t33.5 49t49.5 33t60.5 12 t60.5 -12t49.5 -33t33 -49t12 -60t-12 -60.5t-33 -49.5t-49.5 -33.5t-60.5 -12.5z" />
-<glyph unicode="k" horiz-adv-x="1159" d="M807 0l-289 553l-98 -94v-459h-266v1354l266 125v-717l280 285h320l-307 -304l385 -743h-291z" />
-<glyph unicode="l" horiz-adv-x="594" d="M164 0v1354l266 125v-1479h-266z" />
-<glyph unicode="m" horiz-adv-x="1792" d="M1382 0v616q0 66 -16 105.5t-40 60.5t-53 27t-56 6q-30 0 -62.5 -6.5t-59 -27t-44 -59.5t-17.5 -104v-618h-266v616q0 66 -16 105.5t-40 60.5t-53.5 27t-56.5 6q-29 0 -61 -6.5t-59 -27t-44.5 -59.5t-17.5 -104v-618h-266v1047h266v-97q42 56 103 88.5t145 32.5 q99 0 165 -37.5t103 -109.5q56 77 140 112t181 35q83 0 155 -20.5t124.5 -68.5t82.5 -127.5t30 -197.5v-657h-267z" />
-<glyph unicode="n" d="M801 0v614q0 53 -14 91t-39 62.5t-58 36t-72 11.5q-41 0 -77 -10.5t-63 -34t-42.5 -61t-15.5 -91.5v-618h-266v1047h266v-99q48 59 118.5 91t161.5 32q67 0 133 -20t118 -68t84 -127t32 -197v-659h-266z" />
-<glyph unicode="o" horiz-adv-x="1167" d="M584 -25q-111 0 -200 37.5t-151.5 108t-96.5 172t-34 229.5t34 230t96.5 173t151.5 108.5t200 37.5t200 -37.5t151.5 -108.5t96 -173t33.5 -230t-33.5 -229.5t-96 -172t-151.5 -108t-200 -37.5zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 69 -15 123t-42.5 92 t-67.5 58t-90 20t-90 -20t-67.5 -58t-42.5 -92t-15 -123q0 -68 15 -122t42.5 -91.5t67.5 -57.5t90 -20z" />
-<glyph unicode="p" horiz-adv-x="1180" d="M643 -25q-77 0 -133 28t-90 73v-365l-266 -121v1457h266v-76q34 45 90.5 72.5t134.5 27.5q97 0 177 -36t136.5 -105.5t87.5 -172t31 -235.5q0 -128 -32.5 -229.5t-90.5 -172t-137.5 -108t-173.5 -37.5zM602 231q100 0 154.5 75.5t54.5 215.5q0 67 -13.5 121t-40 92 t-65.5 59t-90 21q-65 0 -109 -22t-73 -62v-414q10 -15 27 -30.5t40 -28t52 -20t63 -7.5z" />
-<glyph unicode="q" horiz-adv-x="1180" d="M537 -25q-94 0 -173.5 37.5t-137.5 108t-91 172t-33 229.5q0 133 32.5 235.5t91.5 172t141 105.5t180 36q75 0 127 -25.5t86 -74.5v76h266v-1336l-266 -121v486q-34 -45 -90 -73t-133 -28zM578 231q34 0 62.5 7.5t52 20t40.5 28t27 30.5v414q-29 40 -73 62t-109 22 q-51 0 -90 -21t-65.5 -59t-40 -92t-13.5 -121q0 -140 54 -215.5t155 -75.5z" />
-<glyph unicode="r" horiz-adv-x="879" d="M154 0v1047h266v-93q22 50 69.5 83.5t124.5 33.5q65 0 116.5 -21t96.5 -59l-36 -243q-38 26 -83.5 43.5t-107.5 17.5q-35 0 -67.5 -10t-57.5 -35t-40 -69t-15 -111v-584h-266z" />
-<glyph unicode="s" horiz-adv-x="1012" d="M698 758q-15 15 -34.5 31t-44.5 28.5t-56.5 20.5t-68.5 8q-60 0 -97 -20t-37 -58q0 -21 17 -39t45.5 -33t65.5 -28.5t77 -26.5q67 -22 129.5 -50t110 -68t76 -96t28.5 -134q0 -77 -29 -136t-82.5 -99.5t-129 -61.5t-168.5 -21q-64 0 -125.5 13.5t-116.5 38t-100 59.5 t-76 78l176 149q17 -16 41.5 -35.5t56 -36.5t69 -28.5t79.5 -11.5q69 0 113.5 20.5t44.5 71.5q0 25 -15.5 45t-44.5 37.5t-71 34.5t-95 36q-74 27 -134 56t-102 66.5t-64.5 87.5t-22.5 120q0 72 30 127t81.5 92.5t121 5 [...]
-<glyph unicode="t" horiz-adv-x="805" d="M465 -25q-68 0 -116 19t-78.5 54.5t-45 87t-14.5 116.5v561h-150v234h150v305l266 127v-432h234v-234h-234v-522q0 -40 19.5 -61t54.5 -21q40 0 83 11.5t89 33.5l-31 -231q-46 -23 -104 -35.5t-123 -12.5z" />
-<glyph unicode="u" d="M791 0v98q-48 -59 -119 -91t-162 -32q-67 0 -132.5 20.5t-117.5 68.5t-84.5 127t-32.5 196v660h267v-615q0 -53 14 -91t38.5 -62.5t57.5 -36t72 -11.5q41 0 77.5 10.5t63.5 34t42.5 61t15.5 91.5v619h266v-1047h-266z" />
-<glyph unicode="v" horiz-adv-x="1145" d="M711 0h-277l-383 1047h275l219 -621q8 -23 15 -47.5t13 -48.5q13 48 29 96l217 621h275z" />
-<glyph unicode="w" horiz-adv-x="1538" d="M1171 0h-235l-133 522q-4 14 -8.5 33.5l-9.5 41l-9.5 42.5t-7.5 39q-3 -18 -7.5 -39l-9.5 -42.5l-9.5 -41t-8.5 -33.5l-133 -522h-235l-293 1047h262l123 -510q8 -31 14 -68.5t12 -69.5l15 69.5t16 68.5l133 510h242l131 -510q8 -31 16 -68.5l15 -69.5q6 32 12 69.5 t14 68.5l127 510h262z" />
-<glyph unicode="x" d="M829 0l-165 236q-16 23 -32.5 49.5t-27.5 46.5q-9 -20 -25.5 -46t-33.5 -50l-164 -236h-309l379 543l-353 504h310l137 -199l33.5 -50.5t25.5 -45.5q11 20 27.5 46.5t32.5 49.5l139 199h309l-352 -504l379 -543h-310z" />
-<glyph unicode="y" horiz-adv-x="1167" d="M561 -397h-276l159 411l-393 1033h273l229 -605q18 -47 31 -96q6 25 14 49l16 47l230 605h272z" />
-<glyph unicode="z" horiz-adv-x="1114" d="M113 0v209l540 600h-501v238h839v-203l-538 -606h536v-238h-876z" />
-<glyph unicode="{" horiz-adv-x="778" d="M696 -266q-107 2 -193.5 33t-148.5 91t-95.5 149t-33.5 206v135q0 57 -20.5 92t-61.5 35h-61v242h61q41 0 61.5 35.5t20.5 91.5v133q0 117 33.5 206t95.5 149t148.5 91.5t193.5 34.5v-244q-102 -5 -153 -56.5t-51 -145.5v-148q0 -60 -10 -104.5t-28.5 -77t-45 -53.5 t-58.5 -33q65 -25 103.5 -93t38.5 -177v-148q0 -94 51 -146t153 -57v-241z" />
-<glyph unicode="|" horiz-adv-x="614" d="M174 -420v2069h266v-2069h-266z" />
-<glyph unicode="}" horiz-adv-x="778" d="M82 -25q102 5 153.5 57t51.5 146v148q0 109 38 177t103 93q-32 12 -58 33t-44.5 53.5t-28.5 77t-10 104.5v148q0 94 -51.5 145.5t-153.5 56.5v244q107 -3 194 -34.5t148.5 -91.5t95 -149t33.5 -206v-133q0 -56 21 -91.5t61 -35.5h61v-242h-61q-40 0 -61 -35t-21 -92v-135 q0 -117 -33.5 -206t-95 -149t-148.5 -91t-194 -33v241z" />
-<glyph unicode="~" horiz-adv-x="1290" d="M838 518q-66 0 -118.5 17.5t-96.5 38l-82.5 38t-75.5 17.5q-47 0 -68 -31.5t-26 -79.5h-211q0 73 16 140t51 118.5t90.5 82t133.5 30.5q65 0 116 -17.5t94.5 -38l82 -38t79.5 -17.5q47 0 68.5 30.5t26.5 80.5h210q0 -73 -15.5 -140t-50 -118.5t-90 -82t-134.5 -30.5z" />
-<glyph unicode="¡" horiz-adv-x="614" d="M176 -410v463l57 600h152l57 -600v-463h-266zM309 721q-38 0 -70.5 14t-57 38t-38.5 56t-14 68t14 67.5t38.5 55.5t57 37.5t70.5 13.5t70 -13.5t55.5 -37.5t37 -55.5t13.5 -67.5t-13.5 -68t-37 -56t-55.5 -38t-70 -14z" />
-<glyph unicode="¢" horiz-adv-x="1120" d="M428 -25v218q-77 22 -137.5 67.5t-102.5 112t-64 153t-22 191.5q0 103 22.5 190.5t65 155t103 114t135.5 68.5v213h256v-201q121 -21 205.5 -94t126.5 -205l-254 -84q-23 60 -68 98t-108 38q-49 0 -88.5 -21t-67 -59t-42.5 -92t-15 -121t15 -120.5t42 -91.5t65 -58.5 t85 -20.5q68 0 115 38.5t69 113.5l254 -76q-38 -138 -123.5 -218.5t-210.5 -103.5v-205h-256z" />
-<glyph unicode="£" horiz-adv-x="1229" d="M162 0v254q44 20 74 54.5t49 75.5t28.5 85t12.5 82h-172v246h110q-20 45 -36.5 105.5t-16.5 129.5q0 81 29 157.5t88.5 136t151.5 96t218 36.5q73 0 135 -9.5t117.5 -28.5t106.5 -47.5t102 -65.5l-104 -254q-28 23 -62 48.5t-77 47t-97 35.5t-121 14q-57 0 -97.5 -14 t-66 -36.5t-37.5 -52.5t-12 -61q0 -21 4.5 -43.5t13.5 -50t23.5 -62.5l34.5 -81h348v-246h-309q-5 -77 -28 -151.5t-72 -139.5h667v-260h-1005z" />
-<glyph unicode="¤" horiz-adv-x="1331" d="M666 174q-73 0 -135 15t-117 44l-142 -143l-198 199l149 149q-49 107 -49 244q0 69 14.5 130t38.5 114l-153 153l192 193l144 -144q54 29 117.5 45.5t138.5 16.5q68 0 131.5 -15.5t117.5 -46.5l142 144l200 -199l-151 -149q22 -51 36.5 -114t14.5 -128 q0 -69 -13.5 -132.5t-39.5 -117.5l153 -149l-194 -193l-145 146q-54 -29 -118.5 -45.5t-133.5 -16.5zM666 434q49 0 89.5 18.5t69 51.5t44.5 78.5t16 99.5t-16 99.5t-44.5 78.5t-69 51.5t-89.5 18.5t-89.5 -18.5t-69.5 -51.5t-45 [...]
-<glyph unicode="¥" horiz-adv-x="1245" d="M791 705h241v-201h-276v-133h276v-201h-276v-170h-267v170h-276v201h276v133h-276v201h240l-422 729h307l285 -510l280 510h311z" />
-<glyph unicode="¦" horiz-adv-x="614" d="M174 756v719h266v-719h-266zM174 -246v719h266v-719h-266z" />
-<glyph unicode="§" horiz-adv-x="1085" d="M741 1116q-16 16 -36.5 31.5t-46 27.5t-56 19.5t-65.5 7.5q-62 0 -99 -24t-37 -64q0 -23 20.5 -44t55 -41t80 -39.5l96.5 -39.5q62 -24 121.5 -52.5t106 -68.5t74.5 -96t28 -137q0 -75 -27.5 -130t-83.5 -95q57 -51 84 -111.5t27 -140.5q0 -86 -29 -153.5t-84.5 -114.5 t-136.5 -72t-184 -25q-143 0 -257.5 52t-189.5 149l193 164q20 -18 46 -37.5t58.5 -35.5t70.5 -26t81 -10q31 0 59 5.5t49.5 17.5t34.5 31.5t13 48.5q0 25 -20 48t-57 45.5t-89.5 46.5l-116.5 52q-56 24 -110 51 [...]
-<glyph unicode="¨" horiz-adv-x="973" d="M291 1190q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5zM680 1190q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5 t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5z" />
-<glyph unicode="©" horiz-adv-x="1729" d="M1182 483q-48 -105 -123.5 -159t-188.5 -54q-105 0 -176 39.5t-114 103t-61.5 143.5t-18.5 161q0 75 19.5 154.5t63 144t114 106t173.5 41.5q58 0 106.5 -14t87 -39t67.5 -60t49 -77l-187 -88q-23 38 -51 58t-72 20t-73.5 -23.5t-47.5 -59.5t-26 -79.5t-8 -83.5 q0 -43 9.5 -87t28.5 -79t48 -57.5t69 -22.5q41 0 71.5 21.5t51.5 56.5zM864 -25q-158 0 -293 57t-235 156.5t-156.5 235t-56.5 293.5q0 105 25.5 200.5t73 178t114.5 149.5t149.5 114.5t178 73t200.5 25.5t201 -25.5t17 [...]
-<glyph unicode="ª" horiz-adv-x="903" d="M575 580v61q-27 -31 -78.5 -56.5t-129.5 -25.5q-62 0 -115.5 18t-93.5 54t-63 89.5t-23 123.5q0 75 27.5 129t72.5 89t102.5 51.5t116.5 16.5q57 0 102 -10t82 -30v40q0 72 -37 105t-106 33t-128.5 -22t-115.5 -64l-67 176q60 42 145 71t191 29q55 0 114 -10.5t108 -45 t80.5 -100t31.5 -174.5v-548h-216zM414 752q54 0 94 20t67 49v92q-29 13 -67 21t-76 8q-30 0 -56.5 -5.5t-47 -17.5t-32 -30.5t-11.5 -44.5q0 -42 33 -67t96 -25z" />
-<glyph unicode="«" horiz-adv-x="1151" d="M373 0l-312 524l312 523h286l-311 -523l311 -524h-286zM834 0l-312 524l312 523h286l-311 -523l311 -524h-286z" />
-<glyph unicode="¬" horiz-adv-x="1290" d="M860 330v395h-696v258h962v-653h-266z" />
-<glyph unicode="­" horiz-adv-x="844" d="M154 477v256h536v-256h-536z" />
-<glyph unicode="®" horiz-adv-x="1729" d="M571 285v864h340q84 0 139 -23t87.5 -61t45.5 -86.5t13 -99.5q0 -38 -10 -75t-31.5 -69t-54.5 -58.5t-78 -43.5l37 -75l46 -94.5l47 -97l40 -81.5h-234l-153 323h-33v-323h-201zM911 799q23 0 39 7.5t26 19t14.5 25.5t4.5 28q0 12 -4 26t-13.5 25.5t-25.5 19.5t-41 8 h-139v-159h139zM864 -25q-158 0 -293 57t-235 156.5t-156.5 235t-56.5 293.5q0 105 25.5 200.5t73 178t114.5 149.5t149.5 114.5t178 73t200.5 25.5t201 -25.5t178 -73t149.5 -114.5t114.5 -149.5t73 -178t26 -200 [...]
-<glyph unicode="¯" horiz-adv-x="973" d="M147 1202v217h676v-217h-676z" />
-<glyph unicode="°" horiz-adv-x="844" d="M422 1145q28 0 51 10.5t40 29.5t26.5 44.5t9.5 54.5q0 28 -9.5 52.5t-26.5 42.5t-40 28t-51 10t-51 -10t-40 -28t-26.5 -42.5t-9.5 -52.5q0 -29 9.5 -54.5t26.5 -44.5t40 -29.5t51 -10.5zM422 915q-78 0 -145.5 28.5t-117.5 78.5t-79 117t-29 145t29 144.5t79 115.5 t117.5 77t145.5 28t145.5 -28t117.5 -77t79 -115.5t29 -144.5t-29 -145t-79 -117t-117.5 -78.5t-145.5 -28.5z" />
-<glyph unicode="±" horiz-adv-x="1290" d="M778 690v-297h-266v297h-348v258h348v297h266v-297h348v-258h-348zM164 0v258h962v-258h-962z" />
-<glyph unicode="²" horiz-adv-x="758" d="M49 909q0 100 23 179.5t63.5 141t95.5 107t119 78.5l35.5 20.5t31 22t21.5 25t8 30.5q0 29 -19.5 49.5t-57.5 20.5q-47 0 -74 -26t-41 -74l-203 49q17 57 47 104t71.5 80.5t94.5 52.5t115 19q68 0 123.5 -19.5t95 -54.5t61 -84t21.5 -108q0 -61 -18 -103.5t-48.5 -74.5 t-71.5 -56.5t-87 -48.5q-51 -28 -87 -57.5t-53 -63.5h363v-209h-629z" />
-<glyph unicode="³" horiz-adv-x="727" d="M53 1583q40 97 119 151t184 54q65 0 116 -20t86.5 -54t54 -78.5t18.5 -93.5q0 -32 -9 -60t-24 -52t-34.5 -42t-41.5 -30q63 -23 100 -72.5t37 -124.5q0 -58 -20.5 -107t-60.5 -84t-97.5 -55t-132.5 -20q-121 0 -203.5 66.5t-119.5 185.5l206 53q14 -54 45.5 -79t73.5 -25 t65 23t23 59q0 37 -25 59.5t-85 22.5h-78v188h76q46 0 65 21t19 49q0 29 -19 49t-53 20q-40 0 -61 -19.5t-35 -54.5z" />
-<glyph unicode="´" horiz-adv-x="973" d="M432 1180h-258l291 319h332z" />
-<glyph unicode="µ" horiz-adv-x="1221" d="M801 0v96q-29 -52 -85 -86.5t-134 -34.5q-53 0 -95 16t-65 36v-312l-268 -125v1457h266v-615q0 -53 14 -91t38.5 -62.5t58 -36t71.5 -11.5q42 0 78 10.5t63 34t42.5 61t15.5 91.5v619h266v-1047h-266z" />
-<glyph unicode="¶" horiz-adv-x="1325" d="M895 -221v1399h-150v-1467l-266 -121v971q-117 5 -200 45.5t-136 102t-77.5 136t-24.5 148.5q0 81 24.5 160.5t81 141.5t147.5 100.5t224 38.5h643v-1534z" />
-<glyph unicode="·" horiz-adv-x="494" d="M246 547q-37 0 -68.5 13t-54.5 35.5t-36 53.5t-13 66t13 66t36 54t54.5 36.5t68.5 13.5t68.5 -13.5t55 -36.5t37 -54t13.5 -66t-13.5 -66t-37 -53.5t-55 -35.5t-68.5 -13z" />
-<glyph unicode="¸" horiz-adv-x="973" d="M459 -500q-87 0 -141 27.5t-85 69.5l115 86q13 -16 34 -27.5t50 -11.5q48 0 73 21t25 59q0 37 -26 58t-74 21q-25 0 -46.5 -7t-41.5 -21l-55 67l147 178h144l-72 -114q60 0 103.5 -14t72 -39t42 -59t13.5 -73q0 -48 -18.5 -88.5t-54 -70t-87 -46t-118.5 -16.5z" />
-<glyph unicode="¹" horiz-adv-x="459" d="M125 909v635h-105v170q35 0 60 4t42.5 12t29.5 19t20 25h178v-865h-225z" />
-<glyph unicode="º" horiz-adv-x="952" d="M477 559q-90 0 -163 30.5t-124.5 88.5t-79.5 141.5t-28 188.5t28 188.5t79.5 141.5t124.5 89t163 31q91 0 163.5 -31t123.5 -89t78.5 -141.5t27.5 -188.5t-27.5 -188.5t-78.5 -141.5t-123.5 -88.5t-163.5 -30.5zM477 770q82 0 129 62.5t47 175.5q0 112 -47 176.5 t-129 64.5q-83 0 -129.5 -64.5t-46.5 -176.5q0 -113 46.5 -175.5t129.5 -62.5z" />
-<glyph unicode="»" horiz-adv-x="1151" d="M492 0l311 524l-311 523h286l312 -523l-312 -524h-286zM31 0l311 524l-311 523h286l312 -523l-312 -524h-286z" />
-<glyph unicode="¼" horiz-adv-x="1473" d="M28 -25l979 1483h256l-979 -1483h-256zM125 580v634h-105v170q35 0 60 4t42.5 12t29.5 19t20 25h178v-864h-225zM1102 0v164h-420v147l424 553h205v-512h115v-188h-115v-164h-209zM1102 352v238l-180 -238h180z" />
-<glyph unicode="½" horiz-adv-x="1554" d="M28 -25l979 1483h256l-979 -1483h-256zM125 580v634h-105v170q35 0 60 4t42.5 12t29.5 19t20 25h178v-864h-225zM848 623q17 57 47 104t71.5 80.5t94.5 52.5t115 19q68 0 123.5 -19.5t95 -55t61 -84.5t21.5 -108q0 -60 -18 -103t-48.5 -75t-71.5 -56t-87 -48 q-51 -28 -87 -57.5t-53 -63.5h363v-209h-629q0 100 23 179t63.5 140.5t95.5 107.5t119 79l35.5 20t31 22t21.5 25.5t8 30.5q0 29 -19.5 49.5t-57.5 20.5q-47 0 -74 -26.5t-41 -74.5z" />
-<glyph unicode="¾" horiz-adv-x="1712" d="M268 -25l979 1483h256l-979 -1483h-256zM53 1253q40 97 119 151t184 54q65 0 116 -20t86.5 -54t54 -78.5t18.5 -93.5q0 -32 -9 -60t-24 -52t-34.5 -42t-41.5 -30q63 -23 100 -72.5t37 -124.5q0 -58 -20.5 -107t-60.5 -84t-97.5 -55t-132.5 -20q-121 0 -203.5 66.5 t-119.5 185.5l206 53q14 -54 45.5 -79t73.5 -25t65 23.5t23 58.5q0 37 -25 59.5t-85 22.5h-78v188h76q46 0 65 21t19 49q0 29 -19 49t-53 20q-40 0 -61 -19.5t-35 -53.5zM1341 0v164h-420v147l424 553h205v-512h115v- [...]
-<glyph unicode="¿" horiz-adv-x="1034" d="M668 662v-74q0 -95 -24.5 -160t-61 -112.5t-79.5 -81.5t-79.5 -67.5t-61 -70.5t-24.5 -90q0 -35 11.5 -64t34 -50.5t56 -33.5t76.5 -12q102 0 156.5 63t54.5 179h266q0 -120 -35 -212.5t-98 -156t-151 -96.5t-193 -33q-108 0 -191 32t-139.5 88.5t-85 132.5t-28.5 163 q0 84 24.5 142.5t61 101.5t79 77t79 71t61 82t24.5 110v72h267zM532 733q-37 0 -68.5 13.5t-55 36.5t-37 54t-13.5 66t13.5 66t37 53.5t55 35.5t68.5 13t68.5 -13t55 -35.5t36.5 -53.5t13 -66t-13 -66t-36.5 -54t [...]
-<glyph unicode="À" horiz-adv-x="1520" d="M1147 0l-111 297h-553l-110 -297h-297l549 1434h270l549 -1434h-297zM805 922q-11 26 -23.5 62t-21.5 67q-8 -31 -21 -67t-24 -62l-142 -373h373zM781 1569l-365 319h332l291 -319h-258z" />
-<glyph unicode="Á" horiz-adv-x="1520" d="M1147 0l-111 297h-553l-110 -297h-297l549 1434h270l549 -1434h-297zM805 922q-11 26 -23.5 62t-21.5 67q-8 -31 -21 -67t-24 -62l-142 -373h373zM739 1569h-258l291 319h332z" />
-<glyph unicode="Â" horiz-adv-x="1520" d="M1147 0l-111 297h-553l-110 -297h-297l549 1434h270l549 -1434h-297zM805 922q-11 26 -23.5 62t-21.5 67q-8 -31 -21 -67t-24 -62l-142 -373h373zM931 1569l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="Ã" horiz-adv-x="1520" d="M1147 0l-111 297h-553l-110 -297h-297l549 1434h270l549 -1434h-297zM805 922q-11 26 -23.5 62t-21.5 67q-8 -31 -21 -67t-24 -62l-142 -373h373zM923 1583q-54 0 -99.5 14.5t-85.5 32.5t-77 32.5t-76 14.5q-38 0 -65.5 -19.5t-30.5 -66.5h-125q0 61 12 113t39.5 90 t73 59t111.5 21q54 0 99.5 -14.5t85.5 -32.5t77 -32.5t76 -14.5q38 0 65.5 19.5t30.5 66.5h125q0 -62 -12 -113.5t-40 -89.5t-73 -59t-111 -21z" />
-<glyph unicode="Ä" horiz-adv-x="1520" d="M1147 0l-111 297h-553l-110 -297h-297l549 1434h270l549 -1434h-297zM805 922q-11 26 -23.5 62t-21.5 67q-8 -31 -21 -67t-24 -62l-142 -373h373zM565 1579q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32 t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5zM954 1579q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11 [...]
-<glyph unicode="Å" horiz-adv-x="1520" d="M1147 0l-111 297h-553l-110 -297h-297l502 1315q-39 35 -61.5 85t-22.5 107q0 55 21 102.5t57 83t84.5 56t103.5 20.5q54 0 102.5 -20.5t85 -56t57.5 -83t21 -102.5q0 -57 -23 -107t-63 -85l504 -1315h-297zM805 922q-11 26 -23.5 62t-21.5 67q-8 -31 -21 -67t-24 -62 l-142 -373h373zM760 1425q32 0 56 23.5t24 58.5t-24 58.5t-56 23.5q-34 0 -57 -23.5t-23 -58.5t23 -58.5t57 -23.5z" />
-<glyph unicode="Æ" horiz-adv-x="1851" d="M805 0v297h-371l-168 -297h-297l832 1434h895v-263h-615v-301h326v-262h-326v-346h658v-262h-934zM805 549v409q-9 -24 -21.5 -49l-27.5 -53l-174 -307h223z" />
-<glyph unicode="Ç" horiz-adv-x="1352" d="M719 -500q-87 0 -140.5 27.5t-84.5 69.5l114 86q13 -16 34 -27.5t50 -11.5q48 0 73.5 21t25.5 59q0 37 -26.5 58t-74.5 21q-25 0 -46.5 -7t-41.5 -21l-55 67l115 140q-98 9 -175.5 44t-136.5 89t-100 123.5t-67 148t-38 162.5t-12 168q0 82 14 168.5t43.5 168.5t77 155 t114 128t154.5 87t199 32q103 0 187 -28t148 -73.5t108.5 -103.5t68.5 -118l-248 -113q-26 40 -53 72t-58 53.5t-68.5 33t-84.5 11.5q-60 0 -107 -20.5t-82 -55.5t-59 -82t-39 -99.5t-21.5 -108t-6.5 -107.5q0 - [...]
-<glyph unicode="È" horiz-adv-x="1319" d="M184 0v1434h979v-263h-702v-301h413v-262h-413v-346h745v-262h-1022zM670 1569l-365 319h332l291 -319h-258z" />
-<glyph unicode="É" horiz-adv-x="1319" d="M184 0v1434h979v-263h-702v-301h413v-262h-413v-346h745v-262h-1022zM708 1569h-258l291 319h332z" />
-<glyph unicode="Ê" horiz-adv-x="1319" d="M184 0v1434h979v-263h-702v-301h413v-262h-413v-346h745v-262h-1022zM860 1569l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="Ë" horiz-adv-x="1319" d="M184 0v1434h979v-263h-702v-301h413v-262h-413v-346h745v-262h-1022zM496 1579q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5z M885 1579q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5z" />
-<glyph unicode="Ì" horiz-adv-x="666" d="M195 0v1434h276v-1434h-276zM354 1569l-365 319h332l291 -319h-258z" />
-<glyph unicode="Í" horiz-adv-x="666" d="M195 0v1434h276v-1434h-276zM312 1569h-258l291 319h332z" />
-<glyph unicode="Î" horiz-adv-x="666" d="M195 0v1434h276v-1434h-276zM504 1569l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="Ï" horiz-adv-x="666" d="M195 0v1434h276v-1434h-276zM138 1579q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5zM527 1579q-32 0 -59 11.5t-47.5 31.5 t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5z" />
-<glyph unicode="Ð" horiz-adv-x="1495" d="M184 588h-127v258h127v588h484q125 0 225 -28.5t177.5 -78.5t132.5 -118.5t90.5 -148t52 -167.5t16.5 -176q0 -82 -15.5 -167.5t-49.5 -165t-88.5 -149.5t-133 -122.5t-181.5 -82.5t-235 -30h-475v588zM692 262q107 0 180 41.5t118 107t65 146.5t20 160q0 84 -21 165.5 t-64.5 145.5t-112 103.5t-162.5 39.5h-254v-325h293v-258h-293v-326h231z" />
-<glyph unicode="Ñ" horiz-adv-x="1511" d="M1081 0l-569 836q-14 21 -30.5 51.5t-28.5 56.5q3 -23 4.5 -54t1.5 -54v-836h-275v1434h260l555 -826q13 -20 30 -50t30 -58q-3 29 -4.5 58.5t-1.5 49.5v826h274v-1434h-246zM936 1583q-54 0 -99.5 14.5t-85.5 32.5t-77 32.5t-76 14.5q-38 0 -65.5 -19.5t-30.5 -66.5 h-125q0 61 12 113t39.5 90t73 59t111.5 21q54 0 99.5 -14.5t85.5 -32.5t77 -32.5t76 -14.5q38 0 65.5 19.5t30.5 66.5h125q0 -62 -12 -113.5t-40 -89.5t-73 -59t-111 -21z" />
-<glyph unicode="Ò" horiz-adv-x="1530" d="M766 -25q-116 0 -208 30.5t-162.5 83t-120.5 124t-81.5 154t-46 172t-14.5 178.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5t207.5 -30.5t161.5 -83t120 -124t81.5 -154t46 -172t14.5 -177.5q0 -89 -14.5 -178.5t-46 -172t-81.5 -154t-120 -124 t-161.5 -83t-207.5 -30.5zM766 242q96 0 161.5 45t106 115t58.5 153.5t18 161.5q0 55 -7.5 111.5t-24.5 109t-44 99t-65 81t-88.5 54.5t-114.5 20q-65 0 -115.5 -21t-89 -56.5t-65.5 -82.5t-44 -100t-24.5 -108t-7. [...]
-<glyph unicode="Ó" horiz-adv-x="1530" d="M766 -25q-116 0 -208 30.5t-162.5 83t-120.5 124t-81.5 154t-46 172t-14.5 178.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5t207.5 -30.5t161.5 -83t120 -124t81.5 -154t46 -172t14.5 -177.5q0 -89 -14.5 -178.5t-46 -172t-81.5 -154t-120 -124 t-161.5 -83t-207.5 -30.5zM766 242q96 0 161.5 45t106 115t58.5 153.5t18 161.5q0 55 -7.5 111.5t-24.5 109t-44 99t-65 81t-88.5 54.5t-114.5 20q-65 0 -115.5 -21t-89 -56.5t-65.5 -82.5t-44 -100t-24.5 -108t-7. [...]
-<glyph unicode="Ô" horiz-adv-x="1530" d="M766 -25q-116 0 -208 30.5t-162.5 83t-120.5 124t-81.5 154t-46 172t-14.5 178.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5t207.5 -30.5t161.5 -83t120 -124t81.5 -154t46 -172t14.5 -177.5q0 -89 -14.5 -178.5t-46 -172t-81.5 -154t-120 -124 t-161.5 -83t-207.5 -30.5zM766 242q96 0 161.5 45t106 115t58.5 153.5t18 161.5q0 55 -7.5 111.5t-24.5 109t-44 99t-65 81t-88.5 54.5t-114.5 20q-65 0 -115.5 -21t-89 -56.5t-65.5 -82.5t-44 -100t-24.5 -108t-7. [...]
-<glyph unicode="Õ" horiz-adv-x="1530" d="M766 -25q-116 0 -208 30.5t-162.5 83t-120.5 124t-81.5 154t-46 172t-14.5 178.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5t207.5 -30.5t161.5 -83t120 -124t81.5 -154t46 -172t14.5 -177.5q0 -89 -14.5 -178.5t-46 -172t-81.5 -154t-120 -124 t-161.5 -83t-207.5 -30.5zM766 242q96 0 161.5 45t106 115t58.5 153.5t18 161.5q0 55 -7.5 111.5t-24.5 109t-44 99t-65 81t-88.5 54.5t-114.5 20q-65 0 -115.5 -21t-89 -56.5t-65.5 -82.5t-44 -100t-24.5 -108t-7. [...]
-<glyph unicode="Ö" horiz-adv-x="1530" d="M766 -25q-116 0 -208 30.5t-162.5 83t-120.5 124t-81.5 154t-46 172t-14.5 178.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5t207.5 -30.5t161.5 -83t120 -124t81.5 -154t46 -172t14.5 -177.5q0 -89 -14.5 -178.5t-46 -172t-81.5 -154t-120 -124 t-161.5 -83t-207.5 -30.5zM766 242q96 0 161.5 45t106 115t58.5 153.5t18 161.5q0 55 -7.5 111.5t-24.5 109t-44 99t-65 81t-88.5 54.5t-114.5 20q-65 0 -115.5 -21t-89 -56.5t-65.5 -82.5t-44 -100t-24.5 -108t-7. [...]
-<glyph unicode="×" horiz-adv-x="1290" d="M647 514l-303 -305l-192 194l303 302l-301 303l186 186l303 -301l301 303l195 -192l-305 -304l303 -301l-189 -188z" />
-<glyph unicode="Ø" horiz-adv-x="1530" d="M133 -25l152 226q-40 53 -69 114.5t-47.5 128t-27 136t-8.5 137.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5q94 0 172 -20t143 -58l54 78h262l-152 -225q40 -53 69 -114.5t47.5 -128t27 -136t8.5 -137.5q0 -89 -14.5 -178.5t-46 -172t-81.5 -154 t-120 -124t-161.5 -83t-207.5 -30.5q-96 0 -175 20.5t-142 57.5l-54 -78h-262zM420 717q0 -63 10 -128t33 -124l463 684q-34 20 -73 31.5t-87 11.5q-65 0 -115.5 -21t-89 -56.5t-65.5 -82.5t-44 -100t-24.5 -108t [...]
-<glyph unicode="Ù" horiz-adv-x="1503" d="M752 -25q-73 0 -144.5 12.5t-135.5 40.5t-118.5 72.5t-94.5 108.5t-62.5 148t-22.5 192v885h277v-885q0 -89 20.5 -148.5t59.5 -94.5t95 -49.5t126 -14.5q69 0 124.5 14.5t95 49.5t60.5 94.5t21 148.5v885h276v-885q0 -160 -48 -269.5t-128.5 -177t-184.5 -97.5t-216 -30 zM770 1569l-365 319h332l291 -319h-258z" />
-<glyph unicode="Ú" horiz-adv-x="1503" d="M752 -25q-73 0 -144.5 12.5t-135.5 40.5t-118.5 72.5t-94.5 108.5t-62.5 148t-22.5 192v885h277v-885q0 -89 20.5 -148.5t59.5 -94.5t95 -49.5t126 -14.5q69 0 124.5 14.5t95 49.5t60.5 94.5t21 148.5v885h276v-885q0 -160 -48 -269.5t-128.5 -177t-184.5 -97.5t-216 -30 zM770 1569h-258l291 319h332z" />
-<glyph unicode="Û" horiz-adv-x="1503" d="M752 -25q-73 0 -144.5 12.5t-135.5 40.5t-118.5 72.5t-94.5 108.5t-62.5 148t-22.5 192v885h277v-885q0 -89 20.5 -148.5t59.5 -94.5t95 -49.5t126 -14.5q69 0 124.5 14.5t95 49.5t60.5 94.5t21 148.5v885h276v-885q0 -160 -48 -269.5t-128.5 -177t-184.5 -97.5t-216 -30 zM921 1569l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="Ü" horiz-adv-x="1503" d="M752 -25q-73 0 -144.5 12.5t-135.5 40.5t-118.5 72.5t-94.5 108.5t-62.5 148t-22.5 192v885h277v-885q0 -89 20.5 -148.5t59.5 -94.5t95 -49.5t126 -14.5q69 0 124.5 14.5t95 49.5t60.5 94.5t21 148.5v885h276v-885q0 -160 -48 -269.5t-128.5 -177t-184.5 -97.5t-216 -30 zM555 1579q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5zM944 1579 [...]
-<glyph unicode="Ý" horiz-adv-x="1458" d="M592 0v559l-531 875h314l354 -586l354 586h314l-531 -875v-559h-274zM751 1569h-258l291 319h332z" />
-<glyph unicode="Þ" horiz-adv-x="1370" d="M184 0v1434h277v-252h340q136 0 229 -39.5t150.5 -104t82.5 -145.5t25 -164q0 -78 -27 -159t-85.5 -146t-151 -106.5t-223.5 -41.5h-340v-276h-277zM811 539q53 0 89 17t58.5 44t32.5 61t10 68q0 31 -8.5 64.5t-30 61.5t-58.5 46.5t-93 18.5h-350v-381h350z" />
-<glyph unicode="ß" horiz-adv-x="1178" d="M668 -25q-72 0 -126.5 13t-92.5 35l118 194q25 -8 43 -10t41 -2q34 0 66 15t57 44t40 70.5t15 95.5q0 43 -13.5 85t-42.5 75t-74 53.5t-107 20.5h-76v229h76q35 0 65 13t52 36t34.5 54.5t12.5 68.5q0 34 -11 63.5t-32 52t-51.5 35.5t-69.5 13q-45 0 -81 -14.5t-61 -46 t-38 -81.5t-13 -122v-1250l-266 -125v1371q0 135 38 229.5t102 154t146 86.5t171 27q97 0 176.5 -27t136.5 -76t88 -119t31 -155q0 -48 -11.5 -92.5t-34.5 -82t-59.5 -68t-85.5 -50.5q61 -17 110.5 -49.5t83.5 -7 [...]
-<glyph unicode="à" horiz-adv-x="1143" d="M748 0v76q-18 -19 -43.5 -37t-59.5 -32.5t-75.5 -23t-92.5 -8.5q-81 0 -151 22t-122 66t-82 108.5t-30 150.5q0 90 36 155.5t94.5 108t133 62.5t152.5 20q75 0 133.5 -13.5t106.5 -36.5v54q0 86 -49 126t-140 40q-93 0 -171 -27t-152 -78l-82 215q37 25 83.5 47.5 t100.5 39.5t115.5 26.5t128.5 9.5q47 0 97 -5t97.5 -20t90 -43t74.5 -72.5t50.5 -109t18.5 -153.5v-668h-262zM528 209q73 0 127 25.5t93 60.5v113q-41 15 -91.5 24.5t-103.5 9.5q-41 0 -77.5 -6.5t-64 -21t-43.5 -37 [...]
-<glyph unicode="á" horiz-adv-x="1143" d="M748 0v76q-18 -19 -43.5 -37t-59.5 -32.5t-75.5 -23t-92.5 -8.5q-81 0 -151 22t-122 66t-82 108.5t-30 150.5q0 90 36 155.5t94.5 108t133 62.5t152.5 20q75 0 133.5 -13.5t106.5 -36.5v54q0 86 -49 126t-140 40q-93 0 -171 -27t-152 -78l-82 215q37 25 83.5 47.5 t100.5 39.5t115.5 26.5t128.5 9.5q47 0 97 -5t97.5 -20t90 -43t74.5 -72.5t50.5 -109t18.5 -153.5v-668h-262zM528 209q73 0 127 25.5t93 60.5v113q-41 15 -91.5 24.5t-103.5 9.5q-41 0 -77.5 -6.5t-64 -21t-43.5 -37 [...]
-<glyph unicode="â" horiz-adv-x="1143" d="M748 0v76q-18 -19 -43.5 -37t-59.5 -32.5t-75.5 -23t-92.5 -8.5q-81 0 -151 22t-122 66t-82 108.5t-30 150.5q0 90 36 155.5t94.5 108t133 62.5t152.5 20q75 0 133.5 -13.5t106.5 -36.5v54q0 86 -49 126t-140 40q-93 0 -171 -27t-152 -78l-82 215q37 25 83.5 47.5 t100.5 39.5t115.5 26.5t128.5 9.5q47 0 97 -5t97.5 -20t90 -43t74.5 -72.5t50.5 -109t18.5 -153.5v-668h-262zM528 209q73 0 127 25.5t93 60.5v113q-41 15 -91.5 24.5t-103.5 9.5q-41 0 -77.5 -6.5t-64 -21t-43.5 -37 [...]
-<glyph unicode="ã" horiz-adv-x="1143" d="M748 0v76q-18 -19 -43.5 -37t-59.5 -32.5t-75.5 -23t-92.5 -8.5q-81 0 -151 22t-122 66t-82 108.5t-30 150.5q0 90 36 155.5t94.5 108t133 62.5t152.5 20q75 0 133.5 -13.5t106.5 -36.5v54q0 86 -49 126t-140 40q-93 0 -171 -27t-152 -78l-82 215q37 25 83.5 47.5 t100.5 39.5t115.5 26.5t128.5 9.5q47 0 97 -5t97.5 -20t90 -43t74.5 -72.5t50.5 -109t18.5 -153.5v-668h-262zM528 209q73 0 127 25.5t93 60.5v113q-41 15 -91.5 24.5t-103.5 9.5q-41 0 -77.5 -6.5t-64 -21t-43.5 -37 [...]
-<glyph unicode="ä" horiz-adv-x="1143" d="M748 0v76q-18 -19 -43.5 -37t-59.5 -32.5t-75.5 -23t-92.5 -8.5q-81 0 -151 22t-122 66t-82 108.5t-30 150.5q0 90 36 155.5t94.5 108t133 62.5t152.5 20q75 0 133.5 -13.5t106.5 -36.5v54q0 86 -49 126t-140 40q-93 0 -171 -27t-152 -78l-82 215q37 25 83.5 47.5 t100.5 39.5t115.5 26.5t128.5 9.5q47 0 97 -5t97.5 -20t90 -43t74.5 -72.5t50.5 -109t18.5 -153.5v-668h-262zM528 209q73 0 127 25.5t93 60.5v113q-41 15 -91.5 24.5t-103.5 9.5q-41 0 -77.5 -6.5t-64 -21t-43.5 -37 [...]
-<glyph unicode="å" horiz-adv-x="1143" d="M748 0v76q-18 -19 -43.5 -37t-59.5 -32.5t-75.5 -23t-92.5 -8.5q-81 0 -151 22t-122 66t-82 108.5t-30 150.5q0 90 36 155.5t94.5 108t133 62.5t152.5 20q75 0 133.5 -13.5t106.5 -36.5v54q0 86 -49 126t-140 40q-93 0 -171 -27t-152 -78l-82 215q37 25 83.5 47.5 t100.5 39.5t115.5 26.5t128.5 9.5q47 0 97 -5t97.5 -20t90 -43t74.5 -72.5t50.5 -109t18.5 -153.5v-668h-262zM528 209q73 0 127 25.5t93 60.5v113q-41 15 -91.5 24.5t-103.5 9.5q-41 0 -77.5 -6.5t-64 -21t-43.5 -37 [...]
-<glyph unicode="æ" horiz-adv-x="1753" d="M1196 -25q-121 0 -215 43.5t-156 124.5q-83 -92 -179 -130t-193 -38q-75 0 -141 22t-115 66t-77 108.5t-28 150.5q0 92 34 158t88.5 109t124.5 63t142 20q71 0 126 -13t100 -36v49q0 86 -45 126t-130 40t-156.5 -27t-139.5 -78l-82 215q36 25 80 47.5t95.5 39.5 t109.5 26.5t122 9.5q41 0 84.5 -4.5t85.5 -17t79.5 -35t66.5 -58.5q55 56 137.5 85.5t181.5 29.5q97 0 182 -30t148 -91t99 -153.5t36 -218.5q0 -31 -3 -72t-11 -86h-678q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 1 [...]
-<glyph unicode="ç" horiz-adv-x="1120" d="M543 -500q-87 0 -141 27.5t-85 69.5l115 86q13 -16 34 -27.5t50 -11.5q48 0 73 21t25 59q0 37 -26 58t-74 21q-25 0 -46.5 -7t-41.5 -21l-55 67l116 142q-89 13 -160 56t-121 112t-77 162.5t-27 207.5q0 128 34 230t96 173t150.5 108.5t197.5 37.5q160 0 272.5 -73.5 t163.5 -233.5l-254 -84q-23 60 -68 97.5t-108 37.5q-49 0 -88.5 -21t-67 -59t-42.5 -92t-15 -121t15 -120.5t42 -91.5t65 -58.5t85 -20.5q68 0 115 39t69 113l254 -76q-43 -152 -140 -234t-243 -96l-45 -71q60 0 1 [...]
-<glyph unicode="è" horiz-adv-x="1147" d="M592 -25q-114 0 -204.5 37.5t-154 108.5t-97.5 173t-34 230t34 229.5t98 172t154.5 108t203.5 37.5q97 0 182 -30t147 -91t98 -153.5t36 -218.5q0 -31 -1 -73.5t-7 -84.5h-682q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 126 25.5t99 82.5l164 -165q-66 -81 -162 -129 t-227 -48zM592 838q-94 0 -153.5 -47t-71.5 -146h428q-13 93 -68 143t-135 50zM613 1180l-365 319h332l291 -319h-258z" />
-<glyph unicode="é" horiz-adv-x="1147" d="M592 -25q-114 0 -204.5 37.5t-154 108.5t-97.5 173t-34 230t34 229.5t98 172t154.5 108t203.5 37.5q97 0 182 -30t147 -91t98 -153.5t36 -218.5q0 -31 -1 -73.5t-7 -84.5h-682q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 126 25.5t99 82.5l164 -165q-66 -81 -162 -129 t-227 -48zM592 838q-94 0 -153.5 -47t-71.5 -146h428q-13 93 -68 143t-135 50zM573 1180h-258l291 319h332z" />
-<glyph unicode="ê" horiz-adv-x="1147" d="M592 -25q-114 0 -204.5 37.5t-154 108.5t-97.5 173t-34 230t34 229.5t98 172t154.5 108t203.5 37.5q97 0 182 -30t147 -91t98 -153.5t36 -218.5q0 -31 -1 -73.5t-7 -84.5h-682q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 126 25.5t99 82.5l164 -165q-66 -81 -162 -129 t-227 -48zM592 838q-94 0 -153.5 -47t-71.5 -146h428q-13 93 -68 143t-135 50zM766 1180l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="ë" horiz-adv-x="1147" d="M592 -25q-114 0 -204.5 37.5t-154 108.5t-97.5 173t-34 230t34 229.5t98 172t154.5 108t203.5 37.5q97 0 182 -30t147 -91t98 -153.5t36 -218.5q0 -31 -1 -73.5t-7 -84.5h-682q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 126 25.5t99 82.5l164 -165q-66 -81 -162 -129 t-227 -48zM592 838q-94 0 -153.5 -47t-71.5 -146h428q-13 93 -68 143t-135 50zM400 1190q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -4 [...]
-<glyph unicode="ì" horiz-adv-x="578" d="M272 1180l-365 319h332l291 -319h-258zM156 0v1047h266v-1047h-266z" />
-<glyph unicode="í" horiz-adv-x="578" d="M310 1180h-258l291 319h332zM156 0v1047h266v-1047h-266z" />
-<glyph unicode="î" horiz-adv-x="578" d="M156 0v1047h266v-1047h-266zM461 1180l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="ï" horiz-adv-x="578" d="M95 1190q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5zM484 1190q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5 t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5zM156 0v1047h266v-1047h-266z" />
-<glyph unicode="ð" horiz-adv-x="1167" d="M584 -25q-104 0 -192 36t-152.5 100t-101 151.5t-36.5 190.5q0 97 33.5 184.5t94 153.5t145.5 104.5t188 38.5q57 0 107 -16.5t88 -45.5q-7 30 -20 61.5t-30.5 61t-38 55.5t-42.5 46l-119 -123l-148 137l82 84q-40 12 -85 19.5t-93 7.5v237q110 0 199.5 -19.5 t161.5 -52.5l116 121l146 -139l-90 -94q70 -61 120.5 -141.5t83.5 -174.5t48.5 -198t15.5 -211q0 -119 -29 -223.5t-88.5 -182.5t-150 -123t-213.5 -45zM584 231q45 0 84.5 17t68.5 46.5t45.5 69.5t16.5 87q0 51 -16.5 92 [...]
-<glyph unicode="ñ" d="M801 0v614q0 53 -14 91t-39 62.5t-58 36t-72 11.5q-41 0 -77 -10.5t-63 -34t-42.5 -61t-15.5 -91.5v-618h-266v1047h266v-99q48 59 118.5 91t161.5 32q67 0 133 -20t118 -68t84 -127t32 -197v-659h-266zM768 1194q-54 0 -99.5 14.5t-85.5 32.5t-77 32.5t-76 14.5 q-38 0 -65.5 -19.5t-30.5 -66.5h-125q0 61 12 113t39.5 90t73 59t111.5 21q54 0 99.5 -14.5t85.5 -32.5t77 -32.5t76 -14.5q38 0 65.5 19.5t30.5 66.5h125q0 -62 -12 -113.5t-40 -89.5t-73 -59t-111 -21z" />
-<glyph unicode="ò" horiz-adv-x="1167" d="M584 -25q-111 0 -200 37.5t-151.5 108t-96.5 172t-34 229.5t34 230t96.5 173t151.5 108.5t200 37.5t200 -37.5t151.5 -108.5t96 -173t33.5 -230t-33.5 -229.5t-96 -172t-151.5 -108t-200 -37.5zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 69 -15 123t-42.5 92 t-67.5 58t-90 20t-90 -20t-67.5 -58t-42.5 -92t-15 -123q0 -68 15 -122t42.5 -91.5t67.5 -57.5t90 -20zM602 1180l-365 319h332l291 -319h-258z" />
-<glyph unicode="ó" horiz-adv-x="1167" d="M584 -25q-111 0 -200 37.5t-151.5 108t-96.5 172t-34 229.5t34 230t96.5 173t151.5 108.5t200 37.5t200 -37.5t151.5 -108.5t96 -173t33.5 -230t-33.5 -229.5t-96 -172t-151.5 -108t-200 -37.5zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 69 -15 123t-42.5 92 t-67.5 58t-90 20t-90 -20t-67.5 -58t-42.5 -92t-15 -123q0 -68 15 -122t42.5 -91.5t67.5 -57.5t90 -20zM561 1180h-258l291 319h332z" />
-<glyph unicode="ô" horiz-adv-x="1167" d="M584 -25q-111 0 -200 37.5t-151.5 108t-96.5 172t-34 229.5t34 230t96.5 173t151.5 108.5t200 37.5t200 -37.5t151.5 -108.5t96 -173t33.5 -230t-33.5 -229.5t-96 -172t-151.5 -108t-200 -37.5zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 69 -15 123t-42.5 92 t-67.5 58t-90 20t-90 -20t-67.5 -58t-42.5 -92t-15 -123q0 -68 15 -122t42.5 -91.5t67.5 -57.5t90 -20zM753 1180l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="õ" horiz-adv-x="1167" d="M584 -25q-111 0 -200 37.5t-151.5 108t-96.5 172t-34 229.5t34 230t96.5 173t151.5 108.5t200 37.5t200 -37.5t151.5 -108.5t96 -173t33.5 -230t-33.5 -229.5t-96 -172t-151.5 -108t-200 -37.5zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 69 -15 123t-42.5 92 t-67.5 58t-90 20t-90 -20t-67.5 -58t-42.5 -92t-15 -123q0 -68 15 -122t42.5 -91.5t67.5 -57.5t90 -20zM743 1194q-54 0 -99.5 14.5t-85.5 32.5t-77 32.5t-76 14.5q-38 0 -65.5 -19.5t-30.5 -66.5h-125q0 61 12 1 [...]
-<glyph unicode="ö" horiz-adv-x="1167" d="M584 -25q-111 0 -200 37.5t-151.5 108t-96.5 172t-34 229.5t34 230t96.5 173t151.5 108.5t200 37.5t200 -37.5t151.5 -108.5t96 -173t33.5 -230t-33.5 -229.5t-96 -172t-151.5 -108t-200 -37.5zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 69 -15 123t-42.5 92 t-67.5 58t-90 20t-90 -20t-67.5 -58t-42.5 -92t-15 -123q0 -68 15 -122t42.5 -91.5t67.5 -57.5t90 -20zM387 1190q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 - [...]
-<glyph unicode="÷" horiz-adv-x="1290" d="M164 573v258h962v-258h-962zM645 913q-34 0 -63 12.5t-50.5 33.5t-34 49.5t-12.5 60.5t12.5 60.5t34 49.5t50.5 33.5t63 12.5t63 -12.5t50.5 -33.5t34 -49.5t12.5 -60.5t-12.5 -60.5t-34 -49.5t-50.5 -33.5t-63 -12.5zM645 180q-34 0 -63 12.5t-50.5 33.5t-34 49.5 t-12.5 60.5t12.5 60.5t34 49.5t50.5 33.5t63 12.5t63 -12.5t50.5 -33.5t34 -49.5t12.5 -60.5t-12.5 -60.5t-34 -49.5t-50.5 -33.5t-63 -12.5z" />
-<glyph unicode="ø" horiz-adv-x="1169" d="M102 -25l111 168q-54 70 -82.5 165.5t-28.5 213.5q0 128 34 230t96.5 173t151.5 108.5t200 37.5q60 0 114 -11.5t101 -33.5l28 45h240l-113 -170q54 -71 82.5 -165.5t28.5 -213.5q0 -128 -33.5 -229.5t-96 -172t-151.5 -108t-200 -37.5q-60 0 -113.5 11.5t-99.5 31.5 l-29 -43h-240zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 34 -4 67.5t-11 55.5l-266 -403q16 -7 31.5 -9t34.5 -2zM369 522q0 -34 4 -65.5t10 -57.5l268 406q-12 5 -31 7.5t-36 2.5q-50 0 -90 -20t-67.5 - [...]
-<glyph unicode="ù" d="M791 0v98q-48 -59 -119 -91t-162 -32q-67 0 -132.5 20.5t-117.5 68.5t-84.5 127t-32.5 196v660h267v-615q0 -53 14 -91t38.5 -62.5t57.5 -36t72 -11.5q41 0 77.5 10.5t63.5 34t42.5 61t15.5 91.5v619h266v-1047h-266zM615 1180l-365 319h332l291 -319h-258z" />
-<glyph unicode="ú" d="M791 0v98q-48 -59 -119 -91t-162 -32q-67 0 -132.5 20.5t-117.5 68.5t-84.5 127t-32.5 196v660h267v-615q0 -53 14 -91t38.5 -62.5t57.5 -36t72 -11.5q41 0 77.5 10.5t63.5 34t42.5 61t15.5 91.5v619h266v-1047h-266zM573 1180h-258l291 319h332z" />
-<glyph unicode="û" d="M791 0v98q-48 -59 -119 -91t-162 -32q-67 0 -132.5 20.5t-117.5 68.5t-84.5 127t-32.5 196v660h267v-615q0 -53 14 -91t38.5 -62.5t57.5 -36t72 -11.5q41 0 77.5 10.5t63.5 34t42.5 61t15.5 91.5v619h266v-1047h-266zM766 1180l-172 141l-172 -141h-245l290 319h256 l289 -319h-246z" />
-<glyph unicode="ü" d="M791 0v98q-48 -59 -119 -91t-162 -32q-67 0 -132.5 20.5t-117.5 68.5t-84.5 127t-32.5 196v660h267v-615q0 -53 14 -91t38.5 -62.5t57.5 -36t72 -11.5q41 0 77.5 10.5t63.5 34t42.5 61t15.5 91.5v619h266v-1047h-266zM400 1190q-32 0 -59 11.5t-47.5 31.5t-32 47 t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5zM789 1190q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 3 [...]
-<glyph unicode="ý" horiz-adv-x="1167" d="M561 -397h-276l159 411l-393 1033h273l229 -605q18 -47 31 -96q6 25 14 49l16 47l230 605h272zM567 1180h-258l291 319h332z" />
-<glyph unicode="þ" horiz-adv-x="1169" d="M633 -25q-78 0 -133.5 28t-89.5 73v-355l-267 -124v1734l267 127v-487q34 45 90 72.5t135 27.5q97 0 177 -36t136.5 -105.5t87.5 -172t31 -235.5q0 -128 -32.5 -229.5t-90.5 -172t-137.5 -108t-173.5 -37.5zM592 231q100 0 154.5 75.5t54.5 215.5q0 67 -13.5 121t-40 92 t-65.5 59t-90 21q-65 0 -109 -22t-73 -62v-414q10 -15 27 -30.5t40 -28t52 -20t63 -7.5z" />
-<glyph unicode="ÿ" horiz-adv-x="1167" d="M561 -397h-276l159 411l-393 1033h273l229 -605q18 -47 31 -96q6 25 14 49l16 47l230 605h272zM391 1190q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47 t-47.5 -31.5t-58.5 -11.5zM780 1190q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t [...]
-<glyph unicode="Ā" horiz-adv-x="1520" d="M1147 0l-111 297h-553l-110 -297h-297l549 1434h270l549 -1434h-297zM805 922q-11 26 -23.5 62t-21.5 67q-8 -31 -21 -67t-24 -62l-142 -373h373zM421 1591v217h676v-217h-676z" />
-<glyph unicode="ā" horiz-adv-x="1143" d="M748 0v76q-18 -19 -43.5 -37t-59.5 -32.5t-75.5 -23t-92.5 -8.5q-81 0 -151 22t-122 66t-82 108.5t-30 150.5q0 90 36 155.5t94.5 108t133 62.5t152.5 20q75 0 133.5 -13.5t106.5 -36.5v54q0 86 -49 126t-140 40q-93 0 -171 -27t-152 -78l-82 215q37 25 83.5 47.5 t100.5 39.5t115.5 26.5t128.5 9.5q47 0 97 -5t97.5 -20t90 -43t74.5 -72.5t50.5 -109t18.5 -153.5v-668h-262zM528 209q73 0 127 25.5t93 60.5v113q-41 15 -91.5 24.5t-103.5 9.5q-41 0 -77.5 -6.5t-64 -21t-43.5 -3 [...]
-<glyph unicode="Ă" horiz-adv-x="1520" d="M1147 0l-111 297h-553l-110 -297h-297l549 1434h270l549 -1434h-297zM805 922q-11 26 -23.5 62t-21.5 67q-8 -31 -21 -67t-24 -62l-142 -373h373zM759 1560q-101 0 -174 29t-121 77t-71 111t-23 132h205q0 -29 10.5 -57t32.5 -50t57 -35.5t84 -13.5t84.5 13.5t58 35.5 t33.5 50t11 57h205q0 -69 -23.5 -132.5t-71.5 -111.5t-122 -76.5t-175 -28.5z" />
-<glyph unicode="ă" horiz-adv-x="1143" d="M748 0v76q-18 -19 -43.5 -37t-59.5 -32.5t-75.5 -23t-92.5 -8.5q-81 0 -151 22t-122 66t-82 108.5t-30 150.5q0 90 36 155.5t94.5 108t133 62.5t152.5 20q75 0 133.5 -13.5t106.5 -36.5v54q0 86 -49 126t-140 40q-93 0 -171 -27t-152 -78l-82 215q37 25 83.5 47.5 t100.5 39.5t115.5 26.5t128.5 9.5q47 0 97 -5t97.5 -20t90 -43t74.5 -72.5t50.5 -109t18.5 -153.5v-668h-262zM528 209q73 0 127 25.5t93 60.5v113q-41 15 -91.5 24.5t-103.5 9.5q-41 0 -77.5 -6.5t-64 -21t-43.5 -3 [...]
-<glyph unicode="Ą" horiz-adv-x="1520" d="M1176 -500q-58 0 -105 16t-80 45t-51 69.5t-18 90.5q0 91 59.5 162t165.5 117l-111 297h-553l-110 -297h-297l549 1434h270l549 -1434q-66 0 -122 -23t-96.5 -59.5t-63.5 -82.5t-23 -91q0 -48 23 -69t55 -21q27 0 47 11.5t32 27.5l123 -88q-15 -17 -36.5 -35.5t-50.5 -34 t-67.5 -25.5t-88.5 -10zM805 922q-11 26 -23.5 62t-21.5 67q-8 -31 -21 -67t-24 -62l-142 -373h373z" />
-<glyph unicode="ą" horiz-adv-x="1143" d="M772 -500q-58 0 -105 16t-80 45t-51 69.5t-18 90.5q0 93 60.5 164t169.5 117v74q-18 -19 -43.5 -37t-59.5 -32.5t-75.5 -23t-92.5 -8.5q-81 0 -151 22t-122 66t-82 108.5t-30 150.5q0 90 36 155.5t94.5 108t133 62.5t152.5 20q75 0 133.5 -13.5t106.5 -36.5v54 q0 86 -49 126t-140 40q-93 0 -171 -27t-152 -78l-82 215q37 25 83.5 47.5t100.5 39.5t115.5 26.5t128.5 9.5q47 0 97 -5t97.5 -20t90 -43t74.5 -72.5t50.5 -109t18.5 -153.5v-668q-48 -14 -97 -38t-88.5 -56.5t-64.5 -7 [...]
-<glyph unicode="Ć" horiz-adv-x="1352" d="M735 -23q-112 0 -200 30t-155 83t-114 124t-76.5 153.5t-43 171.5t-13.5 178q0 82 14 168.5t43.5 168.5t77 155t114 128t154.5 87t199 32q103 0 187 -28t148 -73.5t108.5 -103.5t68.5 -118l-248 -113q-26 40 -53 72t-58 53.5t-68.5 33t-84.5 11.5q-60 0 -107 -20.5 t-82 -55.5t-59 -82t-39 -99.5t-21.5 -108t-6.5 -107.5q0 -84 18 -168t56 -152t97.5 -110.5t143.5 -42.5q44 0 81 13.5t68.5 37.5t59 58t51.5 75l254 -96q-34 -79 -82 -144.5t-111.5 -112t-143.5 -72.5t-177 -26zM76 [...]
-<glyph unicode="ć" horiz-adv-x="1120" d="M580 -25q-109 0 -197.5 37.5t-150.5 108t-96 172t-34 229.5t34 230t96 173t150.5 108.5t197.5 37.5q160 0 272.5 -73.5t163.5 -233.5l-254 -84q-23 60 -68 97.5t-108 37.5q-49 0 -88.5 -21t-67 -59t-42.5 -92t-15 -121t15 -120.5t42 -91.5t65 -58.5t85 -20.5q68 0 115 39 t69 113l254 -76q-47 -165 -157.5 -248.5t-280.5 -83.5zM567 1180h-258l291 319h332z" />
-<glyph unicode="Ĉ" horiz-adv-x="1352" d="M735 -23q-112 0 -200 30t-155 83t-114 124t-76.5 153.5t-43 171.5t-13.5 178q0 82 14 168.5t43.5 168.5t77 155t114 128t154.5 87t199 32q103 0 187 -28t148 -73.5t108.5 -103.5t68.5 -118l-248 -113q-26 40 -53 72t-58 53.5t-68.5 33t-84.5 11.5q-60 0 -107 -20.5 t-82 -55.5t-59 -82t-39 -99.5t-21.5 -108t-6.5 -107.5q0 -84 18 -168t56 -152t97.5 -110.5t143.5 -42.5q44 0 81 13.5t68.5 37.5t59 58t51.5 75l254 -96q-34 -79 -82 -144.5t-111.5 -112t-143.5 -72.5t-177 -26zM91 [...]
-<glyph unicode="ĉ" horiz-adv-x="1120" d="M580 -25q-109 0 -197.5 37.5t-150.5 108t-96 172t-34 229.5t34 230t96 173t150.5 108.5t197.5 37.5q160 0 272.5 -73.5t163.5 -233.5l-254 -84q-23 60 -68 97.5t-108 37.5q-49 0 -88.5 -21t-67 -59t-42.5 -92t-15 -121t15 -120.5t42 -91.5t65 -58.5t85 -20.5q68 0 115 39 t69 113l254 -76q-47 -165 -157.5 -248.5t-280.5 -83.5zM759 1180l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="Ċ" horiz-adv-x="1352" d="M735 -23q-112 0 -200 30t-155 83t-114 124t-76.5 153.5t-43 171.5t-13.5 178q0 82 14 168.5t43.5 168.5t77 155t114 128t154.5 87t199 32q103 0 187 -28t148 -73.5t108.5 -103.5t68.5 -118l-248 -113q-26 40 -53 72t-58 53.5t-68.5 33t-84.5 11.5q-60 0 -107 -20.5 t-82 -55.5t-59 -82t-39 -99.5t-21.5 -108t-6.5 -107.5q0 -84 18 -168t56 -152t97.5 -110.5t143.5 -42.5q44 0 81 13.5t68.5 37.5t59 58t51.5 75l254 -96q-34 -79 -82 -144.5t-111.5 -112t-143.5 -72.5t-177 -26zM73 [...]
-<glyph unicode="ċ" horiz-adv-x="1120" d="M580 -25q-109 0 -197.5 37.5t-150.5 108t-96 172t-34 229.5t34 230t96 173t150.5 108.5t197.5 37.5q160 0 272.5 -73.5t163.5 -233.5l-254 -84q-23 60 -68 97.5t-108 37.5q-49 0 -88.5 -21t-67 -59t-42.5 -92t-15 -121t15 -120.5t42 -91.5t65 -58.5t85 -20.5q68 0 115 39 t69 113l254 -76q-47 -165 -157.5 -248.5t-280.5 -83.5zM587 1190q-32 0 -59 11.5t-47 31.5t-31.5 47t-11.5 59q0 31 11.5 58.5t31.5 48t47 32t59 11.5q31 0 58.5 -11.5t48 -32t32 -48t11.5 -58.5q0 -32 -11.5 [...]
-<glyph unicode="Č" horiz-adv-x="1352" d="M735 -23q-112 0 -200 30t-155 83t-114 124t-76.5 153.5t-43 171.5t-13.5 178q0 82 14 168.5t43.5 168.5t77 155t114 128t154.5 87t199 32q103 0 187 -28t148 -73.5t108.5 -103.5t68.5 -118l-248 -113q-26 40 -53 72t-58 53.5t-68.5 33t-84.5 11.5q-60 0 -107 -20.5 t-82 -55.5t-59 -82t-39 -99.5t-21.5 -108t-6.5 -107.5q0 -84 18 -168t56 -152t97.5 -110.5t143.5 -42.5q44 0 81 13.5t68.5 37.5t59 58t51.5 75l254 -96q-34 -79 -82 -144.5t-111.5 -112t-143.5 -72.5t-177 -26zM86 [...]
-<glyph unicode="č" horiz-adv-x="1120" d="M580 -25q-109 0 -197.5 37.5t-150.5 108t-96 172t-34 229.5t34 230t96 173t150.5 108.5t197.5 37.5q160 0 272.5 -73.5t163.5 -233.5l-254 -84q-23 60 -68 97.5t-108 37.5q-49 0 -88.5 -21t-67 -59t-42.5 -92t-15 -121t15 -120.5t42 -91.5t65 -58.5t85 -20.5q68 0 115 39 t69 113l254 -76q-47 -165 -157.5 -248.5t-280.5 -83.5zM694 1180h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="Ď" horiz-adv-x="1495" d="M184 1434h484q125 0 225 -28.5t177.5 -78.5t132.5 -118.5t90.5 -148t52 -167.5t16.5 -176q0 -82 -15.5 -167.5t-49.5 -165t-88.5 -149.5t-133 -122.5t-181.5 -82.5t-235 -30h-475v1434zM692 262q107 0 180 41.5t118 107t65 146.5t20 160q0 84 -21 165.5t-64.5 145.5 t-112 103.5t-162.5 39.5h-254v-909h231zM802 1569h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ď" horiz-adv-x="1542" d="M535 -25q-98 0 -178 36t-136.5 105.5t-87.5 172t-31 235.5q0 128 33 229.5t91 172t137.5 108t173.5 37.5q77 0 133 -27.5t90 -72.5v387l266 121v-1479h-266v76q-32 -48 -88.5 -74.5t-136.5 -26.5zM578 231q65 0 109 22t73 62v414q-10 15 -27 30.5t-40.5 28t-52 20 t-62.5 7.5q-101 0 -155 -75t-54 -216q0 -67 13.5 -121t40 -92.5t65.5 -59t90 -20.5zM1491 1459v-16q0 -86 -16.5 -159t-46 -135.5t-71 -116.5t-91.5 -103l-86 65q14 28 25.5 77t20 110.5t13 132.5t4.5 145h248z" />
-<glyph unicode="Đ" horiz-adv-x="1495" d="M184 588h-127v258h127v588h484q125 0 225 -28.5t177.5 -78.5t132.5 -118.5t90.5 -148t52 -167.5t16.5 -176q0 -82 -15.5 -167.5t-49.5 -165t-88.5 -149.5t-133 -122.5t-181.5 -82.5t-235 -30h-475v588zM692 262q107 0 180 41.5t118 107t65 146.5t20 160q0 84 -21 165.5 t-64.5 145.5t-112 103.5t-162.5 39.5h-254v-325h293v-258h-293v-326h231z" />
-<glyph unicode="đ" horiz-adv-x="1180" d="M1026 1100v-1100h-266v76q-32 -48 -88.5 -74.5t-136.5 -26.5q-98 0 -178 35.5t-136.5 102t-87.5 162.5t-31 217q0 116 33 210.5t91 162t137.5 104.5t173.5 37q77 0 133 -28t90 -73v195h-238v205h238v53l266 121v-174h133v-205h-133zM578 231q65 0 109 22t73 62v349 q-10 15 -27 30.5t-40.5 28t-52 20t-62.5 7.5q-50 0 -89 -19t-65.5 -53t-40.5 -81.5t-14 -104.5q0 -55 13.5 -102.5t40 -83t65.5 -55.5t90 -20z" />
-<glyph unicode="Ē" horiz-adv-x="1319" d="M184 0v1434h979v-263h-702v-301h413v-262h-413v-346h745v-262h-1022zM352 1591v217h676v-217h-676z" />
-<glyph unicode="ē" horiz-adv-x="1147" d="M592 -25q-114 0 -204.5 37.5t-154 108.5t-97.5 173t-34 230t34 229.5t98 172t154.5 108t203.5 37.5q97 0 182 -30t147 -91t98 -153.5t36 -218.5q0 -31 -1 -73.5t-7 -84.5h-682q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 126 25.5t99 82.5l164 -165q-66 -81 -162 -129 t-227 -48zM592 838q-94 0 -153.5 -47t-71.5 -146h428q-13 93 -68 143t-135 50zM256 1202v217h676v-217h-676z" />
-<glyph unicode="Ĕ" horiz-adv-x="1319" d="M184 0v1434h979v-263h-702v-301h413v-262h-413v-346h745v-262h-1022zM690 1560q-101 0 -174 29t-121 77t-71 111t-23 132h205q0 -29 10.5 -57t32.5 -50t57 -35.5t84 -13.5t84.5 13.5t58 35.5t33.5 50t11 57h205q0 -69 -23.5 -132.5t-71.5 -111.5t-122 -76.5t-175 -28.5z " />
-<glyph unicode="ĕ" horiz-adv-x="1147" d="M592 -25q-114 0 -204.5 37.5t-154 108.5t-97.5 173t-34 230t34 229.5t98 172t154.5 108t203.5 37.5q97 0 182 -30t147 -91t98 -153.5t36 -218.5q0 -31 -1 -73.5t-7 -84.5h-682q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 126 25.5t99 82.5l164 -165q-66 -81 -162 -129 t-227 -48zM592 838q-94 0 -153.5 -47t-71.5 -146h428q-13 93 -68 143t-135 50zM594 1171q-101 0 -174 29t-121 77t-71 111t-23 132h205q0 -29 10.5 -57t32.5 -50t57 -35.5t84 -13.5t84.5 13.5t58 35.5t33.5 50 [...]
-<glyph unicode="Ė" horiz-adv-x="1319" d="M184 0v1434h979v-263h-702v-301h413v-262h-413v-346h745v-262h-1022zM690 1579q-32 0 -59 11.5t-47 31.5t-31.5 47t-11.5 59q0 31 11.5 58.5t31.5 48t47 32t59 11.5q31 0 58.5 -11.5t48 -32t32 -48t11.5 -58.5q0 -32 -11.5 -59t-32 -47t-48 -31.5t-58.5 -11.5z" />
-<glyph unicode="ė" horiz-adv-x="1147" d="M592 -25q-114 0 -204.5 37.5t-154 108.5t-97.5 173t-34 230t34 229.5t98 172t154.5 108t203.5 37.5q97 0 182 -30t147 -91t98 -153.5t36 -218.5q0 -31 -1 -73.5t-7 -84.5h-682q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 126 25.5t99 82.5l164 -165q-66 -81 -162 -129 t-227 -48zM592 838q-94 0 -153.5 -47t-71.5 -146h428q-13 93 -68 143t-135 50zM594 1190q-32 0 -59 11.5t-47 31.5t-31.5 47t-11.5 59q0 31 11.5 58.5t31.5 48t47 32t59 11.5q31 0 58.5 -11.5t48 -32t32 -48t1 [...]
-<glyph unicode="Ę" horiz-adv-x="1319" d="M903 -500q-57 0 -104 16t-80.5 45t-51.5 69.5t-18 90.5q0 91 59.5 162.5t165.5 116.5h-690v1434h979v-263h-702v-301h413v-262h-413v-346h745v-262h-47q-66 0 -120 -24.5t-92.5 -62t-59.5 -82.5t-21 -87q0 -48 23 -69t55 -21q28 0 48 11.5t32 27.5l123 -88 q-15 -17 -36.5 -35.5t-51 -34t-68 -25.5t-88.5 -10z" />
-<glyph unicode="ę" horiz-adv-x="1147" d="M606 -500q-57 0 -104 16t-80.5 45t-51.5 69.5t-18 90.5q0 81 47.5 145.5t141.5 110.5q-104 9 -185.5 51t-138 112.5t-86 167t-29.5 216.5q0 128 34 229.5t98 172t154.5 108t203.5 37.5q97 0 182 -30t147 -91t98 -153.5t36 -218.5q0 -31 -1 -73.5t-7 -84.5h-682 q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 126 25.5t99 82.5l164 -165q-35 -44 -79 -76.5t-89.5 -60l-89.5 -53t-78 -56.5t-55 -69.5t-21 -92.5q0 -48 23 -69t55 -21q28 0 48 11.5t32 27.5l123 -88q-15 -17 -36.5 -3 [...]
-<glyph unicode="Ě" horiz-adv-x="1319" d="M184 0v1434h979v-263h-702v-301h413v-262h-413v-346h745v-262h-1022zM817 1569h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ě" horiz-adv-x="1147" d="M592 -25q-114 0 -204.5 37.5t-154 108.5t-97.5 173t-34 230t34 229.5t98 172t154.5 108t203.5 37.5q97 0 182 -30t147 -91t98 -153.5t36 -218.5q0 -31 -1 -73.5t-7 -84.5h-682q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 126 25.5t99 82.5l164 -165q-66 -81 -162 -129 t-227 -48zM592 838q-94 0 -153.5 -47t-71.5 -146h428q-13 93 -68 143t-135 50zM721 1180h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="Ĝ" horiz-adv-x="1419" d="M764 -25q-111 0 -201 29t-160 80t-121 121.5t-84 152.5t-49 173.5t-16 185.5q0 87 16.5 176t51 171.5t86 154.5t122 125t159 83.5t196.5 30.5q104 0 183.5 -26t141 -68.5t107 -98t82.5 -114.5l-244 -139q-24 36 -52 68.5t-60.5 57.5t-71.5 39.5t-86 14.5 q-67 0 -118 -20.5t-89.5 -56t-64.5 -82.5t-42 -100t-23 -108.5t-7 -107.5q0 -91 19 -176.5t60.5 -152t107 -106.5t157.5 -40q52 0 97.5 18.5t79.5 50t53.5 74.5t19.5 92v12h-244v263h526v-211q0 -138 -41 -243.5t-112.5 -177t [...]
-<glyph unicode="ĝ" horiz-adv-x="1180" d="M385 -188q105 7 177 20t116 38t63 63t19 94v28q-47 -46 -105.5 -63t-119.5 -17q-95 0 -174 34.5t-137 103.5t-90 171.5t-32 239.5q0 128 31.5 229.5t89 172t137.5 108t177 37.5q140 0 223 -80v56h266v-916q0 -59 -5.5 -117.5t-18 -110.5t-32.5 -96t-50 -75 q-40 -40 -86 -65.5t-95.5 -41.5t-102 -23.5t-104.5 -11.5zM578 231q65 0 108 22t74 62v414q-12 15 -28 30.5t-38.5 28t-51 20t-64.5 7.5q-101 0 -155 -75t-54 -216q0 -64 12.5 -117.5t38.5 -92.5t65.5 -61t92.5 -22zM780 11 [...]
-<glyph unicode="Ğ" horiz-adv-x="1419" d="M764 -25q-111 0 -201 29t-160 80t-121 121.5t-84 152.5t-49 173.5t-16 185.5q0 87 16.5 176t51 171.5t86 154.5t122 125t159 83.5t196.5 30.5q104 0 183.5 -26t141 -68.5t107 -98t82.5 -114.5l-244 -139q-24 36 -52 68.5t-60.5 57.5t-71.5 39.5t-86 14.5 q-67 0 -118 -20.5t-89.5 -56t-64.5 -82.5t-42 -100t-23 -108.5t-7 -107.5q0 -91 19 -176.5t60.5 -152t107 -106.5t157.5 -40q52 0 97.5 18.5t79.5 50t53.5 74.5t19.5 92v12h-244v263h526v-211q0 -138 -41 -243.5t-112.5 -177t [...]
-<glyph unicode="ğ" horiz-adv-x="1180" d="M385 -188q105 7 177 20t116 38t63 63t19 94v28q-47 -46 -105.5 -63t-119.5 -17q-95 0 -174 34.5t-137 103.5t-90 171.5t-32 239.5q0 128 31.5 229.5t89 172t137.5 108t177 37.5q140 0 223 -80v56h266v-916q0 -59 -5.5 -117.5t-18 -110.5t-32.5 -96t-50 -75 q-40 -40 -86 -65.5t-95.5 -41.5t-102 -23.5t-104.5 -11.5zM578 231q65 0 108 22t74 62v414q-12 15 -28 30.5t-38.5 28t-51 20t-64.5 7.5q-101 0 -155 -75t-54 -216q0 -64 12.5 -117.5t38.5 -92.5t65.5 -61t92.5 -22zM608 11 [...]
-<glyph unicode="Ġ" horiz-adv-x="1419" d="M764 -25q-111 0 -201 29t-160 80t-121 121.5t-84 152.5t-49 173.5t-16 185.5q0 87 16.5 176t51 171.5t86 154.5t122 125t159 83.5t196.5 30.5q104 0 183.5 -26t141 -68.5t107 -98t82.5 -114.5l-244 -139q-24 36 -52 68.5t-60.5 57.5t-71.5 39.5t-86 14.5 q-67 0 -118 -20.5t-89.5 -56t-64.5 -82.5t-42 -100t-23 -108.5t-7 -107.5q0 -91 19 -176.5t60.5 -152t107 -106.5t157.5 -40q52 0 97.5 18.5t79.5 50t53.5 74.5t19.5 92v12h-244v263h526v-211q0 -138 -41 -243.5t-112.5 -177t [...]
-<glyph unicode="ġ" horiz-adv-x="1180" d="M385 -188q105 7 177 20t116 38t63 63t19 94v28q-47 -46 -105.5 -63t-119.5 -17q-95 0 -174 34.5t-137 103.5t-90 171.5t-32 239.5q0 128 31.5 229.5t89 172t137.5 108t177 37.5q140 0 223 -80v56h266v-916q0 -59 -5.5 -117.5t-18 -110.5t-32.5 -96t-50 -75 q-40 -40 -86 -65.5t-95.5 -41.5t-102 -23.5t-104.5 -11.5zM578 231q65 0 108 22t74 62v414q-12 15 -28 30.5t-38.5 28t-51 20t-64.5 7.5q-101 0 -155 -75t-54 -216q0 -64 12.5 -117.5t38.5 -92.5t65.5 -61t92.5 -22zM608 11 [...]
-<glyph unicode="Ģ" horiz-adv-x="1419" d="M764 -25q-111 0 -201 29t-160 80t-121 121.5t-84 152.5t-49 173.5t-16 185.5q0 87 16.5 176t51 171.5t86 154.5t122 125t159 83.5t196.5 30.5q104 0 183.5 -26t141 -68.5t107 -98t82.5 -114.5l-244 -139q-24 36 -52 68.5t-60.5 57.5t-71.5 39.5t-86 14.5 q-67 0 -118 -20.5t-89.5 -56t-64.5 -82.5t-42 -100t-23 -108.5t-7 -107.5q0 -91 19 -176.5t60.5 -152t107 -106.5t157.5 -40q52 0 97.5 18.5t79.5 50t53.5 74.5t19.5 92v12h-244v263h526v-211q0 -138 -41 -243.5t-112.5 -177t [...]
-<glyph unicode="ģ" horiz-adv-x="1180" d="M385 -188q105 7 177 20t116 38t63 63t19 94v28q-47 -46 -105.5 -63t-119.5 -17q-95 0 -174 34.5t-137 103.5t-90 171.5t-32 239.5q0 128 31.5 229.5t89 172t137.5 108t177 37.5q140 0 223 -80v56h266v-916q0 -59 -5.5 -117.5t-18 -110.5t-32.5 -96t-50 -75 q-40 -40 -86 -65.5t-95.5 -41.5t-102 -23.5t-104.5 -11.5zM578 231q65 0 108 22t74 62v414q-12 15 -28 30.5t-38.5 28t-51 20t-64.5 7.5q-101 0 -155 -75t-54 -216q0 -64 12.5 -117.5t38.5 -92.5t65.5 -61t92.5 -22zM731 16 [...]
-<glyph unicode="Ĥ" horiz-adv-x="1522" d="M1061 0v606h-600v-606h-277v1434h277v-566h600v566h276v-1434h-276zM864 1569l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="ĥ" d="M801 0v614q0 53 -14 91t-39 62.5t-58 36t-72 11.5q-41 0 -77 -10.5t-63 -34t-42.5 -61t-15.5 -91.5v-618h-266v1356l266 123v-531q48 59 118.5 91t161.5 32q67 0 133 -20t118 -68t84 -127t32 -197v-659h-266zM780 1567l-172 141l-172 -141h-245l290 319h256l289 -319 h-246z" />
-<glyph unicode="Ħ" horiz-adv-x="1522" d="M1061 0v606h-600v-606h-277v1018h-164v262h164v154h277v-154h600v154h276v-154h164v-262h-164v-1018h-276zM1061 868v150h-600v-150h600z" />
-<glyph unicode="ħ" d="M801 0v553q0 53 -14 91t-39 62.5t-58 36t-72 11.5q-41 0 -77 -10.5t-63 -34t-42.5 -61t-15.5 -91.5v-557h-266v1100h-117v205h117v51l266 123v-174h270v-205h-270v-213q48 59 118.5 91t161.5 32q67 0 133 -20.5t118 -68.5t84 -127t32 -196v-598h-266z" />
-<glyph unicode="Ĩ" horiz-adv-x="666" d="M195 0v1434h276v-1434h-276zM494 1583q-54 0 -99.5 14.5t-85.5 32.5t-77 32.5t-76 14.5q-38 0 -65.5 -19.5t-30.5 -66.5h-125q0 61 12 113t39.5 90t73 59t111.5 21q54 0 99.5 -14.5t85.5 -32.5t77 -32.5t76 -14.5q38 0 65.5 19.5t30.5 66.5h125q0 -62 -12 -113.5 t-40 -89.5t-73 -59t-111 -21z" />
-<glyph unicode="ĩ" horiz-adv-x="578" d="M451 1194q-54 0 -99.5 14.5t-85.5 32.5t-77 32.5t-76 14.5q-38 0 -65.5 -19.5t-30.5 -66.5h-125q0 61 12 113t39.5 90t73 59t111.5 21q54 0 99.5 -14.5t85.5 -32.5t77 -32.5t76 -14.5q38 0 65.5 19.5t30.5 66.5h125q0 -62 -12 -113.5t-40 -89.5t-73 -59t-111 -21zM156 0 v1047h266v-1047h-266z" />
-<glyph unicode="Ī" horiz-adv-x="666" d="M195 0v1434h276v-1434h-276zM-6 1591v217h676v-217h-676z" />
-<glyph unicode="ī" horiz-adv-x="578" d="M-49 1202v217h676v-217h-676zM156 0v1047h266v-1047h-266z" />
-<glyph unicode="Ĭ" horiz-adv-x="666" d="M195 0v1434h276v-1434h-276zM332 1560q-101 0 -174 29t-121 77t-71 111t-23 132h205q0 -29 10.5 -57t32.5 -50t57 -35.5t84 -13.5t84.5 13.5t58 35.5t33.5 50t11 57h205q0 -69 -23.5 -132.5t-71.5 -111.5t-122 -76.5t-175 -28.5z" />
-<glyph unicode="ĭ" horiz-adv-x="578" d="M289 1171q-101 0 -174 29t-121 77t-71 111t-23 132h205q0 -29 10.5 -57t32.5 -50t57 -35.5t84 -13.5t84.5 13.5t58 35.5t33.5 50t11 57h205q0 -69 -23.5 -132.5t-71.5 -111.5t-122 -76.5t-175 -28.5zM156 0v1047h266v-1047h-266z" />
-<glyph unicode="Į" horiz-adv-x="666" d="M223 -500q-57 0 -104 16t-80.5 45t-51.5 69.5t-18 90.5q0 91 59.5 162.5t166.5 116.5v1434h276v-1434q-53 0 -104 -21.5t-91.5 -57.5t-65 -82t-24.5 -95q0 -48 23 -69t55 -21q28 0 48 11.5t32 27.5l123 -88q-15 -17 -36.5 -35.5t-51 -34t-68 -25.5t-88.5 -10z" />
-<glyph unicode="į" horiz-adv-x="565" d="M176 -500q-57 0 -104 16t-80.5 45t-51.5 69.5t-18 90.5q0 91 59.5 162.5t168.5 116.5v1047h266v-1047q-78 -18 -131.5 -48t-85.5 -65.5t-46 -73t-14 -69.5q0 -48 23 -69t55 -21q28 0 48 11.5t32 27.5l123 -88q-15 -17 -36.5 -35.5t-51 -34t-68 -25.5t-88.5 -10zM283 1169 q-32 0 -60.5 12.5t-49.5 33.5t-33.5 49.5t-12.5 60.5t12.5 60t33.5 49t49.5 33t60.5 12t60.5 -12t49.5 -33t33 -49t12 -60t-12 -60.5t-33 -49.5t-49.5 -33.5t-60.5 -12.5z" />
-<glyph unicode="İ" horiz-adv-x="666" d="M195 0v1434h276v-1434h-276zM332 1579q-32 0 -59 11.5t-47 31.5t-31.5 47t-11.5 59q0 31 11.5 58.5t31.5 48t47 32t59 11.5q31 0 58.5 -11.5t48 -32t32 -48t11.5 -58.5q0 -32 -11.5 -59t-32 -47t-48 -31.5t-58.5 -11.5z" />
-<glyph unicode="ı" horiz-adv-x="578" d="M156 0v1047h266v-1047h-266z" />
-<glyph unicode="IJ" horiz-adv-x="1767" d="M195 0v1434h276v-1434h-276zM1101 -25q-78 0 -149.5 21.5t-133 62t-112 99t-86.5 133.5l242 112q40 -77 99.5 -119t139.5 -42q42 0 81 11.5t69 42.5t48 85.5t18 140.5v912h276v-936q0 -102 -21 -181t-57 -137t-84 -97.5t-103 -63t-113 -34t-114 -10.5z" />
-<glyph unicode="ij" horiz-adv-x="1085" d="M156 0v1047h266v-1047h-266zM289 1169q-32 0 -60.5 12.5t-49.5 33.5t-33.5 49.5t-12.5 60.5t12.5 60t33.5 49t49.5 33t60.5 12t60.5 -12t49.5 -33t33 -49t12 -60t-12 -60.5t-33 -49.5t-49.5 -33.5t-60.5 -12.5zM371 -227q81 18 137 37.5t90.5 45t50 58.5t15.5 78v1055 h266v-990q0 -45 -4 -89.5t-15 -87t-31 -81.5t-53 -73q-37 -39 -77.5 -66.5t-82 -46.5t-83 -31t-78.5 -20zM797 1169q-32 0 -60.5 12.5t-49.5 33.5t-33.5 49.5t-12.5 60.5t12.5 60t33.5 49t49.5 33t60.5 12t60.5 [...]
-<glyph unicode="Ĵ" horiz-adv-x="1198" d="M532 -25q-78 0 -149.5 21.5t-133 62t-112 99t-86.5 133.5l242 112q40 -77 99.5 -119t139.5 -42q42 0 81 11.5t69 42.5t48 85.5t18 140.5v912h276v-936q0 -102 -21 -181t-57 -137t-84 -97.5t-103 -63t-113 -34t-114 -10.5zM1060 1569l-172 141l-172 -141h-245l290 319h256 l289 -319h-246z" />
-<glyph unicode="ĵ" horiz-adv-x="578" d="M-137 -227q81 18 137 37.5t90.5 45t50 58.5t15.5 78v1055h266v-990q0 -45 -4 -89.5t-15 -87t-31 -81.5t-53 -73q-37 -39 -77.5 -66.5t-82 -46.5t-83 -31t-78.5 -20zM461 1180l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="Ķ" horiz-adv-x="1501" d="M1096 0l-369 686l-266 -328v-358h-277v1434h277v-684l543 684h327l-418 -514l506 -920h-323zM647 -516q35 34 62 70t41 81q-26 0 -50 9.5t-42.5 27.5t-29.5 43.5t-11 57.5q0 35 12 63.5t32 49t47.5 31.5t59.5 11q69 0 113.5 -43t44.5 -125q0 -98 -44.5 -177t-141.5 -158z " />
-<glyph unicode="ķ" horiz-adv-x="1159" d="M807 0l-289 553l-98 -94v-459h-266v1354l266 125v-717l280 285h320l-307 -304l385 -743h-291zM468 -516q35 34 62 70t41 81q-26 0 -50 9.5t-42.5 27.5t-29.5 43.5t-11 57.5q0 35 12 63.5t32 49t47.5 31.5t59.5 11q69 0 113.5 -43t44.5 -125q0 -98 -44.5 -177t-141.5 -158 z" />
-<glyph unicode="ĸ" horiz-adv-x="1122" d="M768 0l-238 442l-110 -133v-309h-266v1047h266v-390l311 390h291l-317 -392l356 -655h-293z" />
-<glyph unicode="Ĺ" horiz-adv-x="1184" d="M184 0v1434h277v-1168h692v-266h-969zM351 1569h-258l291 319h332z" />
-<glyph unicode="ĺ" horiz-adv-x="594" d="M164 0v1354l266 125v-1479h-266zM283 1549h-258l291 319h332z" />
-<glyph unicode="Ļ" horiz-adv-x="1184" d="M184 0v1434h277v-1168h692v-266h-969zM548 -516q35 34 62 70t41 81q-26 0 -50 9.5t-42.5 27.5t-29.5 43.5t-11 57.5q0 35 12 63.5t32 49t47.5 31.5t59.5 11q69 0 113.5 -43t44.5 -125q0 -98 -44.5 -177t-141.5 -158z" />
-<glyph unicode="ļ" horiz-adv-x="594" d="M164 0v1354l266 125v-1479h-266zM175 -516q35 34 62 70t41 81q-26 0 -50 9.5t-42.5 27.5t-29.5 43.5t-11 57.5q0 35 12 63.5t32 49t47.5 31.5t59.5 11q69 0 113.5 -43t44.5 -125q0 -98 -44.5 -177t-141.5 -158z" />
-<glyph unicode="Ľ" horiz-adv-x="1184" d="M184 0v1434h277v-1168h692v-266h-969zM455 1569h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ľ" horiz-adv-x="913" d="M164 0v1354l266 125v-1479h-266zM862 1459v-16q0 -86 -16.5 -159t-46 -135.5t-71 -116.5t-91.5 -103l-86 65q14 28 25.5 77t20 110.5t13 132.5t4.5 145h248z" />
-<glyph unicode="Ŀ" horiz-adv-x="1184" d="M184 0v1434h277v-1168h692v-266h-969zM903 566q-32 0 -59 11.5t-47 31.5t-31.5 47t-11.5 59q0 31 11.5 58.5t31.5 48t47 32t59 11.5q31 0 58.5 -11.5t48 -32t32 -48t11.5 -58.5q0 -32 -11.5 -59t-32 -47t-48 -31.5t-58.5 -11.5z" />
-<glyph unicode="ŀ" horiz-adv-x="854" d="M164 0v1354l266 125v-1479h-266zM723 497q-32 0 -59 11.5t-47 31.5t-31.5 47t-11.5 59q0 31 11.5 58.5t31.5 48t47 32t59 11.5q31 0 58.5 -11.5t48 -32t32 -48t11.5 -58.5q0 -32 -11.5 -59t-32 -47t-48 -31.5t-58.5 -11.5z" />
-<glyph unicode="Ł" horiz-adv-x="1184" d="M184 0v428l-135 -104v303l135 102v705h277v-498l297 225v-301l-297 -223v-371h692v-266h-969z" />
-<glyph unicode="ł" horiz-adv-x="594" d="M162 0v428l-113 -86v264l113 86v662l266 125v-584l117 88v-264l-117 -88v-631h-266z" />
-<glyph unicode="Ń" horiz-adv-x="1511" d="M1081 0l-569 836q-14 21 -30.5 51.5t-28.5 56.5q3 -23 4.5 -54t1.5 -54v-836h-275v1434h260l555 -826q13 -20 30 -50t30 -58q-3 29 -4.5 58.5t-1.5 49.5v826h274v-1434h-246zM797 1569h-258l291 319h332z" />
-<glyph unicode="ń" d="M801 0v614q0 53 -14 91t-39 62.5t-58 36t-72 11.5q-41 0 -77 -10.5t-63 -34t-42.5 -61t-15.5 -91.5v-618h-266v1047h266v-99q48 59 118.5 91t161.5 32q67 0 133 -20t118 -68t84 -127t32 -197v-659h-266zM588 1180h-258l291 319h332z" />
-<glyph unicode="Ņ" horiz-adv-x="1511" d="M1081 0l-569 836q-14 21 -30.5 51.5t-28.5 56.5q3 -23 4.5 -54t1.5 -54v-836h-275v1434h260l555 -826q13 -20 30 -50t30 -58q-3 29 -4.5 58.5t-1.5 49.5v826h274v-1434h-246zM641 -516q35 34 62 70t41 81q-26 0 -50 9.5t-42.5 27.5t-29.5 43.5t-11 57.5q0 35 12 63.5 t32 49t47.5 31.5t59.5 11q69 0 113.5 -43t44.5 -125q0 -98 -44.5 -177t-141.5 -158z" />
-<glyph unicode="ņ" d="M801 0v614q0 53 -14 91t-39 62.5t-58 36t-72 11.5q-41 0 -77 -10.5t-63 -34t-42.5 -61t-15.5 -91.5v-618h-266v1047h266v-99q48 59 118.5 91t161.5 32q67 0 133 -20t118 -68t84 -127t32 -197v-659h-266zM483 -516q35 34 62 70t41 81q-26 0 -50 9.5t-42.5 27.5t-29.5 43.5 t-11 57.5q0 35 12 63.5t32 49t47.5 31.5t59.5 11q69 0 113.5 -43t44.5 -125q0 -98 -44.5 -177t-141.5 -158z" />
-<glyph unicode="Ň" horiz-adv-x="1511" d="M1081 0l-569 836q-14 21 -30.5 51.5t-28.5 56.5q3 -23 4.5 -54t1.5 -54v-836h-275v1434h260l555 -826q13 -20 30 -50t30 -58q-3 29 -4.5 58.5t-1.5 49.5v826h274v-1434h-246zM903 1569h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ň" d="M801 0v614q0 53 -14 91t-39 62.5t-58 36t-72 11.5q-41 0 -77 -10.5t-63 -34t-42.5 -61t-15.5 -91.5v-618h-266v1047h266v-99q48 59 118.5 91t161.5 32q67 0 133 -20t118 -68t84 -127t32 -197v-659h-266zM712 1180h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ʼn" horiz-adv-x="1622" d="M1213 0v614q0 53 -14 91t-39 62.5t-58 36t-72 11.5q-41 0 -77 -10.5t-63 -34t-42.5 -61t-15.5 -91.5v-618h-266v1047h266v-99q48 59 118.5 91t161.5 32q67 0 133 -20t118 -68t84 -127t32 -197v-659h-266zM72 940q14 12 33 34t38.5 48t36 54t25.5 52q-50 16 -83.5 56.5 t-33.5 103.5q0 39 14 70t38 53.5t55 34.5t65 12q36 0 67.5 -13t55.5 -37.5t37.5 -61t13.5 -84.5q0 -60 -20.5 -119t-55 -112.5t-79.5 -98.5t-95 -76z" />
-<glyph unicode="Ŋ" horiz-adv-x="1511" d="M752 -285q86 19 143.5 39.5t92.5 46.5t50 60t15 82v100l-541 793q-14 21 -30.5 51.5t-28.5 56.5q3 -23 4.5 -54t1.5 -54v-836h-275v1434h260l555 -826q13 -20 30 -50t30 -58q-3 29 -4.5 58.5t-1.5 49.5v826h274v-1430q0 -46 -4 -92t-15.5 -89.5t-32 -83t-52.5 -73.5 q-38 -38 -79.5 -65.5t-84 -46.5t-86 -32t-84.5 -22z" />
-<glyph unicode="ŋ" d="M508 -289q81 18 137 37.5t90.5 45t50 59t15.5 77.5v684q0 53 -14 91t-39 62.5t-58 36t-72 11.5q-41 0 -77 -10.5t-63 -34t-42.5 -61t-15.5 -91.5v-618h-266v1047h266v-99q48 59 118.5 91t161.5 32q67 0 133 -20t118 -68t84 -127t32 -197v-663q0 -45 -4 -90t-15 -87.5 t-31 -81.5t-52 -73q-37 -39 -78 -66.5t-82.5 -46t-83 -30.5t-78.5 -21z" />
-<glyph unicode="Ō" horiz-adv-x="1530" d="M766 -25q-116 0 -208 30.5t-162.5 83t-120.5 124t-81.5 154t-46 172t-14.5 178.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5t207.5 -30.5t161.5 -83t120 -124t81.5 -154t46 -172t14.5 -177.5q0 -89 -14.5 -178.5t-46 -172t-81.5 -154t-120 -124 t-161.5 -83t-207.5 -30.5zM766 242q96 0 161.5 45t106 115t58.5 153.5t18 161.5q0 55 -7.5 111.5t-24.5 109t-44 99t-65 81t-88.5 54.5t-114.5 20q-65 0 -115.5 -21t-89 -56.5t-65.5 -82.5t-44 -100t-24.5 -108t-7 [...]
-<glyph unicode="ō" horiz-adv-x="1167" d="M584 -25q-111 0 -200 37.5t-151.5 108t-96.5 172t-34 229.5t34 230t96.5 173t151.5 108.5t200 37.5t200 -37.5t151.5 -108.5t96 -173t33.5 -230t-33.5 -229.5t-96 -172t-151.5 -108t-200 -37.5zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 69 -15 123t-42.5 92 t-67.5 58t-90 20t-90 -20t-67.5 -58t-42.5 -92t-15 -123q0 -68 15 -122t42.5 -91.5t67.5 -57.5t90 -20zM243 1202v217h676v-217h-676z" />
-<glyph unicode="Ŏ" horiz-adv-x="1530" d="M766 -25q-116 0 -208 30.5t-162.5 83t-120.5 124t-81.5 154t-46 172t-14.5 178.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5t207.5 -30.5t161.5 -83t120 -124t81.5 -154t46 -172t14.5 -177.5q0 -89 -14.5 -178.5t-46 -172t-81.5 -154t-120 -124 t-161.5 -83t-207.5 -30.5zM766 242q96 0 161.5 45t106 115t58.5 153.5t18 161.5q0 55 -7.5 111.5t-24.5 109t-44 99t-65 81t-88.5 54.5t-114.5 20q-65 0 -115.5 -21t-89 -56.5t-65.5 -82.5t-44 -100t-24.5 -108t-7 [...]
-<glyph unicode="ŏ" horiz-adv-x="1167" d="M584 -25q-111 0 -200 37.5t-151.5 108t-96.5 172t-34 229.5t34 230t96.5 173t151.5 108.5t200 37.5t200 -37.5t151.5 -108.5t96 -173t33.5 -230t-33.5 -229.5t-96 -172t-151.5 -108t-200 -37.5zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 69 -15 123t-42.5 92 t-67.5 58t-90 20t-90 -20t-67.5 -58t-42.5 -92t-15 -123q0 -68 15 -122t42.5 -91.5t67.5 -57.5t90 -20zM581 1171q-101 0 -174 29t-121 77t-71 111t-23 132h205q0 -29 10.5 -57t32.5 -50t57 -35.5t84 -13.5t84.5 [...]
-<glyph unicode="Ő" horiz-adv-x="1530" d="M766 -25q-116 0 -208 30.5t-162.5 83t-120.5 124t-81.5 154t-46 172t-14.5 178.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5t207.5 -30.5t161.5 -83t120 -124t81.5 -154t46 -172t14.5 -177.5q0 -89 -14.5 -178.5t-46 -172t-81.5 -154t-120 -124 t-161.5 -83t-207.5 -30.5zM766 242q96 0 161.5 45t106 115t58.5 153.5t18 161.5q0 55 -7.5 111.5t-24.5 109t-44 99t-65 81t-88.5 54.5t-114.5 20q-65 0 -115.5 -21t-89 -56.5t-65.5 -82.5t-44 -100t-24.5 -108t-7 [...]
-<glyph unicode="ő" horiz-adv-x="1167" d="M584 -25q-111 0 -200 37.5t-151.5 108t-96.5 172t-34 229.5t34 230t96.5 173t151.5 108.5t200 37.5t200 -37.5t151.5 -108.5t96 -173t33.5 -230t-33.5 -229.5t-96 -172t-151.5 -108t-200 -37.5zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 69 -15 123t-42.5 92 t-67.5 58t-90 20t-90 -20t-67.5 -58t-42.5 -92t-15 -123q0 -68 15 -122t42.5 -91.5t67.5 -57.5t90 -20zM418 1180h-258l291 319h331zM868 1180h-258l291 319h332z" />
-<glyph unicode="Œ" horiz-adv-x="1962" d="M954 0q-42 -12 -91 -18.5t-97 -6.5q-116 0 -208 30.5t-162.5 83t-120.5 124t-81.5 154t-46 172t-14.5 178.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5q51 0 97 -6t91 -18h852v-263h-626v-301h338v-262h-338v-346h669v-262h-895zM766 242 q40 0 74.5 8t62.5 22v889q-31 16 -64 23.5t-73 7.5q-65 0 -115.5 -21t-89 -56.5t-65.5 -82.5t-44 -100t-24.5 -108t-7.5 -107q0 -84 18 -168.5t58.5 -152.5t106.5 -111t163 -43z" />
-<glyph unicode="œ" horiz-adv-x="1847" d="M1290 -25q-53 0 -104.5 9t-98 27t-87.5 44.5t-72 61.5q-63 -69 -149.5 -105.5t-194.5 -36.5q-110 0 -199 37.5t-152 108t-97 172t-34 229.5t34 230t97 173t152 108.5t199 37.5q216 0 344 -143q31 34 72 60.5t87.5 45t98 28t104.5 9.5q97 0 182 -30t148 -91t99 -153.5 t36 -218.5q0 -31 -3 -72t-11 -86h-678q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 126.5 25.5t99.5 82.5l163 -165q-66 -81 -162 -129t-227 -48zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 69 -15 123t [...]
-<glyph unicode="Ŕ" horiz-adv-x="1462" d="M184 0v1434h666q136 0 227.5 -37.5t147.5 -99.5t80 -143t24 -169q0 -61 -17.5 -123.5t-52.5 -118.5t-87 -101.5t-121 -70.5l278 -571h-311l-268 535h-289v-535h-277zM854 797q53 0 89 15.5t58 41.5t31.5 60t9.5 71q0 34 -8.5 67.5t-29.5 60t-57.5 42.5t-92.5 16h-393 v-374h393zM696 1569h-258l291 319h332z" />
-<glyph unicode="ŕ" horiz-adv-x="879" d="M154 0v1047h266v-93q22 50 69.5 83.5t124.5 33.5q65 0 116.5 -21t96.5 -59l-36 -243q-38 26 -83.5 43.5t-107.5 17.5q-35 0 -67.5 -10t-57.5 -35t-40 -69t-15 -111v-584h-266zM500 1180h-258l291 319h332z" />
-<glyph unicode="Ŗ" horiz-adv-x="1462" d="M184 0v1434h666q136 0 227.5 -37.5t147.5 -99.5t80 -143t24 -169q0 -61 -17.5 -123.5t-52.5 -118.5t-87 -101.5t-121 -70.5l278 -571h-311l-268 535h-289v-535h-277zM854 797q53 0 89 15.5t58 41.5t31.5 60t9.5 71q0 34 -8.5 67.5t-29.5 60t-57.5 42.5t-92.5 16h-393 v-374h393zM604 -516q35 34 62 70t41 81q-26 0 -50 9.5t-42.5 27.5t-29.5 43.5t-11 57.5q0 35 12 63.5t32 49t47.5 31.5t59.5 11q69 0 113.5 -43t44.5 -125q0 -98 -44.5 -177t-141.5 -158z" />
-<glyph unicode="ŗ" horiz-adv-x="879" d="M154 0v1047h266v-93q22 50 69.5 83.5t124.5 33.5q65 0 116.5 -21t96.5 -59l-36 -243q-38 26 -83.5 43.5t-107.5 17.5q-35 0 -67.5 -10t-57.5 -35t-40 -69t-15 -111v-584h-266zM158 -556q35 34 62 70t41 81q-26 0 -50 9.5t-42.5 27.5t-29.5 43.5t-11 57.5q0 35 12 63.5 t32 49t47.5 31.5t59.5 11q69 0 113.5 -43t44.5 -125q0 -98 -44.5 -177t-141.5 -158z" />
-<glyph unicode="Ř" horiz-adv-x="1462" d="M184 0v1434h666q136 0 227.5 -37.5t147.5 -99.5t80 -143t24 -169q0 -61 -17.5 -123.5t-52.5 -118.5t-87 -101.5t-121 -70.5l278 -571h-311l-268 535h-289v-535h-277zM854 797q53 0 89 15.5t58 41.5t31.5 60t9.5 71q0 34 -8.5 67.5t-29.5 60t-57.5 42.5t-92.5 16h-393 v-374h393zM843 1569h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ř" horiz-adv-x="879" d="M154 0v1047h266v-93q22 50 69.5 83.5t124.5 33.5q65 0 116.5 -21t96.5 -59l-36 -243q-38 26 -83.5 43.5t-107.5 17.5q-35 0 -67.5 -10t-57.5 -35t-40 -69t-15 -111v-584h-266zM630 1180h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="Ś" horiz-adv-x="1300" d="M872 1044q-13 32 -35 60t-52.5 48.5t-70 32t-87.5 11.5q-90 0 -140.5 -36t-50.5 -101q0 -43 32 -72t84 -52.5t118.5 -44.5t136.5 -46.5t136.5 -60t118.5 -85t84 -120t32 -166.5q0 -105 -40 -186t-109.5 -137t-164.5 -85t-205 -29q-101 0 -189 27.5t-159.5 78.5 t-124.5 124t-84 164l252 94q23 -50 55.5 -91.5t72.5 -71t86.5 -46t99.5 -16.5q52 0 94.5 11t73 33t47 54.5t16.5 75.5q0 52 -32 87.5t-84 62t-118.5 48l-136 45.5t-136 56t-118.5 79t-84 113t-32 160q0 81 32.5 152.5t9 [...]
-<glyph unicode="ś" horiz-adv-x="1012" d="M698 758q-15 15 -34.5 31t-44.5 28.5t-56.5 20.5t-68.5 8q-60 0 -97 -20t-37 -58q0 -21 17 -39t45.5 -33t65.5 -28.5t77 -26.5q67 -22 129.5 -50t110 -68t76 -96t28.5 -134q0 -77 -29 -136t-82.5 -99.5t-129 -61.5t-168.5 -21q-64 0 -125.5 13.5t-116.5 38t-100 59.5 t-76 78l176 149q17 -16 41.5 -35.5t56 -36.5t69 -28.5t79.5 -11.5q69 0 113.5 20.5t44.5 71.5q0 25 -15.5 45t-44.5 37.5t-71 34.5t-95 36q-74 27 -134 56t-102 66.5t-64.5 87.5t-22.5 120q0 72 30 127t81.5 92.5 [...]
-<glyph unicode="Ŝ" horiz-adv-x="1300" d="M872 1044q-13 32 -35 60t-52.5 48.5t-70 32t-87.5 11.5q-90 0 -140.5 -36t-50.5 -101q0 -43 32 -72t84 -52.5t118.5 -44.5t136.5 -46.5t136.5 -60t118.5 -85t84 -120t32 -166.5q0 -105 -40 -186t-109.5 -137t-164.5 -85t-205 -29q-101 0 -189 27.5t-159.5 78.5 t-124.5 124t-84 164l252 94q23 -50 55.5 -91.5t72.5 -71t86.5 -46t99.5 -16.5q52 0 94.5 11t73 33t47 54.5t16.5 75.5q0 52 -32 87.5t-84 62t-118.5 48l-136 45.5t-136 56t-118.5 79t-84 113t-32 160q0 81 32.5 152.5t9 [...]
-<glyph unicode="ŝ" horiz-adv-x="1012" d="M698 758q-15 15 -34.5 31t-44.5 28.5t-56.5 20.5t-68.5 8q-60 0 -97 -20t-37 -58q0 -21 17 -39t45.5 -33t65.5 -28.5t77 -26.5q67 -22 129.5 -50t110 -68t76 -96t28.5 -134q0 -77 -29 -136t-82.5 -99.5t-129 -61.5t-168.5 -21q-64 0 -125.5 13.5t-116.5 38t-100 59.5 t-76 78l176 149q17 -16 41.5 -35.5t56 -36.5t69 -28.5t79.5 -11.5q69 0 113.5 20.5t44.5 71.5q0 25 -15.5 45t-44.5 37.5t-71 34.5t-95 36q-74 27 -134 56t-102 66.5t-64.5 87.5t-22.5 120q0 72 30 127t81.5 92.5 [...]
-<glyph unicode="Ş" horiz-adv-x="1300" d="M649 -500q-86 0 -140 27.5t-85 69.5l115 86q12 -16 33.5 -27.5t50.5 -11.5q48 0 73 21t25 59q0 37 -26 58t-74 21q-47 0 -89 -28l-55 67l113 138q-89 8 -165.5 38.5t-139 81t-109 118.5t-74.5 151l252 94q23 -50 55.5 -91.5t72.5 -71t86.5 -46t99.5 -16.5q52 0 94.5 11 t73 33t47 54.5t16.5 75.5q0 52 -32 87.5t-84 62t-118.5 48l-136 45.5t-136 56t-118.5 79t-84 113t-32 160q0 81 32.5 152.5t94.5 125.5t152 85.5t206 31.5q97 0 175.5 -25t138.5 -69.5t101 -105t62 -132.5l-248 [...]
-<glyph unicode="ş" horiz-adv-x="1012" d="M475 -500q-87 0 -140.5 27.5t-84.5 69.5l115 86q12 -16 33.5 -27.5t50.5 -11.5q48 0 73 21t25 59q0 37 -26.5 58t-74.5 21q-46 0 -88 -28l-55 67l115 140q-52 7 -101.5 22.5t-93 39t-80 54t-61.5 66.5l176 149q17 -16 41.5 -35.5t56 -36.5t69 -28.5t79.5 -11.5 q69 0 113.5 20.5t44.5 71.5q0 25 -15.5 45t-44.5 37.5t-71 34.5t-95 36q-74 27 -134 56t-102 66.5t-64.5 87.5t-22.5 120q0 72 30 127t81.5 92.5t121 56.5t148.5 19q67 0 125 -14t105.5 -37t85.5 -53.5t67 -63.5l-179 - [...]
-<glyph unicode="Š" horiz-adv-x="1300" d="M872 1044q-13 32 -35 60t-52.5 48.5t-70 32t-87.5 11.5q-90 0 -140.5 -36t-50.5 -101q0 -43 32 -72t84 -52.5t118.5 -44.5t136.5 -46.5t136.5 -60t118.5 -85t84 -120t32 -166.5q0 -105 -40 -186t-109.5 -137t-164.5 -85t-205 -29q-101 0 -189 27.5t-159.5 78.5 t-124.5 124t-84 164l252 94q23 -50 55.5 -91.5t72.5 -71t86.5 -46t99.5 -16.5q52 0 94.5 11t73 33t47 54.5t16.5 75.5q0 52 -32 87.5t-84 62t-118.5 48l-136 45.5t-136 56t-118.5 79t-84 113t-32 160q0 81 32.5 152.5t9 [...]
-<glyph unicode="š" horiz-adv-x="1012" d="M698 758q-15 15 -34.5 31t-44.5 28.5t-56.5 20.5t-68.5 8q-60 0 -97 -20t-37 -58q0 -21 17 -39t45.5 -33t65.5 -28.5t77 -26.5q67 -22 129.5 -50t110 -68t76 -96t28.5 -134q0 -77 -29 -136t-82.5 -99.5t-129 -61.5t-168.5 -21q-64 0 -125.5 13.5t-116.5 38t-100 59.5 t-76 78l176 149q17 -16 41.5 -35.5t56 -36.5t69 -28.5t79.5 -11.5q69 0 113.5 20.5t44.5 71.5q0 25 -15.5 45t-44.5 37.5t-71 34.5t-95 36q-74 27 -134 56t-102 66.5t-64.5 87.5t-22.5 120q0 72 30 127t81.5 92.5 [...]
-<glyph unicode="Ţ" horiz-adv-x="1280" d="M612 -500q-86 0 -140 27.5t-85 69.5l115 86q12 -16 33.5 -27.5t50.5 -11.5q48 0 73 21t25 59q0 37 -26 58t-74 21t-88 -28l-56 67l129 158h-67v1167h-410v267h1096v-267h-410v-1167h-61l-58 -94q60 0 103.5 -14t72 -39t42.5 -59t14 -73q0 -48 -18.5 -88.5t-54 -70 t-87.5 -46t-119 -16.5z" />
-<glyph unicode="ţ" horiz-adv-x="805" d="M434 -500q-87 0 -140.5 27.5t-84.5 69.5l115 86q12 -16 33.5 -27.5t50.5 -11.5q48 0 73 21t25 59q0 37 -26 58t-74 21q-47 0 -89 -28l-55 67l117 144q-90 20 -129 89t-39 177v561h-150v234h150v305l266 127v-432h234v-234h-234v-522q0 -40 19.5 -61t54.5 -21 q40 0 83 11.5t89 33.5l-31 -231q-72 -35 -166 -46l-45 -71q60 0 103.5 -14t72 -39t42.5 -59t14 -73q0 -48 -18.5 -88.5t-54 -70t-87.5 -46t-119 -16.5z" />
-<glyph unicode="Ť" horiz-adv-x="1280" d="M778 1167v-1167h-276v1167h-410v267h1096v-267h-410zM764 1569h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ť" horiz-adv-x="975" d="M465 -25q-68 0 -116 19t-78.5 54.5t-45 87t-14.5 116.5v561h-150v234h150v305l266 127v-432h234v-234h-234v-522q0 -40 19.5 -61t54.5 -21q40 0 83 11.5t89 33.5l-31 -231q-46 -23 -104 -35.5t-123 -12.5zM924 1720v-16q0 -86 -16.5 -159t-46 -135.5t-71 -116.5 t-91.5 -103l-86 65q14 28 25.5 77t20 110.5t13 132.5t4.5 145h248z" />
-<glyph unicode="Ŧ" horiz-adv-x="1280" d="M778 1167v-385h234v-256h-234v-526h-276v526h-234v256h234v385h-410v267h1096v-267h-410z" />
-<glyph unicode="ŧ" horiz-adv-x="825" d="M475 -25q-68 0 -116 19t-78.5 54.5t-45 87t-14.5 116.5v147h-149v234h149v180h-149v234h149v305l266 127v-432h234v-234h-234v-180h234v-234h-234v-108q0 -40 19.5 -61t54.5 -21q40 0 83 11.5t89 33.5l-31 -231q-46 -23 -104 -35.5t-123 -12.5z" />
-<glyph unicode="Ũ" horiz-adv-x="1503" d="M752 -25q-73 0 -144.5 12.5t-135.5 40.5t-118.5 72.5t-94.5 108.5t-62.5 148t-22.5 192v885h277v-885q0 -89 20.5 -148.5t59.5 -94.5t95 -49.5t126 -14.5q69 0 124.5 14.5t95 49.5t60.5 94.5t21 148.5v885h276v-885q0 -160 -48 -269.5t-128.5 -177t-184.5 -97.5t-216 -30 zM913 1583q-54 0 -99.5 14.5t-85.5 32.5t-77 32.5t-76 14.5q-38 0 -65.5 -19.5t-30.5 -66.5h-125q0 61 12 113t39.5 90t73 59t111.5 21q54 0 99.5 -14.5t85.5 -32.5t77 -32.5t76 -14.5q38 0 65.5 19.5t30.5 6 [...]
-<glyph unicode="ũ" d="M791 0v98q-48 -59 -119 -91t-162 -32q-67 0 -132.5 20.5t-117.5 68.5t-84.5 127t-32.5 196v660h267v-615q0 -53 14 -91t38.5 -62.5t57.5 -36t72 -11.5q41 0 77.5 10.5t63.5 34t42.5 61t15.5 91.5v619h266v-1047h-266zM735 1194q-54 0 -99.5 14.5t-85.5 32.5t-77 32.5 t-76 14.5q-38 0 -65.5 -19.5t-30.5 -66.5h-125q0 61 12 113t39.5 90t73 59t111.5 21q54 0 99.5 -14.5t85.5 -32.5t77 -32.5t76 -14.5q38 0 65.5 19.5t30.5 66.5h125q0 -62 -12 -113.5t-40 -89.5t-73 -59t-111 -21z" />
-<glyph unicode="Ū" horiz-adv-x="1503" d="M752 -25q-73 0 -144.5 12.5t-135.5 40.5t-118.5 72.5t-94.5 108.5t-62.5 148t-22.5 192v885h277v-885q0 -89 20.5 -148.5t59.5 -94.5t95 -49.5t126 -14.5q69 0 124.5 14.5t95 49.5t60.5 94.5t21 148.5v885h276v-885q0 -160 -48 -269.5t-128.5 -177t-184.5 -97.5t-216 -30 zM413 1591v217h676v-217h-676z" />
-<glyph unicode="ū" d="M791 0v98q-48 -59 -119 -91t-162 -32q-67 0 -132.5 20.5t-117.5 68.5t-84.5 127t-32.5 196v660h267v-615q0 -53 14 -91t38.5 -62.5t57.5 -36t72 -11.5q41 0 77.5 10.5t63.5 34t42.5 61t15.5 91.5v619h266v-1047h-266zM235 1202v217h676v-217h-676z" />
-<glyph unicode="Ŭ" horiz-adv-x="1503" d="M752 -25q-73 0 -144.5 12.5t-135.5 40.5t-118.5 72.5t-94.5 108.5t-62.5 148t-22.5 192v885h277v-885q0 -89 20.5 -148.5t59.5 -94.5t95 -49.5t126 -14.5q69 0 124.5 14.5t95 49.5t60.5 94.5t21 148.5v885h276v-885q0 -160 -48 -269.5t-128.5 -177t-184.5 -97.5t-216 -30 zM751 1560q-101 0 -174 29t-121 77t-71 111t-23 132h205q0 -29 10.5 -57t32.5 -50t57 -35.5t84 -13.5t84.5 13.5t58 35.5t33.5 50t11 57h205q0 -69 -23.5 -132.5t-71.5 -111.5t-122 -76.5t-175 -28.5z" />
-<glyph unicode="ŭ" d="M791 0v98q-48 -59 -119 -91t-162 -32q-67 0 -132.5 20.5t-117.5 68.5t-84.5 127t-32.5 196v660h267v-615q0 -53 14 -91t38.5 -62.5t57.5 -36t72 -11.5q41 0 77.5 10.5t63.5 34t42.5 61t15.5 91.5v619h266v-1047h-266zM573 1171q-101 0 -174 29t-121 77t-71 111t-23 132 h205q0 -29 10.5 -57t32.5 -50t57 -35.5t84 -13.5t84.5 13.5t58 35.5t33.5 50t11 57h205q0 -69 -23.5 -132.5t-71.5 -111.5t-122 -76.5t-175 -28.5z" />
-<glyph unicode="Ů" horiz-adv-x="1503" d="M752 -25q-73 0 -144.5 12.5t-135.5 40.5t-118.5 72.5t-94.5 108.5t-62.5 148t-22.5 192v885h277v-885q0 -89 20.5 -148.5t59.5 -94.5t95 -49.5t126 -14.5q69 0 124.5 14.5t95 49.5t60.5 94.5t21 148.5v885h276v-885q0 -160 -48 -269.5t-128.5 -177t-184.5 -97.5t-216 -30 zM751 1646q32 0 56 23.5t24 58.5t-24 58.5t-56 23.5q-34 0 -56.5 -23.5t-22.5 -58.5t22.5 -58.5t56.5 -23.5zM751 1464q-54 0 -102.5 21t-85 57t-57.5 84t-21 102q0 55 21 103t57.5 83.5t85 56t102.5 20.5q55 [...]
-<glyph unicode="ů" d="M791 0v98q-48 -59 -119 -91t-162 -32q-67 0 -132.5 20.5t-117.5 68.5t-84.5 127t-32.5 196v660h267v-615q0 -53 14 -91t38.5 -62.5t57.5 -36t72 -11.5q41 0 77.5 10.5t63.5 34t42.5 61t15.5 91.5v619h266v-1047h-266zM573 1257q32 0 56 23.5t24 58.5t-24 58.5t-56 23.5 q-34 0 -56.5 -23.5t-22.5 -58.5t22.5 -58.5t56.5 -23.5zM573 1075q-54 0 -102.5 21t-85 57t-57.5 84t-21 102q0 55 21 103t57.5 83.5t85 56t102.5 20.5q55 0 103.5 -20.5t85 -56t57.5 -83.5t21 -103q0 -54 -21 -102t-57.5 -84t-85 [...]
-<glyph unicode="Ű" horiz-adv-x="1503" d="M752 -25q-73 0 -144.5 12.5t-135.5 40.5t-118.5 72.5t-94.5 108.5t-62.5 148t-22.5 192v885h277v-885q0 -89 20.5 -148.5t59.5 -94.5t95 -49.5t126 -14.5q69 0 124.5 14.5t95 49.5t60.5 94.5t21 148.5v885h276v-885q0 -160 -48 -269.5t-128.5 -177t-184.5 -97.5t-216 -30 zM588 1569h-258l291 319h331zM1038 1569h-258l291 319h332z" />
-<glyph unicode="ű" d="M791 0v98q-48 -59 -119 -91t-162 -32q-67 0 -132.5 20.5t-117.5 68.5t-84.5 127t-32.5 196v660h267v-615q0 -53 14 -91t38.5 -62.5t57.5 -36t72 -11.5q41 0 77.5 10.5t63.5 34t42.5 61t15.5 91.5v619h266v-1047h-266zM410 1180h-258l291 319h331zM860 1180h-258l291 319 h332z" />
-<glyph unicode="Ų" horiz-adv-x="1503" d="M793 -500q-58 0 -105 16t-80 45t-51 69.5t-18 90.5q0 81 45.5 144.5t130.5 109.5q-105 5 -203 37.5t-173 100t-120 174.5t-45 262v885h277v-885q0 -89 20.5 -148.5t59.5 -94.5t95 -49.5t126 -14.5q69 0 124.5 14.5t95 49.5t60.5 94.5t21 148.5v885h276v-885 q0 -120 -26.5 -209.5t-73.5 -154.5t-111 -109t-139 -72q-61 -23 -103.5 -55t-69 -67.5t-38.5 -71.5t-12 -66q0 -48 23 -69t55 -21q27 0 47 11.5t32 27.5l123 -88q-15 -17 -36.5 -35.5t-50.5 -34t-67.5 -25.5t-88.5 -10z" />
-<glyph unicode="ų" d="M823 -500q-57 0 -104 16t-80.5 45t-51.5 69.5t-18 90.5q0 91 58 161t164 118v98q-48 -59 -119 -91t-162 -32q-67 0 -132.5 20.5t-117.5 68.5t-84.5 127t-32.5 196v660h267v-615q0 -53 14 -91t38.5 -62.5t57.5 -36t72 -11.5q41 0 77.5 10.5t63.5 34t42.5 61t15.5 91.5v619 h266v-1047q-74 -21 -125.5 -51.5t-84 -65t-47 -71t-14.5 -68.5q0 -48 23 -69t55 -21q28 0 48 11.5t32 27.5l123 -88q-15 -17 -36.5 -35.5t-51 -34t-68 -25.5t-88.5 -10z" />
-<glyph unicode="Ŵ" horiz-adv-x="1786" d="M1694 1434l-310 -1434h-258l-217 844q-9 34 -13 60.5l-7 47.5q-3 -21 -8 -47.5t-13 -58.5l-211 -846h-258l-307 1434h285l160 -807q8 -37 11.5 -61l6.5 -46q3 22 7 46.5t13 62.5l197 805h240l200 -805q10 -38 14 -62.5t7 -46.5l6.5 46t11.5 61l160 807h283zM1065 1569 l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="ŵ" horiz-adv-x="1538" d="M1171 0h-235l-133 522q-4 14 -8.5 33.5l-9.5 41l-9.5 42.5t-7.5 39q-3 -18 -7.5 -39l-9.5 -42.5l-9.5 -41t-8.5 -33.5l-133 -522h-235l-293 1047h262l123 -510q8 -31 14 -68.5t12 -69.5l15 69.5t16 68.5l133 510h242l131 -510q8 -31 16 -68.5l15 -69.5q6 32 12 69.5 t14 68.5l127 510h262zM940 1180l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="Ŷ" horiz-adv-x="1458" d="M592 0v559l-531 875h314l354 -586l354 586h314l-531 -875v-559h-274zM901 1569l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="ŷ" horiz-adv-x="1167" d="M561 -397h-276l159 411l-393 1033h273l229 -605q18 -47 31 -96q6 25 14 49l16 47l230 605h272zM757 1180l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="Ÿ" horiz-adv-x="1458" d="M592 0v559l-531 875h314l354 -586l354 586h314l-531 -875v-559h-274zM535 1579q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5z M924 1579q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5z" />
-<glyph unicode="Ź" horiz-adv-x="1362" d="M113 0v211l749 960h-704v263h1071v-211l-756 -961h756v-262h-1116zM711 1569h-258l291 319h332z" />
-<glyph unicode="ź" horiz-adv-x="1114" d="M113 0v209l540 600h-501v238h839v-203l-538 -606h536v-238h-876zM555 1180h-258l291 319h332z" />
-<glyph unicode="Ż" horiz-adv-x="1362" d="M113 0v211l749 960h-704v263h1071v-211l-756 -961h756v-262h-1116zM690 1579q-32 0 -59 11.5t-47 31.5t-31.5 47t-11.5 59q0 31 11.5 58.5t31.5 48t47 32t59 11.5q31 0 58.5 -11.5t48 -32t32 -48t11.5 -58.5q0 -32 -11.5 -59t-32 -47t-48 -31.5t-58.5 -11.5z" />
-<glyph unicode="ż" horiz-adv-x="1114" d="M113 0v209l540 600h-501v238h839v-203l-538 -606h536v-238h-876zM573 1190q-32 0 -59 11.5t-47 31.5t-31.5 47t-11.5 59q0 31 11.5 58.5t31.5 48t47 32t59 11.5q31 0 58.5 -11.5t48 -32t32 -48t11.5 -58.5q0 -32 -11.5 -59t-32 -47t-48 -31.5t-58.5 -11.5z" />
-<glyph unicode="Ž" horiz-adv-x="1362" d="M113 0v211l749 960h-704v263h1071v-211l-756 -961h756v-262h-1116zM817 1569h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ž" horiz-adv-x="1114" d="M113 0v209l540 600h-501v238h839v-203l-538 -606h536v-238h-876zM700 1180h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ſ" horiz-adv-x="666" d="M164 0v1206q0 78 22.5 127t59 76.5t82.5 38t94 10.5q63 0 119.5 -11.5t113.5 -39.5v-217q-31 12 -54 18.5t-40.5 10.5t-30.5 5t-24 1q-34 0 -55 -17.5t-21 -54.5v-1153h-266z" />
-<glyph unicode="ƒ" horiz-adv-x="791" d="M-35 -289q81 18 137 37.5t90.5 45t50 59t15.5 77.5v879h-145v238h145v159q0 78 22.5 127t59 76.5t82.5 38t94 10.5q63 0 120 -11.5t114 -39.5v-217q-31 12 -54 18.5t-41 10.5t-31 5t-24 1q-34 0 -55 -17.5t-21 -54.5v-106h226v-238h-226v-813q0 -45 -4 -90t-15 -87.5 t-31 -81.5t-52 -73q-37 -39 -77.5 -66.5t-82.5 -46t-83 -30.5t-79 -21z" />
-<glyph unicode="Ǎ" horiz-adv-x="1520" d="M1147 0l-111 297h-553l-110 -297h-297l549 1434h270l549 -1434h-297zM805 922q-11 26 -23.5 62t-21.5 67q-8 -31 -21 -67t-24 -62l-142 -373h373zM860 1569h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ǎ" horiz-adv-x="1143" d="M748 0v76q-18 -19 -43.5 -37t-59.5 -32.5t-75.5 -23t-92.5 -8.5q-81 0 -151 22t-122 66t-82 108.5t-30 150.5q0 90 36 155.5t94.5 108t133 62.5t152.5 20q75 0 133.5 -13.5t106.5 -36.5v54q0 86 -49 126t-140 40q-93 0 -171 -27t-152 -78l-82 215q37 25 83.5 47.5 t100.5 39.5t115.5 26.5t128.5 9.5q47 0 97 -5t97.5 -20t90 -43t74.5 -72.5t50.5 -109t18.5 -153.5v-668h-262zM528 209q73 0 127 25.5t93 60.5v113q-41 15 -91.5 24.5t-103.5 9.5q-41 0 -77.5 -6.5t-64 -21t-43.5 -3 [...]
-<glyph unicode="Ǐ" horiz-adv-x="666" d="M195 0v1434h276v-1434h-276zM459 1569h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ǐ" horiz-adv-x="578" d="M418 1180h-256l-288 319h245l172 -141l172 141h246zM156 0v1047h266v-1047h-266z" />
-<glyph unicode="Ǒ" horiz-adv-x="1530" d="M766 -25q-116 0 -208 30.5t-162.5 83t-120.5 124t-81.5 154t-46 172t-14.5 178.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5t207.5 -30.5t161.5 -83t120 -124t81.5 -154t46 -172t14.5 -177.5q0 -89 -14.5 -178.5t-46 -172t-81.5 -154t-120 -124 t-161.5 -83t-207.5 -30.5zM766 242q96 0 161.5 45t106 115t58.5 153.5t18 161.5q0 55 -7.5 111.5t-24.5 109t-44 99t-65 81t-88.5 54.5t-114.5 20q-65 0 -115.5 -21t-89 -56.5t-65.5 -82.5t-44 -100t-24.5 -108t-7 [...]
-<glyph unicode="ǒ" horiz-adv-x="1167" d="M584 -25q-111 0 -200 37.5t-151.5 108t-96.5 172t-34 229.5t34 230t96.5 173t151.5 108.5t200 37.5t200 -37.5t151.5 -108.5t96 -173t33.5 -230t-33.5 -229.5t-96 -172t-151.5 -108t-200 -37.5zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 69 -15 123t-42.5 92 t-67.5 58t-90 20t-90 -20t-67.5 -58t-42.5 -92t-15 -123q0 -68 15 -122t42.5 -91.5t67.5 -57.5t90 -20zM708 1180h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="Ǔ" horiz-adv-x="1503" d="M752 -25q-73 0 -144.5 12.5t-135.5 40.5t-118.5 72.5t-94.5 108.5t-62.5 148t-22.5 192v885h277v-885q0 -89 20.5 -148.5t59.5 -94.5t95 -49.5t126 -14.5q69 0 124.5 14.5t95 49.5t60.5 94.5t21 148.5v885h276v-885q0 -160 -48 -269.5t-128.5 -177t-184.5 -97.5t-216 -30 zM876 1569h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ǔ" d="M791 0v98q-48 -59 -119 -91t-162 -32q-67 0 -132.5 20.5t-117.5 68.5t-84.5 127t-32.5 196v660h267v-615q0 -53 14 -91t38.5 -62.5t57.5 -36t72 -11.5q41 0 77.5 10.5t63.5 34t42.5 61t15.5 91.5v619h266v-1047h-266zM702 1180h-256l-288 319h245l172 -141l172 141h246z " />
-<glyph unicode="Ǽ" horiz-adv-x="1851" d="M805 0v297h-371l-168 -297h-297l832 1434h895v-263h-615v-301h326v-262h-326v-346h658v-262h-934zM805 549v409q-9 -24 -21.5 -49l-27.5 -53l-174 -307h223zM1143 1569h-258l291 319h332z" />
-<glyph unicode="ǽ" horiz-adv-x="1753" d="M1196 -25q-121 0 -215 43.5t-156 124.5q-83 -92 -179 -130t-193 -38q-75 0 -141 22t-115 66t-77 108.5t-28 150.5q0 92 34 158t88.5 109t124.5 63t142 20q71 0 126 -13t100 -36v49q0 86 -45 126t-130 40t-156.5 -27t-139.5 -78l-82 215q36 25 80 47.5t95.5 39.5 t109.5 26.5t122 9.5q41 0 84.5 -4.5t85.5 -17t79.5 -35t66.5 -58.5q55 56 137.5 85.5t181.5 29.5q97 0 182 -30t148 -91t99 -153.5t36 -218.5q0 -31 -3 -72t-11 -86h-678q4 -51 24 -90t50.5 -66t69.5 -41t83 -14q72 0 [...]
-<glyph unicode="Ǿ" horiz-adv-x="1530" d="M786 1569h-258l291 319h332zM133 -25l152 226q-40 53 -69 114.5t-47.5 128t-27 136t-8.5 137.5q0 88 14.5 177.5t46 172t81.5 154t120.5 124t162.5 83t208 30.5q94 0 172 -20t143 -58l54 78h262l-152 -225q40 -53 69 -114.5t47.5 -128t27 -136t8.5 -137.5 q0 -89 -14.5 -178.5t-46 -172t-81.5 -154t-120 -124t-161.5 -83t-207.5 -30.5q-96 0 -175 20.5t-142 57.5l-54 -78h-262zM420 717q0 -63 10 -128t33 -124l463 684q-34 20 -73 31.5t-87 11.5q-65 0 -115.5 -21t-89 -56.5t-65. [...]
-<glyph unicode="ǿ" horiz-adv-x="1169" d="M561 1180h-258l291 319h332zM102 -25l111 168q-54 70 -82.5 165.5t-28.5 213.5q0 128 34 230t96.5 173t151.5 108.5t200 37.5q60 0 114 -11.5t101 -33.5l28 45h240l-113 -170q54 -71 82.5 -165.5t28.5 -213.5q0 -128 -33.5 -229.5t-96 -172t-151.5 -108t-200 -37.5 q-60 0 -113.5 11.5t-99.5 31.5l-29 -43h-240zM584 231q50 0 90 20t67.5 57.5t42.5 91.5t15 122q0 34 -4 67.5t-11 55.5l-266 -403q16 -7 31.5 -9t34.5 -2zM369 522q0 -34 4 -65.5t10 -57.5l268 406q-12 5 -31 7.5t- [...]
-<glyph unicode="Ș" horiz-adv-x="1300" d="M872 1044q-13 32 -35 60t-52.5 48.5t-70 32t-87.5 11.5q-90 0 -140.5 -36t-50.5 -101q0 -43 32 -72t84 -52.5t118.5 -44.5t136.5 -46.5t136.5 -60t118.5 -85t84 -120t32 -166.5q0 -105 -40 -186t-109.5 -137t-164.5 -85t-205 -29q-101 0 -189 27.5t-159.5 78.5 t-124.5 124t-84 164l252 94q23 -50 55.5 -91.5t72.5 -71t86.5 -46t99.5 -16.5q52 0 94.5 11t73 33t47 54.5t16.5 75.5q0 52 -32 87.5t-84 62t-118.5 48l-136 45.5t-136 56t-118.5 79t-84 113t-32 160q0 81 32.5 152.5t9 [...]
-<glyph unicode="ș" horiz-adv-x="1012" d="M698 758q-15 15 -34.5 31t-44.5 28.5t-56.5 20.5t-68.5 8q-60 0 -97 -20t-37 -58q0 -21 17 -39t45.5 -33t65.5 -28.5t77 -26.5q67 -22 129.5 -50t110 -68t76 -96t28.5 -134q0 -77 -29 -136t-82.5 -99.5t-129 -61.5t-168.5 -21q-64 0 -125.5 13.5t-116.5 38t-100 59.5 t-76 78l176 149q17 -16 41.5 -35.5t56 -36.5t69 -28.5t79.5 -11.5q69 0 113.5 20.5t44.5 71.5q0 25 -15.5 45t-44.5 37.5t-71 34.5t-95 36q-74 27 -134 56t-102 66.5t-64.5 87.5t-22.5 120q0 72 30 127t81.5 92.5 [...]
-<glyph unicode="Ț" horiz-adv-x="1280" d="M778 1167v-1167h-276v1167h-410v267h1096v-267h-410zM518 -516q35 34 62 70t41 81q-26 0 -50 9.5t-42.5 27.5t-29.5 43.5t-11 57.5q0 35 12 63.5t32 49t47.5 31.5t59.5 11q69 0 113.5 -43t44.5 -125q0 -98 -44.5 -177t-141.5 -158z" />
-<glyph unicode="ț" horiz-adv-x="805" d="M465 -25q-68 0 -116 19t-78.5 54.5t-45 87t-14.5 116.5v561h-150v234h150v305l266 127v-432h234v-234h-234v-522q0 -40 19.5 -61t54.5 -21q40 0 83 11.5t89 33.5l-31 -231q-46 -23 -104 -35.5t-123 -12.5zM324 -516q35 34 62 70t41 81q-26 0 -50 9.5t-42.5 27.5 t-29.5 43.5t-11 57.5q0 35 12 63.5t32 49t47.5 31.5t59.5 11q69 0 113.5 -43t44.5 -125q0 -98 -44.5 -177t-141.5 -158z" />
-<glyph unicode="ȷ" horiz-adv-x="578" d="M-137 -227q81 18 137 37.5t90.5 45t50 58.5t15.5 78v1055h266v-990q0 -45 -4 -89.5t-15 -87t-31 -81.5t-53 -73q-37 -39 -77.5 -66.5t-82 -46.5t-83 -31t-78.5 -20z" />
-<glyph unicode="" horiz-adv-x="578" d="M-137 -227q81 18 137 37.5t90.5 45t50 58.5t15.5 78v1055h266v-990q0 -45 -4 -89.5t-15 -87t-31 -81.5t-53 -73q-37 -39 -77.5 -66.5t-82 -46.5t-83 -31t-78.5 -20z" />
-<glyph unicode="ˆ" horiz-adv-x="973" d="M657 1180l-172 141l-172 -141h-245l290 319h256l289 -319h-246z" />
-<glyph unicode="ˇ" horiz-adv-x="973" d="M612 1180h-256l-288 319h245l172 -141l172 141h246z" />
-<glyph unicode="ˉ" horiz-adv-x="973" d="M147 1202v217h676v-217h-676z" />
-<glyph unicode="˘" horiz-adv-x="973" d="M485 1171q-101 0 -174 29t-121 77t-71 111t-23 132h205q0 -29 10.5 -57t32.5 -50t57 -35.5t84 -13.5t84.5 13.5t58 35.5t33.5 50t11 57h205q0 -69 -23.5 -132.5t-71.5 -111.5t-122 -76.5t-175 -28.5z" />
-<glyph unicode="˙" horiz-adv-x="973" d="M485 1190q-32 0 -59 11.5t-47 31.5t-31.5 47t-11.5 59q0 31 11.5 58.5t31.5 48t47 32t59 11.5q31 0 58.5 -11.5t48 -32t32 -48t11.5 -58.5q0 -32 -11.5 -59t-32 -47t-48 -31.5t-58.5 -11.5z" />
-<glyph unicode="˚" horiz-adv-x="973" d="M485 1257q32 0 56 23.5t24 58.5t-24 58.5t-56 23.5q-34 0 -56.5 -23.5t-22.5 -58.5t22.5 -58.5t56.5 -23.5zM485 1075q-54 0 -102.5 21t-85 57t-57.5 84t-21 102q0 55 21 103t57.5 83.5t85 56t102.5 20.5q55 0 103.5 -20.5t85 -56t57.5 -83.5t21 -103q0 -54 -21 -102 t-57.5 -84t-85 -57t-103.5 -21z" />
-<glyph unicode="˛" horiz-adv-x="973" d="M492 -500q-58 0 -105 16t-80 45t-51 69.5t-18 90.5q0 103 74.5 179.5t205.5 119.5h184q-69 -28 -116 -62.5t-76.5 -71.5t-42 -73.5t-12.5 -68.5q0 -48 22.5 -69t54.5 -21q28 0 48 11.5t32 27.5l123 -88q-15 -17 -36.5 -35.5t-50.5 -34t-67.5 -25.5t-88.5 -10z" />
-<glyph unicode="˜" horiz-adv-x="973" d="M647 1194q-54 0 -99.5 14.5t-85.5 32.5t-77 32.5t-76 14.5q-38 0 -65.5 -19.5t-30.5 -66.5h-125q0 61 12 113t39.5 90t73 59t111.5 21q54 0 99.5 -14.5t85.5 -32.5t77 -32.5t76 -14.5q38 0 65.5 19.5t30.5 66.5h125q0 -62 -12 -113.5t-40 -89.5t-73 -59t-111 -21z" />
-<glyph unicode="˝" horiz-adv-x="973" d="M209 1180h-258l291 319h331zM659 1180h-258l291 319h332z" />
-<glyph unicode="μ" horiz-adv-x="1221" d="M801 0v96q-29 -52 -85 -86.5t-134 -34.5q-53 0 -95 16t-65 36v-312l-268 -125v1457h266v-615q0 -53 14 -91t38.5 -62.5t58 -36t71.5 -11.5q42 0 78 10.5t63 34t42.5 61t15.5 91.5v619h266v-1047h-266z" />
-<glyph unicode="π" horiz-adv-x="1364" d="M1055 -25q-68 0 -116 19t-79 54.5t-45 87t-14 116.5v518h-238v-37q0 -133 -10 -251.5t-41.5 -218t-90 -179t-155.5 -133.5l-225 178q51 31 98.5 69t85 97t60 145.5t22.5 214.5v115h-178v277h1165v-277h-227v-479q0 -40 19.5 -61t54.5 -21q40 0 82.5 11.5t89.5 33.5 l-31 -231q-46 -23 -104 -35.5t-123 -12.5z" />
-<glyph unicode="Ẁ" horiz-adv-x="1786" d="M1694 1434l-310 -1434h-258l-217 844q-9 34 -13 60.5l-7 47.5q-3 -21 -8 -47.5t-13 -58.5l-211 -846h-258l-307 1434h285l160 -807q8 -37 11.5 -61l6.5 -46q3 22 7 46.5t13 62.5l197 805h240l200 -805q10 -38 14 -62.5t7 -46.5l6.5 46t11.5 61l160 807h283zM914 1569 l-365 319h332l291 -319h-258z" />
-<glyph unicode="ẁ" horiz-adv-x="1538" d="M1171 0h-235l-133 522q-4 14 -8.5 33.5l-9.5 41l-9.5 42.5t-7.5 39q-3 -18 -7.5 -39l-9.5 -42.5l-9.5 -41t-8.5 -33.5l-133 -522h-235l-293 1047h262l123 -510q8 -31 14 -68.5t12 -69.5l15 69.5t16 68.5l133 510h242l131 -510q8 -31 16 -68.5l15 -69.5q6 32 12 69.5 t14 68.5l127 510h262zM789 1180l-365 319h332l291 -319h-258z" />
-<glyph unicode="Ẃ" horiz-adv-x="1786" d="M1694 1434l-310 -1434h-258l-217 844q-9 34 -13 60.5l-7 47.5q-3 -21 -8 -47.5t-13 -58.5l-211 -846h-258l-307 1434h285l160 -807q8 -37 11.5 -61l6.5 -46q3 22 7 46.5t13 62.5l197 805h240l200 -805q10 -38 14 -62.5t7 -46.5l6.5 46t11.5 61l160 807h283zM872 1569 h-258l291 319h332z" />
-<glyph unicode="ẃ" horiz-adv-x="1538" d="M1171 0h-235l-133 522q-4 14 -8.5 33.5l-9.5 41l-9.5 42.5t-7.5 39q-3 -18 -7.5 -39l-9.5 -42.5l-9.5 -41t-8.5 -33.5l-133 -522h-235l-293 1047h262l123 -510q8 -31 14 -68.5t12 -69.5l15 69.5t16 68.5l133 510h242l131 -510q8 -31 16 -68.5l15 -69.5q6 32 12 69.5 t14 68.5l127 510h262zM747 1180h-258l291 319h332z" />
-<glyph unicode="Ẅ" horiz-adv-x="1786" d="M1694 1434l-310 -1434h-258l-217 844q-9 34 -13 60.5l-7 47.5q-3 -21 -8 -47.5t-13 -58.5l-211 -846h-258l-307 1434h285l160 -807q8 -37 11.5 -61l6.5 -46q3 22 7 46.5t13 62.5l197 805h240l200 -805q10 -38 14 -62.5t7 -46.5l6.5 46t11.5 61l160 807h283zM699 1579 q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58.5 -11.5zM1088 1579q-32 0 -59 [...]
-<glyph unicode="ẅ" horiz-adv-x="1538" d="M1171 0h-235l-133 522q-4 14 -8.5 33.5l-9.5 41l-9.5 42.5t-7.5 39q-3 -18 -7.5 -39l-9.5 -42.5l-9.5 -41t-8.5 -33.5l-133 -522h-235l-293 1047h262l123 -510q8 -31 14 -68.5t12 -69.5l15 69.5t16 68.5l133 510h242l131 -510q8 -31 16 -68.5l15 -69.5q6 32 12 69.5 t14 68.5l127 510h262zM574 1190q-32 0 -59 11.5t-47.5 31.5t-32 47t-11.5 59q0 31 11.5 58.5t32 48t47.5 32t59 11.5q31 0 58.5 -11.5t47.5 -32t31.5 -48t11.5 -58.5q0 -32 -11.5 -59t-31.5 -47t-47.5 -31.5t-58. [...]
-<glyph unicode="Ỳ" horiz-adv-x="1458" d="M592 0v559l-531 875h314l354 -586l354 586h314l-531 -875v-559h-274zM709 1569l-365 319h332l291 -319h-258z" />
-<glyph unicode="ỳ" horiz-adv-x="1167" d="M561 -397h-276l159 411l-393 1033h273l229 -605q18 -47 31 -96q6 25 14 49l16 47l230 605h272zM607 1180l-365 319h332l291 -319h-258z" />
-<glyph unicode=" " horiz-adv-x="995" />
-<glyph unicode=" " horiz-adv-x="1991" />
-<glyph unicode=" " horiz-adv-x="995" />
-<glyph unicode=" " horiz-adv-x="1991" />
-<glyph unicode=" " horiz-adv-x="663" />
-<glyph unicode=" " horiz-adv-x="497" />
-<glyph unicode=" " horiz-adv-x="331" />
-<glyph unicode=" " horiz-adv-x="331" />
-<glyph unicode=" " horiz-adv-x="248" />
-<glyph unicode=" " horiz-adv-x="398" />
-<glyph unicode=" " horiz-adv-x="110" />
-<glyph unicode="‐" horiz-adv-x="844" d="M154 477v256h536v-256h-536z" />
-<glyph unicode="‑" horiz-adv-x="844" d="M154 477v256h536v-256h-536z" />
-<glyph unicode="‒" horiz-adv-x="844" d="M154 477v256h536v-256h-536z" />
-<glyph unicode="–" horiz-adv-x="1014" d="M102 471v234h809v-234h-809z" />
-<glyph unicode="—" horiz-adv-x="1178" d="M-10 471v234h1198v-234h-1198z" />
-<glyph unicode="―" horiz-adv-x="1178" d="M-10 471v234h1198v-234h-1198z" />
-<glyph unicode="‘" horiz-adv-x="506" d="M434 1374q-14 -12 -33 -34t-38.5 -48t-36 -54t-25.5 -52q50 -16 83.5 -56.5t33.5 -103.5q0 -38 -14 -69.5t-38 -54t-55 -34.5t-65 -12q-36 0 -67.5 13t-55.5 38t-37.5 61.5t-13.5 84.5q0 60 20 118.5t54.5 112t80 98t95.5 76.5z" />
-<glyph unicode="’" horiz-adv-x="506" d="M72 940q14 12 33 34t38.5 48t36 54t25.5 52q-50 16 -83.5 56.5t-33.5 103.5q0 39 14 70t38 53.5t55 34.5t65 12q36 0 67.5 -13t55.5 -37.5t37.5 -61t13.5 -84.5q0 -60 -20.5 -119t-55 -112.5t-79.5 -98.5t-95 -76z" />
-<glyph unicode="‚" horiz-adv-x="506" d="M72 -205q14 12 33 34t38.5 48t36 54t25.5 53q-50 15 -83.5 55.5t-33.5 103.5q0 39 14 70t38 53.5t55 34.5t65 12q36 0 67.5 -13t55.5 -37.5t37.5 -61t13.5 -84.5q0 -60 -20.5 -119t-55 -112.5t-79.5 -98.5t-95 -76z" />
-<glyph unicode="‛" horiz-adv-x="506" d="M322 856q-50 31 -95.5 76t-80 98.5t-54.5 112.5t-20 119q0 48 13.5 84.5t37.5 61t55.5 37.5t67.5 13q34 0 65 -12t55 -34.5t38 -53.5t14 -70q0 -63 -33.5 -103.5t-83.5 -56.5q9 -24 25.5 -52t36 -54t38.5 -48t33 -34z" />
-<glyph unicode="“" horiz-adv-x="995" d="M434 1374q-14 -12 -33 -34t-38.5 -48t-36 -54t-25.5 -52q50 -16 83.5 -56.5t33.5 -103.5q0 -38 -14 -69.5t-38 -54t-55 -34.5t-65 -12q-36 0 -67.5 13t-55.5 38t-37.5 61.5t-13.5 84.5q0 60 20 118.5t54.5 112t80 98t95.5 76.5zM924 1374q-14 -12 -33.5 -34t-39 -48 t-36 -54t-24.5 -52q49 -16 82.5 -56.5t33.5 -103.5q0 -38 -14 -69.5t-38 -54t-55 -34.5t-65 -12q-36 0 -67.5 13t-55.5 38t-37.5 61.5t-13.5 84.5q0 60 20.5 118.5t55 112t79.5 98t95 76.5z" />
-<glyph unicode="”" horiz-adv-x="995" d="M72 940q14 12 33 34t38.5 48t36 54t25.5 52q-50 16 -83.5 56.5t-33.5 103.5q0 39 14 70t38 53.5t55 34.5t65 12q36 0 67.5 -13t55.5 -37.5t37.5 -61t13.5 -84.5q0 -60 -20.5 -119t-55 -112.5t-79.5 -98.5t-95 -76zM561 940q14 12 33.5 34t39 48t36 54t24.5 52 q-49 16 -82.5 56.5t-33.5 103.5q0 39 14 70t38 53.5t55 34.5t65 12q36 0 67.5 -13t55.5 -37.5t37.5 -61t13.5 -84.5q0 -60 -20.5 -119t-55 -112.5t-79.5 -98.5t-95 -76z" />
-<glyph unicode="„" horiz-adv-x="995" d="M72 -205q14 12 33 34t38.5 48t36 54t25.5 53q-50 15 -83.5 55.5t-33.5 103.5q0 39 14 70t38 53.5t55 34.5t65 12q36 0 67.5 -13t55.5 -37.5t37.5 -61t13.5 -84.5q0 -60 -20.5 -119t-55 -112.5t-79.5 -98.5t-95 -76zM561 -205q14 12 33.5 34t39 48t36 54t24.5 53 q-49 15 -82.5 55.5t-33.5 103.5q0 39 14 70t38 53.5t55 34.5t65 12q36 0 67.5 -13t55.5 -37.5t37.5 -61t13.5 -84.5q0 -60 -20.5 -119t-55 -112.5t-79.5 -98.5t-95 -76z" />
-<glyph unicode="†" horiz-adv-x="1196" d="M715 868l16 -184l-63 -973h-140l-63 973l16 184l-205 -26h-143v256h143l205 -29l-16 133v256h266v-256l-16 -133l205 29h143v-256h-143z" />
-<glyph unicode="‡" horiz-adv-x="1196" d="M920 328h143v-256h-143l-205 28l16 -133v-256h-266v256l16 133l-205 -28h-143v256h143l205 -27l-16 184v199l16 184l-205 -26h-143v256h143l205 -29l-16 133v256h266v-256l-16 -133l205 29h143v-256h-143l-205 26l16 -184v-199l-16 -184z" />
-<glyph unicode="•" horiz-adv-x="862" d="M428 420q-65 0 -121 23t-97 63t-64 94.5t-23 116.5q0 61 23 115.5t64 95.5t97 64.5t121 23.5t122 -23.5t99 -64.5t66 -95.5t24 -115.5q0 -62 -24 -116.5t-66 -94.5t-99 -63t-122 -23z" />
-<glyph unicode="…" horiz-adv-x="2144" d="M356 -25q-37 0 -68.5 13t-54.5 35.5t-36 53.5t-13 66q0 36 13 67t36 54t54.5 36t68.5 13t69 -13t55 -36t36.5 -54t13.5 -67q0 -35 -13.5 -66t-36.5 -53.5t-55 -35.5t-69 -13zM1071 -25q-37 0 -68.5 13t-54.5 35.5t-36 53.5t-13 66q0 36 13 67t36 54t54.5 36t68.5 13 t68.5 -13t55 -36t37 -54t13.5 -67q0 -35 -13.5 -66t-37 -53.5t-55 -35.5t-68.5 -13zM1786 -25q-37 0 -68.5 13t-54.5 35.5t-36 53.5t-13 66q0 36 13 67t36 54t54.5 36t68.5 13t68.5 -13t55 -36t37 -54t13.5 -67q0 [...]
-<glyph unicode=" " horiz-adv-x="398" />
-<glyph unicode="‰" horiz-adv-x="2540" d="M440 709q-95 0 -161.5 33.5t-108 87.5t-60 120.5t-18.5 132.5q0 41 8 84.5t25.5 85.5t44.5 79t66 65t89.5 44.5t114.5 16.5q95 0 161.5 -33.5t108 -87.5t60 -121t18.5 -133q0 -61 -18 -127t-58.5 -121t-107.5 -90.5t-164 -35.5zM440 930q31 0 53.5 12.5t37 33.5 t21.5 48.5t7 58.5q0 69 -29.5 111.5t-89.5 42.5q-31 0 -53 -12.5t-36.5 -33.5t-21.5 -49t-7 -59q0 -68 29 -110.5t89 -42.5zM508 -25h-264l977 1483h264zM1288 -25q-96 0 -162.5 34t-107.5 87.5t-59.5 120.5t-18.5 13 [...]
-<glyph unicode="′" horiz-adv-x="508" d="M113 1495h323l-131 -682h-233z" />
-<glyph unicode="″" horiz-adv-x="918" d="M113 1495h323l-131 -682h-233zM522 1495h324l-131 -682h-234z" />
-<glyph unicode="‹" horiz-adv-x="690" d="M373 0l-312 524l312 523h286l-311 -523l311 -524h-286z" />
-<glyph unicode="›" horiz-adv-x="690" d="M31 0l311 524l-311 523h286l312 -523l-312 -524h-286z" />
-<glyph unicode="⁄" horiz-adv-x="743" d="M-246 -25l979 1483h256l-979 -1483h-256z" />
-<glyph unicode=" " horiz-adv-x="497" />
-<glyph unicode="ⁿ" horiz-adv-x="987" d="M655 580v505q0 43 -11.5 74t-31.5 51t-47.5 29.5t-58.5 9.5q-35 0 -64.5 -8.5t-52 -28t-35 -50t-12.5 -75.5v-507h-219v858h219v-80q38 48 96.5 74t132.5 26q55 0 109.5 -17t97.5 -56t69.5 -104t26.5 -161v-540h-219z" />
-<glyph unicode="₤" horiz-adv-x="1253" d="M584 401q-13 -40 -30 -76t-40 -65h678v-260h-1016v254q48 22 80.5 61.5t50.5 85.5h-153v205h178q-6 35 -16.5 69t-22.5 66h-139v205h67q-5 20 -6.5 41.5t-1.5 44.5q0 81 29.5 157.5t91 136t156 96t223.5 36.5q73 0 134.5 -9.5t117 -28.5t106.5 -47.5t103 -65.5l-105 -254 q-28 23 -62 48.5t-77 47t-96.5 35.5t-120.5 14q-61 0 -104 -14t-70 -36.5t-39.5 -52.5t-12.5 -61q0 -23 4.5 -44.5t10.5 -43.5h407v-205h-321q9 -31 14.5 -64t7.5 -71h299v-205h-325z" />
-<glyph unicode="€" horiz-adv-x="1309" d="M557 440q23 -99 80 -154t143 -55q71 0 129 38.5t105 107.5l213 -131q-37 -61 -83.5 -111t-102.5 -85.5t-121 -55t-140 -19.5q-117 0 -203.5 35.5t-148 98.5t-99.5 148.5t-57 186.5h-192l86 197h80q-1 9 -1.5 20.5t-0.5 22.5v20v13v18t2 29h-166l86 199h106 q20 101 60.5 191t104.5 157.5t154 107t210 39.5q94 0 190.5 -30.5t200.5 -106.5l-98 -242q-29 25 -60.5 47.5t-67.5 39.5t-76.5 26.5t-88.5 9.5q-50 0 -89.5 -17.5t-70.5 -49t-52.5 -75t-35.5 -95.5h496l-82 -201h-445q-2 [...]
-<glyph unicode="℅" horiz-adv-x="2198" d="M496 557q-90 0 -163 30.5t-124 88.5t-79 142t-28 190q0 105 28 188.5t79 141.5t124 89t163 31q133 0 224.5 -60.5t133.5 -191.5l-209 -69q-18 49 -56.5 80.5t-92.5 31.5q-41 0 -73 -17.5t-54.5 -49t-34.5 -76t-12 -98.5q0 -56 12 -100.5t34 -76t53.5 -48.5t70.5 -17 q57 0 97.5 32.5t57.5 94.5l209 -64q-38 -136 -129 -204t-231 -68zM1700 -25q-91 0 -164 30.5t-124.5 88.5t-79 141.5t-27.5 188.5t27.5 188.5t79 141.5t124.5 89t164 31t164 -31t124.5 -89t79 -141.5t27.5 -188.5 [...]
-<glyph unicode="ℓ" horiz-adv-x="1020" d="M535 -25q-59 0 -108 14.5t-84 46t-55 81t-20 119.5v174q-15 -8 -35.5 -18t-35.5 -15l-84 213q46 20 87.5 42l67.5 36v362q0 121 22.5 203.5t65.5 132t104 71t138 21.5q71 0 131.5 -20t104.5 -59.5t69 -98t25 -135.5q0 -114 -38 -203.5t-96 -160.5t-127 -125.5t-132 -96.5 v-254q0 -57 17.5 -76.5t49.5 -19.5q35 0 78 25t86 71l82 -190q-60 -66 -141 -103t-172 -37zM598 1253q-17 0 -29 -8t-19.5 -30.5t-11 -64t-3.5 -108.5v-211q26 24 49 54.5t40.5 66.5t27.5 77.5t10 88.5q0 65 [...]
-<glyph unicode="№" horiz-adv-x="2435" d="M1081 0l-569 836q-14 21 -30.5 51.5t-28.5 56.5q3 -23 4.5 -54t1.5 -54v-836h-275v1434h260l555 -826q13 -20 30 -50t30 -58q-3 29 -4.5 58.5t-1.5 49.5v826h274v-1434h-246zM1493 213v217h788v-217h-788zM1888 559q-90 0 -163 30.5t-124.5 88.5t-79.5 141.5t-28 188.5 t28 188.5t79.5 141.5t124.5 89t163 31q91 0 163.5 -31t123.5 -89t78.5 -141.5t27.5 -188.5t-27.5 -188.5t-78.5 -141.5t-123.5 -88.5t-163.5 -30.5zM1888 770q82 0 129 62.5t47 175.5q0 112 -47 176.5t-129 64 [...]
-<glyph unicode="™" horiz-adv-x="1731" d="M457 1251v-682h-199v682h-217v183h631v-183h-215zM1389 569v531l-19 -56l-213 -491l-215 491l-20 62v-537h-199v865h242l172 -398l18 -51l19 51l172 398h241v-865h-198z" />
-<glyph unicode="Ω" horiz-adv-x="1532" d="M844 0v219q64 68 111.5 133t79 133.5t47.5 144.5t16 167q0 72 -16.5 142.5t-55 127t-102.5 91t-158 34.5q-95 0 -158.5 -35t-102 -91t-55 -127t-16.5 -144q0 -90 16 -165.5t47.5 -144t79 -133.5t111.5 -133v-219h-545v266h232q-49 48 -91 106t-72.5 125t-47.5 142.5 t-17 159.5q0 77 14 155.5t44.5 152t79 137.5t117 111.5t159 75t205.5 27.5t205.5 -27.5t159 -75t116.5 -111.5t78.5 -137.5t44.5 -152t14 -155.5q0 -84 -17.5 -159.5t-48 -142.5t-72.5 -125t-91 -106h234v-266h-545z" />
-<glyph unicode="℮" horiz-adv-x="1597" d="M809 -25q-96 0 -186 23.5t-168.5 68t-143.5 109.5t-111.5 147.5t-72 181.5t-25.5 212q0 125 27.5 229.5t76 187.5t115.5 145t146.5 103t167.5 61.5t180 20.5q130 0 250 -47t213 -141t151 -233.5t66 -325.5h-1071v-449q24 -35 62.5 -66t88 -54t108.5 -36.5t126 -13.5 q146 0 272 67t224 218l104 -64q-52 -78 -111 -141.5t-131 -108.5t-160 -69.5t-198 -24.5zM1184 1182q-29 34 -69 63t-88.5 50.5t-104 33.5t-113.5 12q-121 0 -217 -36.5t-168 -104.5v-364l760 2v344z" />
-<glyph unicode="⅓" horiz-adv-x="1544" d="M28 -25l979 1483h256l-979 -1483h-256zM125 580v634h-105v170q35 0 60 4t42.5 12t29.5 19t20 25h178v-864h-225zM870 674q40 97 119 151t184 54q65 0 116 -20t86.5 -54t54 -78.5t18.5 -93.5q0 -32 -9 -60t-24 -52t-34.5 -42.5t-41.5 -29.5q63 -23 100 -73t37 -124 q0 -59 -20.5 -107.5t-60.5 -84t-97.5 -55t-132.5 -19.5q-121 0 -203.5 66.5t-119.5 185.5l206 53q14 -54 45.5 -79.5t73.5 -25.5t65 23.5t23 58.5q0 37 -25 59.5t-85 22.5h-78v189h76q46 0 65 20.5t19 48.5q0 29 -1 [...]
-<glyph unicode="⅔" horiz-adv-x="1876" d="M360 -25l979 1483h256l-979 -1483h-256zM51 1202q17 57 47 104t71.5 81t94.5 52.5t115 18.5q68 0 123.5 -19.5t95 -54.5t61 -84t21.5 -108q0 -61 -18 -103.5t-48.5 -74.5t-71.5 -56.5t-87 -48.5q-51 -28 -87 -57t-53 -64h363v-208h-629q0 100 23 179t63.5 140.5t95.5 107 t119 78.5l35.5 20.5t31 22.5t21.5 25.5t8 30.5q0 29 -19.5 49t-57.5 20q-47 0 -74 -26t-41 -74zM1202 674q40 97 119 151t184 54q65 0 116 -20t86.5 -54t54 -78.5t18.5 -93.5q0 -32 -9 -60t-24 -52t-34.5 -4 [...]
-<glyph unicode="⅛" horiz-adv-x="1556" d="M28 -25l979 1483h256l-979 -1483h-256zM125 580v634h-105v170q35 0 60 4t42.5 12t29.5 19t20 25h178v-864h-225zM1167 -16q-71 0 -130 19.5t-101.5 55.5t-66 86.5t-23.5 112.5q0 36 11.5 69t30.5 60.5t44.5 49t54.5 34.5q-45 25 -76 69.5t-31 104.5q0 51 20 94t57 74 t90 48.5t120 17.5t120.5 -17.5t90.5 -48.5t56.5 -74t19.5 -94q0 -57 -31 -103t-76 -71q29 -13 55 -34.5t45.5 -49t30.5 -60.5t11 -69q0 -62 -23 -112.5t-65.5 -86.5t-102 -55.5t-131.5 -19.5zM1167 152q57 0 92 [...]
-<glyph unicode="⅜" horiz-adv-x="1796" d="M268 -25l979 1483h256l-979 -1483h-256zM53 1253q40 97 119 151t184 54q65 0 116 -20t86.5 -54t54 -78.5t18.5 -93.5q0 -32 -9 -60t-24 -52t-34.5 -42t-41.5 -30q63 -23 100 -72.5t37 -124.5q0 -58 -20.5 -107t-60.5 -84t-97.5 -55t-132.5 -20q-121 0 -203.5 66.5 t-119.5 185.5l206 53q14 -54 45.5 -79t73.5 -25t65 23.5t23 58.5q0 37 -25 59.5t-85 22.5h-78v188h76q46 0 65 21t19 49q0 29 -19 49t-53 20q-40 0 -61 -19.5t-35 -53.5zM1407 -16q-71 0 -130 19.5t-101.5 55.5t-66 [...]
-<glyph unicode="⅝" horiz-adv-x="1790" d="M262 -25l979 1483h256l-979 -1483h-256zM158 823q37 -26 80.5 -43.5t91.5 -17.5q57 0 93 28t36 76q0 50 -27 74.5t-67 24.5q-38 0 -64.5 -18t-48.5 -60l-186 98l24 473h541v-196h-352l-9 -129q22 11 53.5 19.5t71.5 8.5q72 0 125.5 -24.5t89 -64.5t53 -92.5t17.5 -107.5 q0 -73 -25.5 -130t-70.5 -96.5t-107 -60t-135 -20.5q-54 0 -98 8t-80.5 21.5t-66 30t-54.5 34.5zM1401 -16q-71 0 -130 19.5t-101.5 55.5t-66 86.5t-23.5 112.5q0 36 11.5 69t30.5 60.5t44.5 49t54.5 34.5q-4 [...]
-<glyph unicode="⅞" horiz-adv-x="1608" d="M80 -25l979 1483h256l-979 -1483h-256zM178 580q1 103 15.5 195t41 175.5t63 157.5t81.5 141h-369v209h619v-174q-43 -50 -76.5 -109t-58 -122t-42 -127.5t-28 -126t-15.5 -117.5t-6 -102h-225zM1218 -16q-71 0 -130 19.5t-101.5 55.5t-66 86.5t-23.5 112.5q0 36 11.5 69 t30.5 60.5t44.5 49t54.5 34.5q-45 25 -76 69.5t-31 104.5q0 51 20 94t57 74t90 48.5t120 17.5t120.5 -17.5t90.5 -48.5t56.5 -74t19.5 -94q0 -57 -31 -103t-76 -71q29 -13 55 -34.5t45.5 -49t30.5 -60.5t11 [...]
-<glyph unicode="∂" horiz-adv-x="1221" d="M526 -25q-105 0 -191 30t-146.5 87.5t-94 142.5t-33.5 195q0 108 41.5 196t110.5 151t160 97.5t190 34.5q51 0 92 -7t73.5 -19.5t58.5 -29.5t49 -38q-14 104 -58 176t-107.5 117.5t-141 67.5t-158.5 26l65 256q181 -18 309 -77t208.5 -156.5t117.5 -233.5t37 -309 q0 -91 -13.5 -178.5t-43 -166t-75.5 -145t-110.5 -115t-149 -75.5t-190.5 -27zM547 231q56 0 102.5 18t80.5 49.5t52.5 73t18.5 89.5q0 93 -58 142.5t-155 49.5q-56 0 -103 -18t-80.5 -49t-52 -73.5t-18.5 -90.5q0 [...]
-<glyph unicode="∆" horiz-adv-x="1313" d="M20 0l510 1434h254l508 -1434h-1272zM700 922l-21 62.5t-20 66.5q-8 -31 -19.5 -66.5t-21.5 -62.5l-225 -662h529z" />
-<glyph unicode="∏" horiz-adv-x="1509" d="M1059 -287v1465h-608v-1465h-267v1721h1141v-1721h-266z" />
-<glyph unicode="∑" horiz-adv-x="1272" d="M92 -285v250l463 631l-434 590v250h1042v-256h-706l434 -586l-457 -623h766v-256h-1108z" />
-<glyph unicode="−" horiz-adv-x="1290" d="M164 573v258h962v-258h-962z" />
-<glyph unicode="∕" horiz-adv-x="743" d="M-246 -25l979 1483h256l-979 -1483h-256z" />
-<glyph unicode="∙" horiz-adv-x="494" d="M246 547q-37 0 -68.5 13t-54.5 35.5t-36 53.5t-13 66t13 66t36 54t54.5 36.5t68.5 13.5t68.5 -13.5t55 -36.5t37 -54t13.5 -66t-13.5 -66t-37 -53.5t-55 -35.5t-68.5 -13z" />
-<glyph unicode="√" horiz-adv-x="1184" d="M1122 1434l-438 -1434h-207l-385 723h289l168 -326l299 1037h274z" />
-<glyph unicode="∞" horiz-adv-x="2073" d="M1491 225q-78 0 -145.5 19.5t-124 52.5t-103 77t-82.5 93q-37 -52 -82.5 -96.5t-101.5 -76.5t-123.5 -50.5t-148.5 -18.5q-107 0 -199.5 32t-161 93.5t-108 150.5t-39.5 204q0 114 39.5 203t108 150.5t161 93.5t199.5 32q81 0 148.5 -18.5t123.5 -50.5t101.5 -76.5 t82.5 -96.5q35 48 81 91.5t102.5 77t123.5 53.5t144 20q108 0 202 -32t163.5 -93.5t109 -150.5t39.5 -203q0 -115 -39.5 -204t-108.5 -150.5t-162 -93.5t-200 -32zM1485 481q53 0 98.5 15t79.5 43.5t53 70.5t19 95 [...]
-<glyph unicode="∫" horiz-adv-x="881" d="M315 -287q-63 0 -119.5 12t-113.5 39v218q61 -23 94.5 -29t54.5 -6q34 0 55 17t21 54v1188q0 78 22.5 127t59 76.5t82.5 38t94 10.5q63 0 120 -11.5t114 -39.5v-217q-31 12 -54 18.5t-41 10.5t-31 5t-24 1q-34 0 -55 -17.5t-21 -54.5v-1188q0 -78 -22.5 -127t-59 -76.5 t-82.5 -38t-94 -10.5z" />
-<glyph unicode="≈" horiz-adv-x="1290" d="M838 752q-66 0 -118.5 17t-96.5 38l-82.5 38t-75.5 17q-47 0 -68 -31t-26 -79h-211q0 73 16 140t51 118t90.5 81.5t133.5 30.5q65 0 116 -17t94.5 -38l82 -38t79.5 -17q47 0 68.5 30.5t26.5 79.5h210q0 -73 -15.5 -140t-50 -118t-90 -81.5t-134.5 -30.5zM838 285 q-66 0 -118.5 17t-96.5 38l-82.5 38t-75.5 17q-47 0 -68 -31t-26 -79h-211q0 73 16 140t51 118t90.5 81.5t133.5 30.5q65 0 116 -17t94.5 -38l82 -38t79.5 -17q47 0 68.5 30.5t26.5 79.5h210q0 -73 -15.5 -140t-50 - [...]
-<glyph unicode="≠" horiz-adv-x="1290" d="M614 346l-65 -151h-289l66 151h-187v258h297l86 197h-383v258h496l76 176h288l-75 -176h184v-258h-297l-84 -197h381v-258h-494z" />
-<glyph unicode="≤" horiz-adv-x="1290" d="M1128 319l-964 353v211l964 350v-258l-589 -199l589 -198v-259zM164 0v258h962v-258h-962z" />
-<glyph unicode="≥" horiz-adv-x="1290" d="M164 319v259l590 198l-590 199v258l964 -350v-211zM164 0v258h962v-258h-962z" />
-<glyph unicode="◊" horiz-adv-x="1245" d="M526 -25l-413 742l413 741h193l414 -741l-414 -742h-193zM623 1106l-224 -389l224 -389l223 389z" />
-<glyph unicode="◌" horiz-adv-x="1319" d="M113 735q0 29 18.5 46.5t46.5 17.5t45 -17.5t17 -46.5q0 -28 -17 -44.5t-45 -16.5t-46.5 16.5t-18.5 44.5zM1079 735q0 29 17 46.5t45 17.5q26 0 45.5 -17.5t19.5 -46.5q0 -28 -19.5 -44.5t-45.5 -16.5q-28 0 -45 16.5t-17 44.5zM598 254q0 28 17 45.5t44 17.5 q28 0 45 -17.5t17 -45.5t-17 -46t-45 -18q-27 0 -44 18t-17 46zM598 1217q0 29 17 47t44 18q28 0 45 -18t17 -47q0 -28 -17 -45t-45 -17q-27 0 -44 17t-17 45zM938 1077q0 28 17 45t44 17q28 0 46 -17t18 -45q0 -29 -1 [...]
-<glyph unicode="" horiz-adv-x="1045" d="M0 1045h1045v-1045h-1045v1045z" />
-<glyph unicode="" horiz-adv-x="823" d="M412 -14q-96 0 -160 39t-103 102.5t-55 143.5t-16 161q0 51 7 103.5t22.5 102.5t41 93.5t62.5 76.5t86.5 52t114.5 19q96 0 160 -39.5t102.5 -103.5t54.5 -143.5t16 -160.5q0 -50 -6.5 -102.5t-21.5 -102.5t-40 -93.5t-62 -76.5t-87.5 -52t-115.5 -19zM412 195 q29 0 49.5 19.5t33.5 52.5t19 76t6 89t-6 89t-19 76t-33.5 53t-49.5 20t-50 -20t-34 -53t-19 -76t-6 -89t6 -89t19 -76t34 -52.5t50 -19.5z" />
-<glyph unicode="" horiz-adv-x="459" d="M125 0v635h-105v170q35 0 60 4t42.5 11.5t29.5 18.5t20 25h178v-864h-225z" />
-<glyph unicode="" horiz-adv-x="758" d="M51 623q17 57 47 104t71.5 80.5t94.5 52.5t115 19q68 0 123.5 -19.5t95 -55t61 -84.5t21.5 -108q0 -60 -18 -103t-48.5 -75t-71.5 -56t-87 -48q-51 -28 -87 -57.5t-53 -63.5h363v-209h-629q0 100 23 179t63.5 140.5t95.5 107.5t119 79l35.5 20t31 22t21.5 25.5t8 30.5 q0 29 -19.5 49.5t-57.5 20.5q-47 0 -74 -26.5t-41 -74.5z" />
-<glyph unicode="" horiz-adv-x="727" d="M53 674q40 97 119 151t184 54q65 0 116 -20t86.5 -54t54 -78.5t18.5 -93.5q0 -32 -9 -60t-24 -52t-34.5 -42.5t-41.5 -29.5q63 -23 100 -73t37 -124q0 -59 -20.5 -107.5t-60.5 -84t-97.5 -55t-132.5 -19.5q-121 0 -203.5 66.5t-119.5 185.5l206 53q14 -54 45.5 -79.5 t73.5 -25.5t65 23.5t23 58.5q0 37 -25 59.5t-85 22.5h-78v189h76q46 0 65 20.5t19 48.5q0 29 -19 49.5t-53 20.5q-40 0 -61 -20t-35 -54z" />
-<glyph unicode="" horiz-adv-x="805" d="M434 0v164h-420v147l424 553h205v-512h115v-188h-115v-164h-209zM434 352v238l-180 -238h180z" />
-<glyph unicode="" horiz-adv-x="737" d="M158 244q37 -26 80.5 -44t91.5 -18q57 0 93 28.5t36 76.5q0 49 -27 73.5t-67 24.5q-38 0 -64.5 -18t-48.5 -60l-186 99l24 473h541v-197h-352l-9 -129q22 11 53.5 20t71.5 9q72 0 125.5 -24.5t89 -65t53 -92.5t17.5 -107q0 -73 -25.5 -130t-70.5 -96.5t-107 -60 t-135 -20.5q-54 0 -98 8t-80.5 21.5t-66 30t-54.5 34.5z" />
-<glyph unicode="" horiz-adv-x="762" d="M397 -14q-82 0 -143 27t-101.5 74.5t-60.5 113t-20 143.5q0 107 32.5 199.5t99 164.5t168 120.5t238.5 66.5v-184q-49 -9 -95.5 -24t-86 -38t-69.5 -54.5t-46 -72.5q22 13 55.5 19t71.5 6q59 0 110.5 -18t89.5 -53.5t59.5 -87.5t21.5 -120q0 -65 -24 -117.5t-66.5 -89 t-102 -56t-131.5 -19.5zM391 170q53 0 89 26t36 72q0 48 -36 71t-89 23q-23 0 -44 -5.5t-36.5 -17.5t-25 -29.5t-9.5 -41.5q0 -23 9.5 -41t25 -31t36.5 -19.5t44 -6.5z" />
-<glyph unicode="" horiz-adv-x="649" d="M178 0q1 103 15.5 195.5t41 175.5t63 157.5t81.5 141.5h-369v209h619v-174q-65 -75 -107.5 -169t-68.5 -190t-37.5 -186.5t-12.5 -159.5h-225z" />
-<glyph unicode="" horiz-adv-x="776" d="M387 -16q-71 0 -130 19.5t-101.5 55.5t-66 86.5t-23.5 112.5q0 36 11.5 69t30.5 60.5t44.5 49t54.5 34.5q-45 25 -76 69.5t-31 104.5q0 51 20 94t57 74t90 48.5t120 17.5t120.5 -17.5t90.5 -48.5t56.5 -74t19.5 -94q0 -57 -31 -103t-76 -71q29 -13 55 -34.5t45.5 -49 t30.5 -60.5t11 -69q0 -62 -23 -112.5t-65.5 -86.5t-102 -55.5t-131.5 -19.5zM387 152q57 0 92 29t35 77q0 51 -35 84t-92 33t-92 -33t-35 -84q0 -48 35 -77t92 -29zM387 565q42 0 64 20.5t22 49.5t-22 51.5t-64 2 [...]
-<glyph unicode="" horiz-adv-x="754" d="M186 170q99 2 167 42t96 130q-22 -12 -56 -18.5t-65 -6.5q-63 0 -115.5 18.5t-91 53.5t-59.5 87t-21 120q0 63 23.5 115t66 89.5t101.5 58t130 20.5q82 0 144 -27t102.5 -73.5t61 -109t20.5 -134.5q0 -112 -28 -210.5t-88.5 -173t-156.5 -118.5t-231 -47v184zM367 502 q49 0 83.5 23t34.5 71q0 46 -34.5 72t-83.5 26q-25 0 -47 -6.5t-38.5 -19.5t-26 -31t-9.5 -41q0 -24 9.5 -41.5t26 -29.5t38.5 -17.5t47 -5.5z" />
-<glyph unicode="" horiz-adv-x="823" d="M412 565q-96 0 -160 39.5t-103 103t-55 143.5t-16 161q0 51 7 103.5t22.5 102t41 93.5t62.5 76.5t86.5 51.5t114.5 19q96 0 160 -39.5t102.5 -103t54.5 -143t16 -160.5q0 -50 -6.5 -103t-21.5 -102.5t-40 -93.5t-62 -77t-87.5 -52t-115.5 -19zM412 774q29 0 49.5 20 t33.5 53t19 76t6 89t-6 89t-19 76t-33.5 52.5t-49.5 19.5t-50 -19.5t-34 -52.5t-19 -76t-6 -89t6 -89t19 -76t34 -53t50 -20z" />
-<glyph unicode="" horiz-adv-x="459" d="M125 580v634h-105v170q35 0 60 4t42.5 12t29.5 19t20 25h178v-864h-225z" />
-<glyph unicode="" horiz-adv-x="758" d="M51 1202q17 57 47 104t71.5 81t94.5 52.5t115 18.5q68 0 123.5 -19.5t95 -54.5t61 -84t21.5 -108q0 -61 -18 -103.5t-48.5 -74.5t-71.5 -56.5t-87 -48.5q-51 -28 -87 -57t-53 -64h363v-208h-629q0 100 23 179t63.5 140.5t95.5 107t119 78.5l35.5 20.5t31 22.5t21.5 25.5 t8 30.5q0 29 -19.5 49t-57.5 20q-47 0 -74 -26t-41 -74z" />
-<glyph unicode="" horiz-adv-x="727" d="M53 1253q40 97 119 151t184 54q65 0 116 -20t86.5 -54t54 -78.5t18.5 -93.5q0 -32 -9 -60t-24 -52t-34.5 -42t-41.5 -30q63 -23 100 -72.5t37 -124.5q0 -58 -20.5 -107t-60.5 -84t-97.5 -55t-132.5 -20q-121 0 -203.5 66.5t-119.5 185.5l206 53q14 -54 45.5 -79t73.5 -25 t65 23.5t23 58.5q0 37 -25 59.5t-85 22.5h-78v188h76q46 0 65 21t19 49q0 29 -19 49t-53 20q-40 0 -61 -19.5t-35 -53.5z" />
-<glyph unicode="" horiz-adv-x="805" d="M434 580v163h-420v148l424 553h205v-512h115v-189h-115v-163h-209zM434 932v237l-180 -237h180z" />
-<glyph unicode="" horiz-adv-x="737" d="M158 823q37 -26 80.5 -43.5t91.5 -17.5q57 0 93 28t36 76q0 50 -27 74.5t-67 24.5q-38 0 -64.5 -18t-48.5 -60l-186 98l24 473h541v-196h-352l-9 -129q22 11 53.5 19.5t71.5 8.5q72 0 125.5 -24.5t89 -64.5t53 -92.5t17.5 -107.5q0 -73 -25.5 -130t-70.5 -96.5t-107 -60 t-135 -20.5q-54 0 -98 8t-80.5 21.5t-66 30t-54.5 34.5z" />
-<glyph unicode="" horiz-adv-x="762" d="M397 565q-82 0 -143 27t-101.5 75t-60.5 113.5t-20 143.5q0 106 32.5 198.5t99 164.5t168 120.5t238.5 67.5v-185q-49 -8 -95.5 -23.5t-86 -38.5t-69.5 -54.5t-46 -71.5q22 12 55.5 18t71.5 6q59 0 110.5 -18t89.5 -53.5t59.5 -87.5t21.5 -119q0 -65 -24 -117.5 t-66.5 -89.5t-102 -56.5t-131.5 -19.5zM391 750q53 0 89 25.5t36 72.5q0 24 -9.5 41.5t-26.5 29t-40 17.5t-49 6q-23 0 -44 -6t-36.5 -17.5t-25 -29t-9.5 -41.5q0 -23 9.5 -41.5t25 -31t36.5 -19t44 -6.5z" />
-<glyph unicode="" horiz-adv-x="649" d="M178 580q1 103 15.5 195t41 175.5t63 157.5t81.5 141h-369v209h619v-174q-43 -50 -76.5 -109t-58 -122t-42 -127.5t-28 -126t-15.5 -117.5t-6 -102h-225z" />
-<glyph unicode="" horiz-adv-x="776" d="M387 563q-71 0 -130 20t-101.5 56t-66 86.5t-23.5 112.5q0 36 11.5 69t30.5 60.5t44.5 49t54.5 34.5q-45 24 -76 69t-31 105q0 51 20 94t57 74t90 48t120 17t120.5 -17t90.5 -48t56.5 -74t19.5 -94q0 -57 -31 -103.5t-76 -70.5q29 -13 55 -34.5t45.5 -49t30.5 -60.5 t11 -69q0 -62 -23 -112.5t-65.5 -86.5t-102 -56t-131.5 -20zM387 731q57 0 92 29.5t35 77.5q0 51 -35 83.5t-92 32.5t-92 -32.5t-35 -83.5q0 -48 35 -77.5t92 -29.5zM387 1145q42 0 64 20t22 49t-22 51.5t-64 22.5 [...]
-<glyph unicode="" horiz-adv-x="754" d="M186 750q99 2 167 42t96 130q-22 -13 -56 -19t-65 -6q-63 0 -115.5 18t-91 53.5t-59.5 87.5t-21 120q0 63 23.5 114.5t66 89t101.5 58t130 20.5q82 0 144 -26.5t102.5 -73t61 -109.5t20.5 -135q0 -112 -28 -210.5t-88.5 -173t-156.5 -118.5t-231 -47v185zM367 1081 q49 0 83.5 23.5t34.5 71.5q0 46 -34.5 72t-83.5 26q-25 0 -47 -6.5t-38.5 -19.5t-26 -31t-9.5 -41q0 -24 9.5 -42t26 -29.5t38.5 -17.5t47 -6z" />
-<glyph unicode="" horiz-adv-x="973" d="M362 -516q35 34 62 70t41 81q-26 0 -50 9.5t-42.5 27.5t-29.5 43.5t-11 57.5q0 35 12 63.5t32 49t47.5 31.5t59.5 11q69 0 113.5 -43t44.5 -125q0 -98 -44.5 -177t-141.5 -158z" />
-<glyph unicode="" horiz-adv-x="973" d="M641 1495v-16q0 -86 -16.5 -159t-46 -135.5t-71 -116.5t-91.5 -103l-86 65q14 28 25.5 77t20 110.5t13 132.5t4.5 145h248z" />
-<glyph horiz-adv-x="1532" d="M844 0v219q64 68 111.5 133t79 133.5t47.5 144.5t16 167q0 72 -16.5 142.5t-55 127t-102.5 91t-158 34.5q-95 0 -158.5 -35t-102 -91t-55 -127t-16.5 -144q0 -90 16 -165.5t47.5 -144t79 -133.5t111.5 -133v-219h-545v266h232q-49 48 -91 106t-72.5 125t-47.5 142.5 t-17 159.5q0 77 14 155.5t44.5 152t79 137.5t117 111.5t159 75t205.5 27.5t205.5 -27.5t159 -75t116.5 -111.5t78.5 -137.5t44.5 -152t14 -155.5q0 -84 -17.5 -159.5t-48 -142.5t-72.5 -125t-91 -106h234v-266h-545z" />
-<glyph horiz-adv-x="1221" d="M801 0v96q-29 -52 -85 -86.5t-134 -34.5q-53 0 -95 16t-65 36v-312l-268 -125v1457h266v-615q0 -53 14 -91t38.5 -62.5t58 -36t71.5 -11.5q42 0 78 10.5t63 34t42.5 61t15.5 91.5v619h266v-1047h-266z" />
-<glyph horiz-adv-x="471" />
-<glyph horiz-adv-x="578" d="M156 0v1047h266v-1047h-266zM289 1169q-32 0 -60.5 12.5t-49.5 33.5t-33.5 49.5t-12.5 60.5t12.5 60t33.5 49t49.5 33t60.5 12t60.5 -12t49.5 -33t33 -49t12 -60t-12 -60.5t-33 -49.5t-49.5 -33.5t-60.5 -12.5z" />
-</font>
-</defs></svg>
\ No newline at end of file
diff --git a/pcsd/public/css/overpass_bold-web.ttf b/pcsd/public/css/overpass_bold-web.ttf
deleted file mode 100755
index 2483038..0000000
Binary files a/pcsd/public/css/overpass_bold-web.ttf and /dev/null differ
diff --git a/pcsd/public/css/overpass_bold-web.woff b/pcsd/public/css/overpass_bold-web.woff
deleted file mode 100755
index 03ad6a0..0000000
Binary files a/pcsd/public/css/overpass_bold-web.woff and /dev/null differ
diff --git a/pcsd/public/css/overpass_regular-web.eot b/pcsd/public/css/overpass_regular-web.eot
deleted file mode 100755
index 612b64a..0000000
Binary files a/pcsd/public/css/overpass_regular-web.eot and /dev/null differ
diff --git a/pcsd/public/css/overpass_regular-web.svg b/pcsd/public/css/overpass_regular-web.svg
deleted file mode 100755
index 80651dd..0000000
--- a/pcsd/public/css/overpass_regular-web.svg
+++ /dev/null
@@ -1,470 +0,0 @@
-<?xml version="1.0" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
-<svg xmlns="http://www.w3.org/2000/svg">
-<metadata>
-This is a custom SVG webfont generated by Font Squirrel.
-Copyright : Copyright c 2011 by Red Hat Inc All rights reserved
-Designer : Delve Withrington
-Foundry : Delve Fonts
-Foundry URL : httpwwwredhatcom
-</metadata>
-<defs>
-<font id="webfontLTZe4IYH" horiz-adv-x="1157" >
-<font-face units-per-em="2048" ascent="1536" descent="-512" />
-<missing-glyph horiz-adv-x="471" />
-<glyph unicode="fi" horiz-adv-x="1194" d="M440 862v-862h-213v862h-155v185h155v167q0 72 21 119.5t54.5 75t76.5 38.5t88 11q62 0 112 -13t97 -36v-176q-48 20 -83 28.5t-67 8.5q-26 0 -42.5 -7t-26.5 -20t-13.5 -30t-3.5 -37v-129h236v-185h-236zM825 0v1047h213v-1047h-213zM932 1210q-27 0 -50.5 10t-41 27.5 t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="fl" horiz-adv-x="1196" d="M440 862v-862h-213v862h-155v185h155v167q0 72 21 119.5t54.5 75t76.5 38.5t88 11q62 0 112 -13t97 -36v-176q-48 20 -83 28.5t-67 8.5q-26 0 -42.5 -7t-26.5 -20t-13.5 -30t-3.5 -37v-129h236v-185h-236zM819 0v1378l213 101v-1479h-213z" />
-<glyph unicode=" " horiz-adv-x="471" />
-<glyph unicode="	" horiz-adv-x="471" />
-<glyph unicode=" " horiz-adv-x="471" />
-<glyph unicode="!" horiz-adv-x="565" d="M236 395l-60 609v454h213v-454l-59 -609h-94zM283 -25q-33 0 -61 12t-49 32.5t-32.5 47.5t-11.5 58t11.5 58.5t32.5 48t49 32.5t61 12q32 0 60.5 -12t49.5 -32.5t33 -48t12 -58.5t-12 -58t-33 -47.5t-49.5 -32.5t-60.5 -12z" />
-<glyph unicode=""" horiz-adv-x="860" d="M92 1434h266l-43 -574h-180zM502 1434h266l-43 -574h-180z" />
-<glyph unicode="#" horiz-adv-x="1393" d="M260 0l59 387h-233l27 176h233l51 338h-233l26 176h234l55 357h183l-56 -357h285l55 357h182l-55 -357h234l-27 -176h-233l-52 -338h234l-27 -176h-233l-60 -387h-182l59 387h-284l-60 -387h-182zM528 563h285l51 338h-284z" />
-<glyph unicode="$" horiz-adv-x="1196" d="M813 1108q-12 31 -33.5 57.5t-51.5 46t-68.5 30.5t-86.5 11q-94 0 -147 -45t-53 -127q0 -51 30 -86.5t79.5 -62.5t112 -49.5t128.5 -49t128.5 -61t112 -84t79.5 -119t30 -166.5q0 -90 -28.5 -161.5t-78.5 -124.5t-118 -86.5t-146 -46.5v-185h-213v187 q-142 26 -242.5 114.5t-144.5 223.5l201 73q21 -48 51 -88t69 -68.5t86 -44.5t101 -16q55 0 99.5 14t76 42t49 69.5t17.5 95.5q0 64 -30 107.5t-79.5 75.5t-112 56l-128.5 49t-128.5 56t-112 76t-79.5 108t-30 154q0 66 22 126.5t65 10 [...]
-<glyph unicode="%" horiz-adv-x="1743" d="M428 729q-92 0 -156 33t-104 85t-58 117t-18 130q0 60 17.5 124t57.5 117.5t104 88t157 34.5q92 0 156 -32.5t104 -85t58 -117.5t18 -129q0 -59 -17 -123.5t-56.5 -118t-104 -88.5t-158.5 -35zM428 893q42 0 73 16t51.5 43.5t30 64t9.5 77.5q0 43 -10 80t-30.5 63.5 t-51 41.5t-72.5 15t-73 -16t-51.5 -43t-30 -63.5t-9.5 -77.5q0 -44 10 -81t30.5 -63.5t51 -41.5t72.5 -15zM479 -25h-194l979 1483h194zM1315 -25q-92 0 -156 33t-104 85t-58 117t-18 130q0 60 17.5 124.5t57.5 118t104 [...]
-<glyph unicode="&" horiz-adv-x="1436" d="M1106 0l-123 152q-84 -81 -192 -129t-242 -48q-94 0 -172 29t-134.5 81.5t-88 127t-31.5 166.5q0 85 27 151t71.5 117.5t102 92t118.5 75.5q-37 46 -64.5 85t-46 76.5t-27.5 75.5t-9 83q0 76 27.5 136t75 101.5t111.5 63.5t138 22t137 -24.5t109.5 -68t73 -103.5 t26.5 -132q0 -65 -20 -117.5t-54 -95t-79 -77.5t-95 -66l226 -270q18 43 28.5 94t10.5 96h207q-2 -90 -27.5 -185t-73.5 -181l273 -328h-283zM578 176q83 0 152.5 37t123.5 96l-289 357q-45 -27 -83.5 -56t-67.5 -62.5 [...]
-<glyph unicode="'" horiz-adv-x="451" d="M92 1434h266l-43 -574h-180z" />
-<glyph unicode="(" horiz-adv-x="707" d="M393 -266q-47 61 -92 143t-80 187t-56.5 234t-21.5 284t23.5 288t61.5 243.5t87.5 196.5t100.5 148h209q-21 -24 -48.5 -68t-57.5 -104t-59 -136t-52.5 -165t-37.5 -190.5t-14 -212.5q0 -168 28.5 -307.5t68 -247.5t81.5 -182t68 -111h-209z" />
-<glyph unicode=")" horiz-adv-x="707" d="M104 -266q26 37 68 111t81.5 182t68 247.5t28.5 307.5q0 111 -14 212.5t-37 190.5t-52 165t-59 136t-58 103.5t-48 68.5h209q51 -62 100 -148t87.5 -196.5t61.5 -243.5t23 -288t-21.5 -284t-56.5 -234t-80 -187t-92 -143h-209z" />
-<glyph unicode="*" horiz-adv-x="1085" d="M590 791l-47 141l-47 -141l-142 -191l-153 113l139 190l115 86h-142l-211 70l60 182l211 -67l112 -80l-39 133v231h193v-231l-41 -135l115 82l211 67l59 -182l-211 -70h-143l116 -86l140 -190l-154 -113z" />
-<glyph unicode="+" horiz-adv-x="1290" d="M752 600v-379h-213v379h-375v205h375v379h213v-379h374v-205h-374z" />
-<glyph unicode="," horiz-adv-x="457" d="M84 -238q14 12 33.5 34.5t38.5 51t33.5 61t19.5 66.5q-27 3 -52 14t-43.5 30t-30 44t-11.5 56q0 35 12.5 63.5t33 49t48 31.5t57.5 11q32 0 61.5 -11.5t52 -34.5t35.5 -58t13 -82q0 -59 -17.5 -115t-47.5 -104.5t-70.5 -89t-85.5 -68.5z" />
-<glyph unicode="-" horiz-adv-x="809" d="M154 506v205h501v-205h-501z" />
-<glyph unicode="." horiz-adv-x="457" d="M227 -25q-33 0 -61 12t-48.5 32.5t-32 47.5t-11.5 58t11.5 58.5t32 48t48.5 32.5t61 12q32 0 60.5 -12t49.5 -32.5t33.5 -48t12.5 -58.5t-12.5 -58t-33.5 -47.5t-49.5 -32.5t-60.5 -12z" />
-<glyph unicode="/" horiz-adv-x="1010" d="M-31 -266l805 1724h215l-805 -1724h-215z" />
-<glyph unicode="0" horiz-adv-x="1331" d="M666 -25q-99 0 -175.5 30.5t-133.5 83t-95.5 124t-62.5 154t-34.5 172t-10.5 178.5q0 82 9.5 169t33 169.5t61.5 155.5t95 128t134 87t179 32q99 0 175 -30.5t133 -83t96 -124t63 -154t34.5 -172t10.5 -177.5q0 -82 -10 -169.5t-33 -169.5t-61 -155.5t-95 -128.5 t-134.5 -87t-178.5 -32zM666 180q59 0 103 22.5t76.5 61.5t53.5 91t34 111.5t18 123.5t5 127q0 100 -13 196t-46 171.5t-89 122t-142 46.5q-59 0 -103.5 -22.5t-77 -61t-53.5 -90.5t-34 -111.5t-18 -124t-5 -126.5q0 -66 5. [...]
-<glyph unicode="1" horiz-adv-x="827" d="M307 0v1112h-235v158q52 0 97 8t81.5 27t63.5 50.5t42 78.5h164v-1434h-213z" />
-<glyph unicode="2" horiz-adv-x="1262" d="M156 1151q26 68 72 124.5t109 97t140 63t164 22.5q109 0 196 -31.5t147.5 -88t93 -134.5t32.5 -170q0 -91 -27.5 -159t-76.5 -121.5t-117.5 -97t-149.5 -85.5q-141 -74 -230.5 -161.5t-115.5 -200.5h729v-209h-989q0 147 43 265t115 211t165 163t193 121q46 24 89.5 51 t77 60.5t53.5 75t20 93.5q0 45 -18.5 83.5t-51.5 67.5t-79 45.5t-101 16.5q-117 0 -186 -51t-103 -139z" />
-<glyph unicode="3" horiz-adv-x="1239" d="M604 -25q-95 0 -174.5 23.5t-142.5 67t-110 104t-77 135.5l199 80q25 -56 58.5 -95t73.5 -63.5t84 -35.5t89 -11q58 0 109 13.5t89 42t60.5 72.5t22.5 106q0 47 -15.5 88.5t-49 73t-85.5 49.5t-125 18h-110v205h110q50 0 90.5 14.5t69.5 40t45 61t16 78.5 q0 100 -57.5 155.5t-169.5 55.5q-51 0 -91 -13.5t-70.5 -35.5t-53.5 -51.5t-39 -60.5l-198 75q25 65 67.5 118.5t100 92t130.5 59.5t160 21q97 0 178.5 -31.5t140.5 -85.5t91.5 -125.5t32.5 -152.5q0 -100 -45.5 -181.5t-139.5 -12 [...]
-<glyph unicode="4" horiz-adv-x="1325" d="M803 0v326h-772v137l733 971h252v-912h186v-196h-186v-326h-213zM803 522v654l-490 -654h490z" />
-<glyph unicode="5" horiz-adv-x="1241" d="M164 725l49 709h805v-205h-606l-31 -365q45 32 115.5 55t158.5 23q115 0 200.5 -39t142 -103.5t84.5 -147.5t28 -171q0 -115 -41 -208.5t-113.5 -159.5t-173.5 -102t-221 -36q-87 0 -156.5 12t-124 31t-93.5 42.5t-64 45.5l127 170q25 -17 54 -34t66 -31t83.5 -22.5 t107.5 -8.5q74 0 134.5 21.5t103.5 60.5t66.5 93.5t23.5 119.5q0 64 -19 113t-54 82t-83.5 50t-107.5 17q-75 0 -131.5 -26t-98.5 -72z" />
-<glyph unicode="6" horiz-adv-x="1229" d="M631 -25q-126 0 -219.5 42.5t-155.5 117.5t-92.5 176.5t-30.5 218.5q0 179 46.5 333.5t143 275t244.5 202.5t351 117v-205q-89 -14 -172.5 -47t-153.5 -88.5t-123.5 -134.5t-81.5 -184q21 20 50 38.5t66.5 32.5t82.5 22.5t96 8.5q97 0 179 -33.5t140.5 -95t91.5 -147 t33 -189.5q0 -103 -36 -188t-101 -145.5t-156 -94t-202 -33.5zM631 180q60 0 110.5 17t86.5 50t56.5 80.5t20.5 108.5q0 62 -20.5 110.5t-56.5 81.5t-86.5 50.5t-110.5 17.5t-111 -17.5t-87.5 -50.5t-57.5 -81.5t-21 -1 [...]
-<glyph unicode="7" horiz-adv-x="1100" d="M371 0q1 183 28 343.5t78.5 308.5t127.5 288.5t177 284.5h-731v209h967v-232q-80 -93 -143 -196.5t-110 -210.5t-80 -215t-53.5 -210.5t-30 -196.5t-9.5 -173h-221z" />
-<glyph unicode="8" horiz-adv-x="1260" d="M629 -25q-111 0 -202.5 33t-157 92.5t-101 141t-35.5 178.5q0 66 17.5 121.5t49 100.5t76 80.5t97.5 61.5q-85 48 -131.5 121t-46.5 178q0 80 31 148.5t88 119t137 79t178 28.5t178.5 -28.5t137.5 -79t88.5 -119t31.5 -148.5q0 -103 -47 -176t-133 -123q54 -26 98 -61.5 t76 -80.5t49.5 -100.5t17.5 -121.5q0 -97 -36 -178.5t-101 -141t-157 -92.5t-203 -33zM629 180q60 0 110.5 18t87 50.5t57.5 78t21 99.5q0 52 -21 98.5t-57.5 81t-87 54.5t-110.5 20q-61 0 -111.5 -20t-87 -54.5t-56 [...]
-<glyph unicode="9" horiz-adv-x="1229" d="M352 193q82 4 161 26.5t146 72.5t116.5 133.5t72.5 211.5q-16 -18 -42.5 -37t-62.5 -34t-81.5 -24.5t-98.5 -9.5q-100 0 -184.5 33t-146 93.5t-96 146.5t-34.5 192q0 103 36.5 188t102 145.5t157 94t202.5 33.5q129 0 222.5 -42.5t154.5 -116.5t90 -176t29 -220 q0 -201 -45 -367.5t-136.5 -288t-232 -192.5t-330.5 -80v218zM598 750q61 0 111.5 17t86 49.5t55 78.5t19.5 104q0 55 -20 100t-56 76.5t-85.5 48.5t-108.5 17q-60 0 -110 -17t-86 -48.5t-56 -77t-20 -101.5q0 -59 19 -105t5 [...]
-<glyph unicode=":" horiz-adv-x="457" d="M227 690q-33 0 -61 12t-48.5 32.5t-32 47.5t-11.5 58t11.5 58.5t32 48t48.5 32.5t61 12q32 0 60.5 -12t49.5 -32.5t33.5 -48t12.5 -58.5t-12.5 -58t-33.5 -47.5t-49.5 -32.5t-60.5 -12zM227 -25q-33 0 -61 12t-48.5 32.5t-32 47.5t-11.5 58t11.5 58.5t32 48t48.5 32.5 t61 12q32 0 60.5 -12t49.5 -32.5t33.5 -48t12.5 -58.5t-12.5 -58t-33.5 -47.5t-49.5 -32.5t-60.5 -12z" />
-<glyph unicode=";" horiz-adv-x="457" d="M225 690q-33 0 -61 12t-48.5 32.5t-32 47.5t-11.5 58t11.5 58.5t32 48t48.5 32.5t61 12q32 0 60.5 -12t49.5 -32.5t33.5 -48t12.5 -58.5t-12.5 -58t-33.5 -47.5t-49.5 -32.5t-60.5 -12zM84 -238q14 12 33.5 34.5t38.5 51t33.5 61t19.5 66.5q-27 3 -52 14t-43.5 30t-30 44 t-11.5 56q0 35 12.5 63.5t33 49t48 31.5t57.5 11q32 0 61.5 -11.5t52 -34.5t35.5 -58t13 -82q0 -59 -17.5 -115t-47.5 -104.5t-70.5 -89t-85.5 -68.5z" />
-<glyph unicode="<" horiz-adv-x="1290" d="M1126 221l-962 391v181l962 391v-215l-671 -267l671 -266v-215z" />
-<glyph unicode="=" horiz-adv-x="1290" d="M164 401v205h962v-205h-962zM164 799v205h962v-205h-962z" />
-<glyph unicode=">" horiz-adv-x="1290" d="M1126 612l-962 -391v215l672 266l-672 267v215l962 -391v-181z" />
-<glyph unicode="?" horiz-adv-x="1026" d="M387 389v41q0 73 15 128t40 97.5t57 75.5t65 61l65 54.5t57 56.5t40 66t15 84q0 44 -14.5 81.5t-43 64.5t-70.5 42.5t-97 15.5q-125 0 -193.5 -72.5t-68.5 -205.5h-213q0 112 35 201t98 150.5t150.5 94.5t191.5 33q107 0 188.5 -31t137 -85.5t84 -128.5t28.5 -160 q0 -84 -26 -142t-65.5 -102.5t-85.5 -81.5t-85.5 -78.5t-65.5 -93.5t-26 -127v-39h-213zM492 -25q-33 0 -61.5 12t-49 32.5t-32 47.5t-11.5 58t11.5 58.5t32 48t49 32.5t61.5 12q32 0 60.5 -12t49.5 -32.5t33 -48t12 -58.5 [...]
-<glyph unicode="@" horiz-adv-x="1677" d="M1069 -10q-98 -15 -217 -15q-158 0 -292 47.5t-231 138.5t-151.5 224.5t-54.5 304.5q0 113 26.5 213.5t75.5 186t117 153.5t151.5 116t179 73.5t199.5 25.5q156 0 282 -48.5t215 -132.5t137 -196t48 -239q0 -111 -31.5 -205t-85 -161.5t-123.5 -105.5t-147 -38 q-26 0 -51 8t-45 23.5t-34.5 38t-18.5 51.5q-42 -66 -103 -97.5t-137 -31.5q-66 0 -120.5 25.5t-93 71.5t-60 110.5t-21.5 142.5q0 77 26.5 153t75.5 136t117 97.5t152 37.5q68 0 119.5 -28t83.5 -91l16 92h182l-65 -346l-20. [...]
-<glyph unicode="A" horiz-adv-x="1425" d="M1118 0l-114 330h-580l-117 -330h-221l518 1434h219l516 -1434h-221zM764 1006l-26.5 79t-22.5 82q-10 -37 -23.5 -81.5t-25.5 -79.5l-177 -488h449z" />
-<glyph unicode="B" horiz-adv-x="1364" d="M184 1434h543q128 0 215 -32.5t140 -86t75.5 -122.5t22.5 -142q0 -90 -49 -169.5t-148 -127.5q67 -24 114 -60.5t77 -80.5t43.5 -93.5t13.5 -99.5q0 -73 -24.5 -148t-80.5 -135.5t-146.5 -98.5t-221.5 -38h-574v1434zM774 197q71 0 117.5 19t73.5 49.5t38 68.5t11 76 q0 45 -13 87.5t-47 75t-92.5 52.5t-148.5 20h-316v-448h377zM692 842q67 0 118 14.5t85 41t51 62.5t17 80q0 35 -9 70t-34.5 63.5t-70 46t-116.5 17.5h-336v-395h295z" />
-<glyph unicode="C" horiz-adv-x="1331" d="M729 -25q-111 0 -198.5 30.5t-153.5 83t-112.5 124t-75.5 154t-42.5 172t-13.5 178.5q0 82 13.5 169t43 169.5t76.5 155.5t113 128t153 87t197 32q101 0 182 -24.5t141.5 -64.5t101.5 -91t63 -104l-195 -91q-26 41 -54.5 72.5t-63 53.5t-77.5 33t-98 11q-72 0 -127.5 -23 t-97 -63t-70.5 -93t-46.5 -112.5t-25.5 -122.5t-8 -122q0 -95 21.5 -191t67.5 -173t117 -125t169 -48q53 0 95.5 13.5t78 38t64 58.5t53.5 75l199 -76q-30 -70 -74.5 -128t-105 -99t-138 -64t-172.5 -23z" />
-<glyph unicode="D" horiz-adv-x="1403" d="M184 1434h402q125 0 224 -28.5t175 -78.5t130 -118.5t88.5 -148t50.5 -167.5t16 -176q0 -82 -14.5 -167.5t-48 -165t-86.5 -149.5t-130 -122.5t-179.5 -82.5t-233.5 -30h-394v1434zM612 201q82 0 146.5 21.5t113 58.5t82 87t55 107.5t31 119t9.5 122.5q0 97 -23.5 189.5 t-73.5 165t-128.5 117t-188.5 44.5h-238v-1032h215z" />
-<glyph unicode="E" horiz-adv-x="1231" d="M184 0v1434h889v-201h-676v-393h437v-201h-437v-438h721v-201h-934z" />
-<glyph unicode="F" horiz-adv-x="1145" d="M184 0v1434h889v-201h-676v-393h437v-201h-437v-639h-213z" />
-<glyph unicode="G" horiz-adv-x="1413" d="M758 -25q-111 0 -200 29t-158.5 80t-120 121.5t-83 152.5t-48 173.5t-15.5 185.5q0 87 16.5 176t50.5 171.5t85 154.5t120.5 125t157.5 83.5t195 30.5q103 0 179.5 -22.5t134.5 -60.5t100 -87t75 -102l-192 -111q-25 35 -53.5 66.5t-64.5 55.5t-80 38t-99 14 q-78 0 -138.5 -23t-105 -62.5t-75.5 -92t-49.5 -111.5t-27 -121.5t-8.5 -121.5q0 -103 23 -199t72 -170.5t125.5 -119t183.5 -44.5q67 0 123 23.5t96 64.5t62 96t22 119v29h-258v201h477v-187q0 -135 -40 -238.5t-110.5 -174t-1 [...]
-<glyph unicode="H" horiz-adv-x="1448" d="M1051 0v639h-654v-639h-213v1434h213v-594h654v594h213v-1434h-213z" />
-<glyph unicode="I" horiz-adv-x="602" d="M195 0v1434h213v-1434h-213z" />
-<glyph unicode="J" d="M506 -25q-77 0 -146 17.5t-127.5 51.5t-104.5 84.5t-77 117.5l185 90q38 -81 107 -118.5t163 -37.5q51 0 98.5 13t84 49t59 100.5t22.5 167.5v924h213v-953q0 -100 -20.5 -176.5t-55 -133t-81.5 -94t-100 -60.5t-109.5 -32.5t-110.5 -9.5z" />
-<glyph unicode="K" horiz-adv-x="1386" d="M1055 0l-404 750l-254 -308v-442h-213v1434h213v-695l553 695h246l-401 -504l510 -930h-250z" />
-<glyph unicode="L" horiz-adv-x="1190" d="M184 0v1434h213v-1229h701v-205h-914z" />
-<glyph unicode="M" horiz-adv-x="1647" d="M1249 0v784l6 215l-84 -221l-346 -803l-348 809l-86 215l6 -215v-784h-213v1434h215l385 -895l39 -105l41 105l383 895h215v-1434h-213z" />
-<glyph unicode="N" horiz-adv-x="1454" d="M1069 0l-620 973l-58 106l6 -106v-973h-213v1434h215l605 -957l59 -110l-6 108v959h213v-1434h-201z" />
-<glyph unicode="O" horiz-adv-x="1516" d="M758 -25q-115 0 -206 30.5t-160.5 83t-118.5 124t-80 154t-45.5 172t-14.5 178.5q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5t206 -30.5t160.5 -83t118.5 -124t80 -154t45 -172t14 -177.5q0 -89 -14 -178.5t-45 -172t-80 -154t-118.5 -124t-160.5 -83 t-206 -30.5zM758 180q75 0 134 23.5t103.5 63.5t76 93.5t51.5 113t29 122t9 121.5q0 62 -9 126.5t-29 124t-51.5 111.5t-76 90.5t-103.5 61t-134 22.5t-134 -23.5t-104 -63.5t-76.5 -93.5t-51.5 -113t-29 -122t-9 - [...]
-<glyph unicode="P" horiz-adv-x="1315" d="M184 0v1434h576q132 0 222.5 -38.5t146 -100.5t80 -140.5t24.5 -159.5q0 -50 -11.5 -102t-35.5 -100.5t-62 -91.5t-90.5 -75t-120.5 -50.5t-153 -18.5h-363v-557h-213zM772 762q67 0 112.5 21t74 54.5t41 75t12.5 82.5q0 38 -11 79t-38 75.5t-73.5 57t-117.5 22.5h-375 v-467h375z" />
-<glyph unicode="Q" horiz-adv-x="1516" d="M758 -25q-115 0 -206 30.5t-160.5 83t-118.5 124t-80 154t-45.5 172t-14.5 178.5q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5t206 -30.5t160.5 -83t118.5 -124t80 -154t45 -172t14 -177.5q0 -83 -12.5 -166.5t-39 -162t-69 -148t-102.5 -123.5 l96 -144l-163 -110l-103 151q-51 -18 -108 -28.5t-123 -10.5zM758 180q29 0 56.5 3.5t51.5 11.5l-108 163l168 111l106 -162q34 41 58.5 90.5t40.5 103t23 109t7 107.5q0 62 -9 126.5t-29 124t-51.5 111.5t-76 90.5t-103.5 [...]
-<glyph unicode="R" horiz-adv-x="1384" d="M184 0v1434h604q132 0 221 -36t143 -95.5t76.5 -135.5t22.5 -157q0 -62 -18 -125t-55.5 -118.5t-96.5 -98t-141 -64.5l295 -604h-244l-282 586h-312v-586h-213zM793 791q67 0 112 18.5t73 49.5t40 70t12 81q0 37 -10.5 76t-37.5 71t-73 52t-116 20h-396v-438h396z" />
-<glyph unicode="S" horiz-adv-x="1237" d="M854 1108q-13 31 -36 57.5t-56 46t-75.5 30.5t-94.5 11q-103 0 -161 -45t-58 -127q0 -51 32 -86.5t84 -62.5t118.5 -49.5t136 -49t136 -61t118.5 -84t84 -119t32 -166.5q0 -103 -37.5 -182.5t-103.5 -134t-155.5 -83t-192.5 -28.5q-96 0 -179.5 24.5t-151 70t-116.5 110 t-76 144.5l201 73q22 -48 54.5 -88t74.5 -68.5t92.5 -44.5t108.5 -16t106 14t82 42t53 69.5t19 95.5q0 64 -32 107.5t-84 75.5t-118.5 56l-136 49t-136 56t-118.5 76t-84 108t-32 154q0 76 30.5 144t88.5 119.5t143 [...]
-<glyph unicode="T" horiz-adv-x="1208" d="M711 1229v-1229h-213v1229h-406v205h1024v-205h-405z" />
-<glyph unicode="U" horiz-adv-x="1427" d="M713 -25q-111 0 -209.5 29t-171.5 95.5t-115.5 175t-42.5 268.5v891h213v-891q0 -108 25 -178t68.5 -111t103.5 -57.5t129 -16.5t128.5 16.5t104 57.5t69.5 111t25 178v891h213v-891q0 -158 -41 -266.5t-113.5 -175.5t-171.5 -96.5t-214 -29.5z" />
-<glyph unicode="V" horiz-adv-x="1358" d="M786 0h-213l-491 1434h229l326 -1000l21.5 -71.5t23.5 -92.5q11 50 23.5 92.5l21.5 71.5l322 1000h227z" />
-<glyph unicode="W" horiz-adv-x="1745" d="M1653 1434l-310 -1434h-192l-248 930q-15 57 -23.5 92.5t-11.5 62.5q-3 -27 -11 -62.5t-23 -92.5l-242 -930h-193l-307 1434h219l178 -883l20.5 -100t10.5 -66q3 28 12.5 65.5l24.5 100.5l225 883h179l231 -881q17 -62 25.5 -99t11.5 -65q3 28 9.5 66t18.5 98l181 881 h215z" />
-<glyph unicode="X" horiz-adv-x="1331" d="M102 0l441 741l-410 693h246l287 -486l286 486h246l-410 -693l441 -741h-246l-317 535l-318 -535h-246z" />
-<glyph unicode="Y" horiz-adv-x="1364" d="M575 0v561l-503 873h243l367 -656l367 656h243l-504 -871v-563h-213z" />
-<glyph unicode="Z" horiz-adv-x="1356" d="M113 0v172l827 1057h-780v205h1063v-172l-830 -1057h830v-205h-1110z" />
-<glyph unicode="[" horiz-adv-x="762" d="M205 -266v1724h475v-201h-262v-1323h262v-200h-475z" />
-<glyph unicode="\" horiz-adv-x="1010" d="M774 -266l-805 1724h215l805 -1724h-215z" />
-<glyph unicode="]" horiz-adv-x="762" d="M557 -266h-475v200h262v1323h-262v201h475v-1724z" />
-<glyph unicode="^" horiz-adv-x="1290" d="M928 717l-283 471l-283 -471h-219l412 741h180l412 -741h-219z" />
-<glyph unicode="_" horiz-adv-x="1024" d="M-10 -362v184h1044v-184h-1044z" />
-<glyph unicode="`" horiz-adv-x="819" d="M451 1180l-365 319h291l291 -319h-217z" />
-<glyph unicode="a" horiz-adv-x="1090" d="M956 0h-204v90q-20 -21 -48 -41.5t-63.5 -37t-79.5 -26.5t-96 -10q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5q-98 0 -169.5 -26 t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q44 0 91 -5t91.5 -19.5t84 -41.5t69 -70.5t46.5 -106.5t17 -150v-678zM504 164q81 0 143.5 33t104.5 79v160q-44 23 -103 37t-121 14q-48 0 -90 -9t-73.5 -28.5t- [...]
-<glyph unicode="b" horiz-adv-x="1151" d="M614 -25q-45 0 -81.5 8.5t-66.5 23t-54 34.5t-45 43v-84h-213v1380l213 99v-512q40 45 100.5 74.5t146.5 29.5q96 0 176 -37t137.5 -107.5t89.5 -172t32 -230.5q0 -133 -30 -235.5t-86.5 -172t-137 -105.5t-181.5 -36zM588 176q61 0 107 24.5t77.5 69.5t47.5 109.5 t16 144.5q0 169 -63.5 257.5t-184.5 88.5q-41 0 -76 -11t-63.5 -28t-49 -38t-32.5 -41v-457q35 -57 92.5 -88t128.5 -31z" />
-<glyph unicode="c" horiz-adv-x="1061" d="M559 -25q-105 0 -189.5 37t-144 107.5t-91.5 172t-32 230.5t32 231t91.5 172.5t144 108t189.5 37.5q146 0 247.5 -65t147.5 -205l-202 -70q-23 60 -70 97.5t-115 37.5q-60 0 -106 -24t-77.5 -68.5t-48 -108t-16.5 -143.5t16.5 -143.5t47 -107.5t75 -67.5t101.5 -23.5 q72 0 123.5 40t73.5 114l202 -64q-42 -146 -142.5 -220.5t-256.5 -74.5z" />
-<glyph unicode="d" horiz-adv-x="1151" d="M537 -25q-99 0 -179.5 37.5t-137.5 108t-87.5 172.5t-30.5 231t32 230.5t89.5 172t137.5 107.5t176 37q86 0 146.5 -28t100.5 -70v407l213 99v-1479h-213v74q-42 -47 -100.5 -73t-146.5 -26zM567 176q72 0 127 31t90 88v461q-12 20 -32 40t-47.5 36.5t-62 27t-75.5 10.5 q-120 0 -186 -88.5t-66 -257.5q0 -76 17.5 -139.5t50 -110t79 -72.5t105.5 -26z" />
-<glyph unicode="e" horiz-adv-x="1120" d="M578 -25q-109 0 -197 37.5t-150 108t-95.5 172.5t-33.5 231t34 230.5t96.5 172t150 107.5t195.5 37q93 0 174.5 -29.5t143 -90t97 -152.5t35.5 -217q0 -34 -1.5 -72t-6.5 -66h-705q1 -65 23.5 -116.5t59.5 -88t84 -56t98 -19.5q65 0 120.5 27t96.5 79l139 -143 q-57 -68 -146 -110t-212 -42zM578 883q-50 0 -96 -14.5t-81.5 -46t-58.5 -82.5t-27 -124h500q-2 73 -23 124t-55 82.5t-75.5 46t-83.5 14.5z" />
-<glyph unicode="f" horiz-adv-x="686" d="M440 862v-862h-213v862h-155v185h155v167q0 72 21 119.5t54.5 75t76.5 38.5t88 11q62 0 112 -13t97 -36v-176q-48 20 -83 28.5t-67 8.5q-26 0 -42.5 -7t-26.5 -20t-13.5 -30t-3.5 -37v-129h236v-185h-236z" />
-<glyph unicode="g" horiz-adv-x="1151" d="M397 -246q115 7 190 27t119 50.5t61 71.5t17 89v92q-87 -109 -252 -109q-90 0 -168 34.5t-136.5 103.5t-92 171.5t-33.5 239.5q0 129 32 230.5t89.5 172t137.5 107.5t176 37q46 0 84 -8.5t68.5 -22.5t54 -31.5t40.5 -35.5v74h213v-967q0 -120 -24 -204t-82 -144 q-39 -40 -85.5 -65.5t-97 -41.5t-102.5 -23.5t-102 -11.5zM571 176q65 0 121.5 32t91.5 91v453q-16 26 -39.5 47.5t-51 37.5t-59 24.5t-63.5 8.5q-125 0 -190.5 -88.5t-65.5 -257.5q0 -80 18 -144.5t51 -109.5t80.5 -69.5t10 [...]
-<glyph unicode="h" d="M801 0v625q0 66 -16 112.5t-44 76t-66 43t-83 13.5q-48 0 -89 -12.5t-71 -40.5t-47.5 -74.5t-17.5 -113.5v-629h-213v1380l213 99v-533q51 65 117 95t153 30q82 0 151 -23.5t119.5 -76.5t78.5 -138t28 -208v-625h-213z" />
-<glyph unicode="i" horiz-adv-x="524" d="M156 0v1047h213v-1047h-213zM262 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="j" horiz-adv-x="524" d="M-129 -281q81 18 135.5 39.5t87.5 51.5t47.5 70.5t14.5 96.5v1070h213v-998q0 -46 -4 -91.5t-15 -88.5t-30 -81.5t-50 -70.5q-36 -38 -74 -64t-76 -43.5t-75 -29t-72 -18.5zM262 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10 t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="k" horiz-adv-x="1126" d="M154 0v1378l213 101v-756l309 324h262l-287 -291l393 -756h-233l-309 602l-135 -135v-467h-213z" />
-<glyph unicode="l" horiz-adv-x="541" d="M164 0v1378l213 101v-1479h-213z" />
-<glyph unicode="m" horiz-adv-x="1739" d="M1382 0v627q0 117 -48 180t-144 63q-43 0 -81 -16t-66.5 -48t-45 -79.5t-16.5 -110.5v-616h-213v627q0 117 -48.5 180t-144.5 63q-43 0 -80.5 -16t-66 -48t-45 -79.5t-16.5 -110.5v-616h-213v1047h213v-97q45 65 114 93t150 28q86 0 159.5 -34.5t118.5 -114.5 q50 77 128 113t182 36q80 0 148.5 -23t119 -71t79.5 -123t29 -178v-676h-213z" />
-<glyph unicode="n" d="M801 0v625q0 66 -16 112.5t-44 76t-66 43t-83 13.5q-48 0 -89 -12.5t-71 -40.5t-47.5 -74.5t-17.5 -113.5v-629h-213v1047h213v-101q51 65 117 95t153 30q82 0 151 -23.5t119.5 -76.5t78.5 -138t28 -208v-625h-213z" />
-<glyph unicode="o" horiz-adv-x="1161" d="M580 -25q-110 0 -198.5 37t-150.5 107.5t-95.5 172t-33.5 230.5t33.5 231t95.5 172.5t150.5 108t198.5 37.5q109 0 198 -37.5t151 -108t96 -172.5t34 -231t-34 -230.5t-96 -172t-151 -107.5t-198 -37zM580 176q61 0 110.5 24t84 68.5t53 108.5t18.5 145t-18.5 145.5 t-53 109.5t-84 69t-110.5 24t-110 -24t-83.5 -69t-53 -109.5t-18.5 -145.5t18.5 -145t53 -108.5t83.5 -68.5t110 -24z" />
-<glyph unicode="p" horiz-adv-x="1151" d="M618 -25q-46 0 -83.5 9t-68.5 25t-55.5 36.5t-43.5 44.5v-403l-213 -99v1459h213v-84q41 46 101 77t150 31q99 0 179 -37.5t136 -108t86 -172.5t30 -231t-31.5 -230.5t-88 -172t-136 -107.5t-175.5 -37zM588 176q121 0 184.5 89t63.5 257q0 76 -16.5 140t-48.5 110 t-78 72t-105 26q-75 0 -130.5 -30.5t-90.5 -87.5v-455q18 -24 40.5 -46t50 -38.5t60 -26.5t70.5 -10z" />
-<glyph unicode="q" horiz-adv-x="1151" d="M537 -25q-96 0 -176 37t-137.5 107.5t-89.5 172t-32 230.5t30.5 231t87.5 172.5t137.5 108t179.5 37.5q87 0 146.5 -31t100.5 -77v84h213v-1360l-213 -97v500q-42 -51 -101.5 -83t-145.5 -32zM563 176q41 0 75 11.5t61.5 29t48.5 39t36 41.5v455q-37 60 -93.5 89 t-127.5 29q-59 0 -105 -26t-78 -72t-48.5 -110t-16.5 -140q0 -168 63.5 -257t184.5 -89z" />
-<glyph unicode="r" horiz-adv-x="827" d="M154 0v1047h213v-127q37 80 96 115.5t137 35.5q104 0 176 -63l-31 -197q-29 20 -68.5 35.5t-96.5 15.5q-36 0 -73.5 -12.5t-68.5 -44t-51 -84t-20 -133.5v-588h-213z" />
-<glyph unicode="s" horiz-adv-x="973" d="M477 -25q-62 0 -121 12t-110.5 34t-93.5 53.5t-70 70.5l151 129q16 -15 39 -33.5t53 -34.5t67 -27t81 -11q40 0 74 6t58.5 19t38 34.5t13.5 53.5q0 37 -25.5 63.5t-66.5 48t-93 41t-105 41.5q-49 21 -96 46t-83.5 60t-59 83.5t-22.5 117.5q0 70 28.5 124t78 90.5 t116 55.5t142.5 19q67 0 123.5 -13t102 -35t80.5 -50.5t61 -59.5l-154 -122q-14 14 -33.5 29.5t-45.5 28.5t-59 21.5t-75 8.5q-68 0 -107.5 -25t-39.5 -72q0 -30 21 -52.5t57 -41.5t82.5 -36.5l97.5 -37.5q54 -21 106 -47t92 [...]
-<glyph unicode="t" horiz-adv-x="776" d="M457 -25q-62 0 -106.5 18.5t-73 52.5t-41.5 82.5t-13 107.5v626h-162v185h162v329l213 103v-432h248v-185h-248v-571q0 -26 4.5 -48t16 -38t31.5 -24.5t51 -8.5q76 0 155 49l-26 -198q-44 -22 -96 -35t-115 -13z" />
-<glyph unicode="u" d="M791 0v100q-51 -65 -117.5 -95t-153.5 -30q-82 0 -151 23.5t-119.5 77t-78.5 138.5t-28 208v625h213v-625q0 -66 16 -112.5t44 -76.5t66 -43.5t83 -13.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213z" />
-<glyph unicode="v" horiz-adv-x="1067" d="M633 0h-199l-383 1047h213l242 -687l15.5 -47t13.5 -45q6 25 12.5 45l15.5 47l240 687h213z" />
-<glyph unicode="w" horiz-adv-x="1487" d="M1137 0h-189l-174 608l-10.5 37t-10 36.5t-8 32.5t-4.5 25q-2 -10 -4.5 -25.5t-6.5 -33.5t-9 -37l-10 -37l-172 -606h-189l-278 1047h209l147 -652q8 -34 12.5 -59.5l8.5 -46.5q3 21 8.5 48t13.5 58l174 652h197l174 -652q8 -31 13.5 -58t8.5 -48q3 21 8 46.5t13 59.5 l147 652h209z" />
-<glyph unicode="x" horiz-adv-x="1085" d="M770 0l-174 270q-17 26 -29.5 50.5l-23.5 44.5q-10 -20 -23.5 -44.5t-30.5 -50.5l-174 -270h-243l348 545l-320 502h244l145 -228l30.5 -51t23.5 -43l23.5 44t29.5 50l145 228h244l-321 -502l350 -545h-244z" />
-<glyph unicode="y" horiz-adv-x="1102" d="M498 -397h-217l161 415l-391 1029h211l258 -685q11 -29 17 -48t12 -44q6 25 12 44t17 48l260 685h213z" />
-<glyph unicode="z" horiz-adv-x="1047" d="M113 0v174l555 684h-514v189h770v-168l-555 -691h553v-188h-809z" />
-<glyph unicode="{" horiz-adv-x="750" d="M668 -266q-98 2 -177.5 33t-136 90.5t-87.5 148t-31 205.5v121q0 77 -31.5 124.5t-91.5 47.5h-31v186h31q60 0 91.5 47.5t31.5 124.5v119q0 117 31 205.5t87.5 148t136 90.5t177.5 33v-188q-110 -5 -164.5 -69.5t-54.5 -182.5v-133q0 -76 -13 -128t-37.5 -85t-61 -51 t-83.5 -25q47 -8 83.5 -25.5t61 -50.5t37.5 -85t13 -128v-133q0 -118 54.5 -182.5t164.5 -69.5v-188z" />
-<glyph unicode="|" horiz-adv-x="561" d="M174 -420v2069h213v-2069h-213z" />
-<glyph unicode="}" horiz-adv-x="750" d="M82 -78q109 5 164 69.5t55 182.5v133q0 76 13 128t37.5 85t61 50.5t83.5 25.5q-47 7 -83.5 25t-61 51t-37.5 85t-13 128v133q0 118 -55 182.5t-164 69.5v188q97 -2 177 -33t136.5 -90.5t87.5 -148t31 -205.5v-119q0 -77 31.5 -124.5t91.5 -47.5h31v-186h-31 q-60 0 -91.5 -47.5t-31.5 -124.5v-121q0 -117 -31 -205.5t-87.5 -148t-136.5 -90.5t-177 -33v188z" />
-<glyph unicode="~" horiz-adv-x="1290" d="M852 541q-65 0 -121 18.5t-105.5 40.5l-95 40.5t-90.5 18.5q-55 0 -86.5 -33t-36.5 -85h-153q0 64 15 122.5t48 103t85 71t126 26.5q65 0 121 -18.5t105.5 -41t95 -41t90.5 -18.5q56 0 87 33.5t36 85.5h153q0 -64 -15 -122.5t-47.5 -103t-84.5 -71t-127 -26.5z" />
-<glyph unicode="¡" horiz-adv-x="565" d="M176 -410v453l60 608h94l59 -608v-453h-213zM283 770q-33 0 -61 12t-49 32.5t-32.5 47.5t-11.5 58q0 30 11.5 57.5t32.5 48.5t49 33t61 12q32 0 60.5 -12t49.5 -33t33 -48.5t12 -57.5q0 -31 -12 -58t-33 -47.5t-49.5 -32.5t-60.5 -12z" />
-<glyph unicode="¢" horiz-adv-x="1061" d="M434 -25v209q-78 20 -139.5 65t-104 112.5t-65.5 157t-23 198.5q0 108 23 197.5t65.5 157t104 112.5t139.5 65v209h205v-198q114 -17 194.5 -81t120.5 -184l-202 -69q-23 60 -70 97.5t-115 37.5q-60 0 -106 -24t-77.5 -68.5t-48 -108t-16.5 -143.5t16.5 -143.5t47 -107.5 t75 -67.5t101.5 -23.5q72 0 123.5 39.5t73.5 113.5l202 -63q-74 -254 -319 -289v-201h-205z" />
-<glyph unicode="£" horiz-adv-x="1206" d="M164 0v213q51 24 84.5 68t53.5 96.5t28 106.5t8 96h-184v188h135l-26.5 56t-24 62.5t-17 73t-6.5 87.5q0 78 28.5 152t87 131.5t148 92.5t211.5 35q71 0 130 -9.5t111 -27.5t99.5 -45t95.5 -61l-84 -203l-56 47t-72.5 46t-96.5 34.5t-127 13.5q-68 0 -116 -18t-78.5 -47 t-45 -66t-14.5 -75t9 -71.5t22.5 -66.5t29 -67.5t29.5 -73.5h342v-188h-309q0 -104 -36.5 -199t-114.5 -176h737v-205h-981z" />
-<glyph unicode="¤" horiz-adv-x="1290" d="M645 176q-161 0 -274 74l-138 -139l-159 159l145 146q-31 55 -46 122t-15 144t16 144t47 124l-147 148l153 153l140 -139q57 37 127 57.5t151 20.5q79 0 148.5 -19.5t126.5 -56.5l137 137l160 -159l-144 -144q29 -57 45.5 -124t16.5 -142q0 -78 -17 -146.5t-49 -125.5 l148 -146l-154 -153l-141 141q-57 -37 -126.5 -56.5t-150.5 -19.5zM645 381q61 0 111.5 22.5t86.5 62.5t55.5 95.5t19.5 120.5t-19.5 120.5t-55.5 96t-86.5 63.5t-111.5 23t-110.5 -23t-85 -63.5t-55 -96t-19.5 - [...]
-<glyph unicode="¥" horiz-adv-x="1174" d="M702 674h283v-185h-293v-135h293v-184h-293v-170h-213v170h-301v184h301v135h-301v185h289l-436 760h241l314 -564l313 564h244z" />
-<glyph unicode="¦" horiz-adv-x="561" d="M174 778v697h213v-697h-213zM174 -246v697h213v-697h-213z" />
-<glyph unicode="§" horiz-adv-x="1071" d="M543 -246q-75 0 -141 14t-122.5 39.5t-101.5 60.5t-76 77l156 133q18 -18 44.5 -38.5t61.5 -38.5t79.5 -30t97.5 -12q43 0 81 7t66 23.5t44 43t16 65.5q0 42 -28.5 73.5t-75 58.5t-105 50.5l-117.5 49.5q-56 24 -109 50t-94 62t-66.5 85t-25.5 120q0 76 37.5 133t99.5 88 q-66 45 -101.5 111.5t-35.5 153.5q0 78 31 138.5t85 102t127.5 63t157.5 21.5q75 0 136 -14t110.5 -38t87.5 -55t66 -65l-162 -129q-15 16 -36.5 33t-51 31t-66.5 23t-84 9q-83 0 -131.5 -31t-48.5 -89q0 -34 [...]
-<glyph unicode="¨" horiz-adv-x="819" d="M225 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10zM594 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10 t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="©" horiz-adv-x="1729" d="M872 270q-101 0 -170 40t-111 103.5t-60.5 143.5t-18.5 160q0 49 8 101.5t26 102t46 93.5t68 77t92.5 52.5t119.5 19.5q62 0 112 -16t87.5 -42t63 -59t39.5 -67l-136 -64q-32 49 -68.5 75t-97.5 26q-57 0 -96 -28t-62.5 -71.5t-33.5 -96.5t-10 -103q0 -53 11.5 -106.5 t36.5 -96.5t63 -69.5t91 -26.5q58 0 96.5 30t69.5 76l138 -49q-20 -45 -48 -83t-65.5 -65t-85 -42t-105.5 -15zM864 -25q-158 0 -293 57t-235 156.5t-156.5 235t-56.5 293.5q0 105 25.5 200.5t73 178t114.5 149.5 [...]
-<glyph unicode="ª" horiz-adv-x="895" d="M602 580v61q-17 -15 -40.5 -30t-52.5 -26.5t-62.5 -18.5t-69.5 -7q-65 0 -121 18.5t-97 54.5t-64 90t-23 126q0 75 29 129t76 89t105.5 52t118.5 17q54 0 106.5 -11.5t94.5 -31.5v34q0 94 -42 132t-130 38q-74 0 -133 -21t-109 -58l-53 151q54 38 134.5 64t183.5 26 q55 0 113.5 -10t106.5 -43.5t78.5 -97.5t30.5 -172v-555h-180zM410 721q28 0 55.5 6.5t52.5 18t46.5 26t37.5 29.5v135q-18 8 -39 15t-44 12.5t-45.5 8.5t-43.5 3q-34 0 -65.5 -7t-55.5 -22.5t-38.5 -39.5t-14.5 -58 [...]
-<glyph unicode="«" horiz-adv-x="1073" d="M373 0l-312 524l312 523h231l-311 -523l311 -524h-231zM811 0l-311 524l311 523h231l-311 -523l311 -524h-231z" />
-<glyph unicode="¬" horiz-adv-x="1290" d="M913 344v420h-749v205h962v-625h-213z" />
-<glyph unicode="­" horiz-adv-x="809" d="M154 506v205h501v-205h-501z" />
-<glyph unicode="®" horiz-adv-x="1729" d="M571 285v864h340q81 0 135.5 -22t87.5 -58t47.5 -82.5t14.5 -95.5q0 -38 -10.5 -75.5t-32.5 -70.5t-56 -59t-81 -41l88 -180.5l88 -179.5h-176l-166 346h-127v-346h-152zM911 776q36 0 60.5 9t38.5 25t20 37t6 44q0 20 -5 40t-19 36.5t-38 26.5t-63 10h-188v-228h188z M864 -25q-158 0 -293 57t-235 156.5t-156.5 235t-56.5 293.5q0 105 25.5 200.5t73 178t114.5 149.5t149.5 114.5t178 73t200.5 25.5t201 -25.5t178 -73t149.5 -114.5t114.5 -149.5t73 -178t26 -200.5t-26 -201t-7 [...]
-<glyph unicode="¯" horiz-adv-x="819" d="M72 1223v176h676v-176h-676z" />
-<glyph unicode="°" horiz-adv-x="819" d="M410 1124q32 0 59.5 12.5t47.5 34t31 50.5t11 63t-11 63t-31 50t-47.5 33t-59.5 12t-60 -12t-48.5 -33t-32 -50t-11.5 -63t11.5 -63t32 -50.5t48.5 -34t60 -12.5zM410 928q-75 0 -141 27.5t-115 75.5t-77 113t-28 140q0 76 28 141t77 112t115 74t141 27t140.5 -27 t113.5 -74t76 -112t28 -141q0 -75 -28 -140t-76 -113t-113.5 -75.5t-140.5 -27.5z" />
-<glyph unicode="±" horiz-adv-x="1290" d="M752 723v-317h-213v317h-375v205h375v317h213v-317h374v-205h-374zM164 0v205h962v-205h-962z" />
-<glyph unicode="²" horiz-adv-x="731" d="M51 909q0 92 24 168t65.5 136t96 105t115.5 77q25 13 47 26t39 28t27 34t10 45q0 40 -30 70t-87 30q-58 0 -94.5 -35t-48.5 -98l-166 39q11 54 39 100.5t68.5 80.5t92 53.5t109.5 19.5q69 0 123.5 -19.5t92.5 -55t58.5 -84t20.5 -105.5t-18 -98t-49.5 -72t-74 -56.5 t-91.5 -50.5q-71 -37 -109.5 -82.5t-60.5 -95.5h401v-160h-600z" />
-<glyph unicode="³" horiz-adv-x="711" d="M338 893q-119 0 -199 66.5t-114 185.5l168 45q17 -72 60 -104.5t91 -32.5q53 0 88 28t35 82q0 22 -7.5 42t-23.5 35.5t-41 24.5t-59 9h-64v160h62q54 0 78 28t24 68q0 42 -25 70t-73 28t-75.5 -27.5t-43.5 -72.5l-164 59q37 91 110.5 146t176.5 55q64 0 114 -20 t84.5 -54.5t53 -79.5t18.5 -96q0 -53 -27.5 -101t-72.5 -77q62 -28 96.5 -82t34.5 -117q0 -62 -22.5 -111.5t-62.5 -84t-96.5 -53.5t-123.5 -19z" />
-<glyph unicode="´" horiz-adv-x="819" d="M369 1180h-217l290 319h291z" />
-<glyph unicode="µ" d="M791 0v100q-51 -65 -115 -95t-135 -30q-122 0 -185 84v-368l-213 -101v1457h213v-625q0 -72 15 -119.5t42.5 -75.5t66 -39.5t85.5 -11.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213z" />
-<glyph unicode="¶" horiz-adv-x="1286" d="M909 -244v1473h-176v-1542l-213 -97v975q-130 0 -221 39.5t-148.5 102.5t-83.5 140t-26 152q0 81 24.5 159t80 139.5t146 99t222.5 37.5h608v-1581z" />
-<glyph unicode="·" horiz-adv-x="457" d="M227 565q-33 0 -61 12t-48.5 32.5t-32 47.5t-11.5 58t11.5 58.5t32 48t48.5 32.5t61 12q32 0 60.5 -12t49.5 -32.5t33.5 -48t12.5 -58.5t-12.5 -58t-33.5 -47.5t-49.5 -32.5t-60.5 -12z" />
-<glyph unicode="¸" horiz-adv-x="819" d="M399 -489q-74 0 -123.5 21.5t-80.5 55.5l104 76q12 -15 31.5 -23t48.5 -8q46 0 74 25t28 66q0 38 -31.5 63t-78.5 25q-49 0 -82 -21l-41 51l143 178h117l-76 -122q60 0 102.5 -14.5t70 -38.5t40 -56t12.5 -68q0 -46 -18 -84.5t-51.5 -66.5t-81.5 -43.5t-107 -15.5z" />
-<glyph unicode="¹" horiz-adv-x="410" d="M133 909v684h-113v125q32 0 55.5 3.5t40.5 10t29 17t21 25.5h135v-865h-168z" />
-<glyph unicode="º" horiz-adv-x="950" d="M473 559q-90 0 -162.5 30.5t-123 88t-78 141t-27.5 189.5t27.5 189.5t78 141.5t123 88.5t162.5 30.5t163 -30.5t124.5 -88.5t79.5 -141.5t28 -189.5t-28 -189.5t-79.5 -141t-124.5 -88t-163 -30.5zM473 733q100 0 154.5 72.5t54.5 202.5q0 131 -54.5 203.5t-154.5 72.5 q-97 0 -152 -72.5t-55 -203.5q0 -129 55 -202t152 -73z" />
-<glyph unicode="»" horiz-adv-x="1073" d="M469 0l311 524l-311 523h231l312 -523l-312 -524h-231zM31 0l311 524l-311 523h231l311 -523l-311 -524h-231z" />
-<glyph unicode="¼" horiz-adv-x="1405" d="M20 -25l979 1483h195l-979 -1483h-195zM143 580v684h-113v125q32 0 55.5 3t40.5 10t29 17t21 25h135v-864h-168zM1084 0v176h-435v109l441 581h157v-532h111v-158h-111v-176h-163zM1084 334v291l-224 -291h224z" />
-<glyph unicode="½" horiz-adv-x="1495" d="M20 -25l979 1483h195l-979 -1483h-195zM143 580v684h-113v125q32 0 55.5 3t40.5 10t29 17t21 25h135v-864h-168zM815 0q0 92 24 167.5t65.5 135.5t96 105t115.5 77q25 13 47 26t39 28.5t27 34.5t10 44q0 41 -30 71t-87 30q-58 0 -94.5 -35t-48.5 -98l-166 39 q11 54 39 100.5t68.5 80.5t92 53.5t109.5 19.5q69 0 123.5 -20t92.5 -55t58.5 -83.5t20.5 -106.5q0 -57 -18 -97.5t-49.5 -71.5t-74 -56.5t-91.5 -50.5q-71 -37 -109.5 -83t-60.5 -95h401v-160h-600z" />
-<glyph unicode="¾" horiz-adv-x="1651" d="M266 -25l979 1483h195l-979 -1483h-195zM338 563q-119 0 -199 66.5t-114 185.5l168 45q17 -72 60 -104.5t91 -32.5q53 0 88 28.5t35 82.5q0 21 -7.5 41t-23.5 35.5t-41 24.5t-59 9h-64v160h62q54 0 78 28t24 68q0 42 -25 70t-73 28t-75.5 -27.5t-43.5 -72.5l-164 59 q37 91 110.5 146t176.5 55q64 0 114 -20t84.5 -54.5t53 -79.5t18.5 -96q0 -52 -27.5 -100.5t-72.5 -77.5q62 -28 96.5 -82t34.5 -117q0 -61 -22.5 -110.5t-62.5 -84.5t-96.5 -54t-123.5 -19zM1330 0v176h-435v109l4 [...]
-<glyph unicode="¿" horiz-adv-x="1026" d="M639 659v-41q0 -73 -15 -128t-40 -97.5t-57 -75t-65 -61l-65 -54.5t-57 -56t-40 -66t-15 -84q0 -45 15 -82.5t43.5 -64.5t70.5 -42.5t96 -15.5q125 0 193.5 73t68.5 206h213q0 -112 -35 -201t-98 -151t-150.5 -95t-191.5 -33q-107 0 -188.5 31t-137 86t-84 129t-28.5 160 q0 84 26 142t65.5 102.5t85.5 81.5t85.5 78.5t65.5 93.5t26 127v38h213zM535 770q-33 0 -61.5 12t-49 32.5t-32 47.5t-11.5 58q0 30 11.5 57.5t32 48.5t49 33t61.5 12q32 0 60.5 -12t49.5 -33t33 -48.5t12 -57 [...]
-<glyph unicode="À" horiz-adv-x="1425" d="M1118 0l-114 330h-580l-117 -330h-221l518 1434h219l516 -1434h-221zM764 1006l-26.5 79t-22.5 82q-10 -37 -23.5 -81.5t-25.5 -79.5l-177 -488h449zM754 1569l-365 319h291l291 -319h-217z" />
-<glyph unicode="Á" horiz-adv-x="1425" d="M1118 0l-114 330h-580l-117 -330h-221l518 1434h219l516 -1434h-221zM764 1006l-26.5 79t-22.5 82q-10 -37 -23.5 -81.5t-25.5 -79.5l-177 -488h449zM672 1569h-217l290 319h291z" />
-<glyph unicode="Â" horiz-adv-x="1425" d="M1118 0l-114 330h-580l-117 -330h-221l518 1434h219l516 -1434h-221zM764 1006l-26.5 79t-22.5 82q-10 -37 -23.5 -81.5t-25.5 -79.5l-177 -488h449zM905 1569l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="Ã" horiz-adv-x="1425" d="M1118 0l-114 330h-580l-117 -330h-221l518 1434h219l516 -1434h-221zM764 1006l-26.5 79t-22.5 82q-10 -37 -23.5 -81.5t-25.5 -79.5l-177 -488h449zM874 1591q-54 0 -99.5 15.5t-85.5 33.5l-77 33.5t-76 15.5q-38 0 -65.5 -21.5t-30.5 -68.5h-104q0 61 11.5 110.5 t37.5 84.5t66.5 53.5t99.5 18.5q54 0 99.5 -15.5t85.5 -34t77 -34t76 -15.5q38 0 65.5 22t30.5 68h104q0 -61 -11.5 -110.5t-37.5 -84t-67 -53t-99 -18.5z" />
-<glyph unicode="Ä" horiz-adv-x="1425" d="M1118 0l-114 330h-580l-117 -330h-221l518 1434h219l516 -1434h-221zM764 1006l-26.5 79t-22.5 82q-10 -37 -23.5 -81.5t-25.5 -79.5l-177 -488h449zM528 1599q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41 t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10zM897 1599q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -2 [...]
-<glyph unicode="Å" horiz-adv-x="1425" d="M1118 0l-114 330h-580l-117 -330h-221l475 1317q-46 35 -73 87t-27 114q0 51 20 96t54.5 78.5t80 53t97.5 19.5q51 0 97 -19.5t80.5 -53t54.5 -78.5t20 -96q0 -62 -28 -113.5t-73 -85.5l475 -1319h-221zM764 1006l-26.5 79t-22.5 82q-10 -37 -23.5 -81.5t-25.5 -79.5 l-177 -488h449zM713 1421q38 0 65 27.5t27 69.5q0 41 -27 68.5t-65 27.5t-65 -27.5t-27 -68.5q0 -42 27 -69.5t65 -27.5z" />
-<glyph unicode="Æ" horiz-adv-x="1718" d="M827 0v330h-442l-186 -330h-230l832 1434h801v-201h-562v-393h322v-201h-322v-438h607v-201h-820zM827 520v598q-8 -18 -18.5 -39l-21.5 -42.5l-22.5 -42l-21.5 -38.5l-247 -436h331z" />
-<glyph unicode="Ç" horiz-adv-x="1331" d="M709 -489q-75 0 -124.5 21.5t-80.5 55.5l104 76q13 -15 32 -23t48 -8q47 0 75 25t28 66q0 38 -32 63t-79 25q-48 0 -82 -21l-41 51l109 138q-99 8 -177 42t-137 88t-101 124t-68 149t-38 164t-12 170q0 82 13.5 169t43 169.5t76.5 155.5t113 128t153 87t197 32 q101 0 182 -24.5t141.5 -64.5t101.5 -91t63 -104l-195 -91q-26 41 -54.5 72.5t-63 53.5t-77.5 33t-98 11q-72 0 -127.5 -23t-97 -63t-70.5 -93t-46.5 -112.5t-25.5 -122.5t-8 -122q0 -95 21.5 -191t67.5 -173t117 -125t1 [...]
-<glyph unicode="È" horiz-adv-x="1231" d="M184 0v1434h889v-201h-676v-393h437v-201h-437v-438h721v-201h-934zM625 1569l-365 319h291l291 -319h-217z" />
-<glyph unicode="É" horiz-adv-x="1231" d="M184 0v1434h889v-201h-676v-393h437v-201h-437v-438h721v-201h-934zM625 1569h-217l290 319h291z" />
-<glyph unicode="Ê" horiz-adv-x="1231" d="M184 0v1434h889v-201h-676v-393h437v-201h-437v-438h721v-201h-934zM817 1569l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="Ë" horiz-adv-x="1231" d="M184 0v1434h889v-201h-676v-393h437v-201h-437v-438h721v-201h-934zM440 1599q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10zM809 1599q-27 0 -50.5 10 t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="Ì" horiz-adv-x="602" d="M195 0v1434h213v-1434h-213zM343 1569l-365 319h291l291 -319h-217z" />
-<glyph unicode="Í" horiz-adv-x="602" d="M195 0v1434h213v-1434h-213zM261 1569h-217l290 319h291z" />
-<glyph unicode="Î" horiz-adv-x="602" d="M195 0v1434h213v-1434h-213zM494 1569l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="Ï" horiz-adv-x="602" d="M195 0v1434h213v-1434h-213zM117 1599q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10zM486 1599q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5 t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="Ð" horiz-adv-x="1403" d="M184 618h-133v197h133v619h402q125 0 224 -28.5t175 -78.5t130 -118.5t88.5 -148t50.5 -167.5t16 -176q0 -82 -14.5 -167.5t-48 -165t-86.5 -149.5t-130 -122.5t-179.5 -82.5t-233.5 -30h-394v618zM612 201q82 0 146.5 21.5t113 58.5t82 87t55 107.5t31 119t9.5 122.5 q0 97 -23.5 189.5t-73.5 165t-128.5 117t-188.5 44.5h-238v-418h310v-197h-310v-417h215z" />
-<glyph unicode="Ñ" horiz-adv-x="1454" d="M1069 0l-620 973l-58 106l6 -106v-973h-213v1434h215l605 -957l59 -110l-6 108v959h213v-1434h-201zM909 1591q-54 0 -99.5 15.5t-85.5 33.5l-77 33.5t-76 15.5q-38 0 -65.5 -21.5t-30.5 -68.5h-104q0 61 11.5 110.5t37.5 84.5t66.5 53.5t99.5 18.5q54 0 99.5 -15.5 t85.5 -34t77 -34t76 -15.5q38 0 65.5 22t30.5 68h104q0 -61 -11.5 -110.5t-37.5 -84t-67 -53t-99 -18.5z" />
-<glyph unicode="Ò" horiz-adv-x="1516" d="M758 -25q-115 0 -206 30.5t-160.5 83t-118.5 124t-80 154t-45.5 172t-14.5 178.5q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5t206 -30.5t160.5 -83t118.5 -124t80 -154t45 -172t14 -177.5q0 -89 -14 -178.5t-45 -172t-80 -154t-118.5 -124t-160.5 -83 t-206 -30.5zM758 180q75 0 134 23.5t103.5 63.5t76 93.5t51.5 113t29 122t9 121.5q0 62 -9 126.5t-29 124t-51.5 111.5t-76 90.5t-103.5 61t-134 22.5t-134 -23.5t-104 -63.5t-76.5 -93.5t-51.5 -113t-29 -122 [...]
-<glyph unicode="Ó" horiz-adv-x="1516" d="M758 -25q-115 0 -206 30.5t-160.5 83t-118.5 124t-80 154t-45.5 172t-14.5 178.5q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5t206 -30.5t160.5 -83t118.5 -124t80 -154t45 -172t14 -177.5q0 -89 -14 -178.5t-45 -172t-80 -154t-118.5 -124t-160.5 -83 t-206 -30.5zM758 180q75 0 134 23.5t103.5 63.5t76 93.5t51.5 113t29 122t9 121.5q0 62 -9 126.5t-29 124t-51.5 111.5t-76 90.5t-103.5 61t-134 22.5t-134 -23.5t-104 -63.5t-76.5 -93.5t-51.5 -113t-29 -122 [...]
-<glyph unicode="Ô" horiz-adv-x="1516" d="M758 -25q-115 0 -206 30.5t-160.5 83t-118.5 124t-80 154t-45.5 172t-14.5 178.5q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5t206 -30.5t160.5 -83t118.5 -124t80 -154t45 -172t14 -177.5q0 -89 -14 -178.5t-45 -172t-80 -154t-118.5 -124t-160.5 -83 t-206 -30.5zM758 180q75 0 134 23.5t103.5 63.5t76 93.5t51.5 113t29 122t9 121.5q0 62 -9 126.5t-29 124t-51.5 111.5t-76 90.5t-103.5 61t-134 22.5t-134 -23.5t-104 -63.5t-76.5 -93.5t-51.5 -113t-29 -122 [...]
-<glyph unicode="Õ" horiz-adv-x="1516" d="M758 -25q-115 0 -206 30.5t-160.5 83t-118.5 124t-80 154t-45.5 172t-14.5 178.5q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5t206 -30.5t160.5 -83t118.5 -124t80 -154t45 -172t14 -177.5q0 -89 -14 -178.5t-45 -172t-80 -154t-118.5 -124t-160.5 -83 t-206 -30.5zM758 180q75 0 134 23.5t103.5 63.5t76 93.5t51.5 113t29 122t9 121.5q0 62 -9 126.5t-29 124t-51.5 111.5t-76 90.5t-103.5 61t-134 22.5t-134 -23.5t-104 -63.5t-76.5 -93.5t-51.5 -113t-29 -122 [...]
-<glyph unicode="Ö" horiz-adv-x="1516" d="M758 -25q-115 0 -206 30.5t-160.5 83t-118.5 124t-80 154t-45.5 172t-14.5 178.5q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5t206 -30.5t160.5 -83t118.5 -124t80 -154t45 -172t14 -177.5q0 -89 -14 -178.5t-45 -172t-80 -154t-118.5 -124t-160.5 -83 t-206 -30.5zM758 180q75 0 134 23.5t103.5 63.5t76 93.5t51.5 113t29 122t9 121.5q0 62 -9 126.5t-29 124t-51.5 111.5t-76 90.5t-103.5 61t-134 22.5t-134 -23.5t-104 -63.5t-76.5 -93.5t-51.5 -113t-29 -122 [...]
-<glyph unicode="×" horiz-adv-x="1290" d="M647 553l-323 -326l-154 154l326 324l-324 323l147 148l324 -324l324 326l153 -154l-325 -324l323 -323l-147 -148z" />
-<glyph unicode="Ø" horiz-adv-x="1495" d="M133 -25l146 213q-42 54 -71.5 117t-48 131t-27.5 139t-9 142q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5q103 0 186 -24.5t149 -67.5l64 92h215l-148 -215q42 -54 71.5 -116.5t49 -130.5t28.5 -138.5t9 -140.5q0 -89 -14.5 -178.5t-45.5 -172 t-79.5 -154t-118 -124t-160.5 -83t-206 -30.5q-104 0 -187 24.5t-149 68.5l-64 -93h-215zM344 717q0 -85 17 -170.5t55 -159.5l547 801q-44 31 -97 48t-118 17q-75 0 -134 -23.5t-104 -63.5t-76.5 -93.5t-51.5 -113t- [...]
-<glyph unicode="Ù" horiz-adv-x="1427" d="M713 -25q-111 0 -209.5 29t-171.5 95.5t-115.5 175t-42.5 268.5v891h213v-891q0 -108 25 -178t68.5 -111t103.5 -57.5t129 -16.5t128.5 16.5t104 57.5t69.5 111t25 178v891h213v-891q0 -158 -41 -266.5t-113.5 -175.5t-171.5 -96.5t-214 -29.5zM754 1569l-365 319h291 l291 -319h-217z" />
-<glyph unicode="Ú" horiz-adv-x="1427" d="M713 -25q-111 0 -209.5 29t-171.5 95.5t-115.5 175t-42.5 268.5v891h213v-891q0 -108 25 -178t68.5 -111t103.5 -57.5t129 -16.5t128.5 16.5t104 57.5t69.5 111t25 178v891h213v-891q0 -158 -41 -266.5t-113.5 -175.5t-171.5 -96.5t-214 -29.5zM713 1569h-217l290 319 h291z" />
-<glyph unicode="Û" horiz-adv-x="1427" d="M713 -25q-111 0 -209.5 29t-171.5 95.5t-115.5 175t-42.5 268.5v891h213v-891q0 -108 25 -178t68.5 -111t103.5 -57.5t129 -16.5t128.5 16.5t104 57.5t69.5 111t25 178v891h213v-891q0 -158 -41 -266.5t-113.5 -175.5t-171.5 -96.5t-214 -29.5zM905 1569l-192 161 l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="Ü" horiz-adv-x="1427" d="M713 -25q-111 0 -209.5 29t-171.5 95.5t-115.5 175t-42.5 268.5v891h213v-891q0 -108 25 -178t68.5 -111t103.5 -57.5t129 -16.5t128.5 16.5t104 57.5t69.5 111t25 178v891h213v-891q0 -158 -41 -266.5t-113.5 -175.5t-171.5 -96.5t-214 -29.5zM528 1599q-27 0 -50.5 10 t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10zM897 1599q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5 [...]
-<glyph unicode="Ý" horiz-adv-x="1364" d="M575 0v561l-503 873h243l367 -656l367 656h243l-504 -871v-563h-213zM682 1569h-217l290 319h291z" />
-<glyph unicode="Þ" horiz-adv-x="1315" d="M184 0v1434h213v-267h363q132 0 222.5 -38t146 -100t80 -140.5t24.5 -159.5q0 -75 -26 -153t-83 -141.5t-147 -103.5t-217 -40h-363v-291h-213zM772 496q67 0 112.5 21t74 54t41 74.5t12.5 83.5q0 37 -11 78.5t-38 76t-73.5 57t-117.5 22.5h-375v-467h375z" />
-<glyph unicode="ß" horiz-adv-x="1143" d="M627 -25q-57 0 -106 9t-89 24l109 176q42 -12 88 -12q37 0 76 15t71 46t52 79t20 114q0 48 -15.5 94t-49.5 81.5t-88 57t-130 21.5h-45v197h45q45 0 83.5 12t66.5 36.5t43.5 61t15.5 84.5q0 88 -53 139.5t-152 51.5q-48 0 -89 -15.5t-71 -49.5t-46.5 -89t-16.5 -133 v-1286l-213 -99v1379q0 133 37.5 226.5t98.5 151.5t139 84.5t159 26.5q94 0 171 -27t132.5 -76t86 -118t30.5 -154q0 -48 -11.5 -93.5t-35 -84t-59.5 -68.5t-86 -49q64 -19 113.5 -53t83.5 -80t51.5 -102.5t17.5 -1 [...]
-<glyph unicode="à" horiz-adv-x="1090" d="M956 0h-204v90q-20 -21 -48 -41.5t-63.5 -37t-79.5 -26.5t-96 -10q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5q-98 0 -169.5 -26 t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q44 0 91 -5t91.5 -19.5t84 -41.5t69 -70.5t46.5 -106.5t17 -150v-678zM504 164q81 0 143.5 33t104.5 79v160q-44 23 -103 37t-121 14q-48 0 -90 -9t-73.5 -2 [...]
-<glyph unicode="á" horiz-adv-x="1090" d="M956 0h-204v90q-20 -21 -48 -41.5t-63.5 -37t-79.5 -26.5t-96 -10q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5q-98 0 -169.5 -26 t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q44 0 91 -5t91.5 -19.5t84 -41.5t69 -70.5t46.5 -106.5t17 -150v-678zM504 164q81 0 143.5 33t104.5 79v160q-44 23 -103 37t-121 14q-48 0 -90 -9t-73.5 -2 [...]
-<glyph unicode="â" horiz-adv-x="1090" d="M956 0h-204v90q-20 -21 -48 -41.5t-63.5 -37t-79.5 -26.5t-96 -10q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5q-98 0 -169.5 -26 t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q44 0 91 -5t91.5 -19.5t84 -41.5t69 -70.5t46.5 -106.5t17 -150v-678zM504 164q81 0 143.5 33t104.5 79v160q-44 23 -103 37t-121 14q-48 0 -90 -9t-73.5 -2 [...]
-<glyph unicode="ã" horiz-adv-x="1090" d="M956 0h-204v90q-20 -21 -48 -41.5t-63.5 -37t-79.5 -26.5t-96 -10q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5q-98 0 -169.5 -26 t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q44 0 91 -5t91.5 -19.5t84 -41.5t69 -70.5t46.5 -106.5t17 -150v-678zM504 164q81 0 143.5 33t104.5 79v160q-44 23 -103 37t-121 14q-48 0 -90 -9t-73.5 -2 [...]
-<glyph unicode="ä" horiz-adv-x="1090" d="M956 0h-204v90q-20 -21 -48 -41.5t-63.5 -37t-79.5 -26.5t-96 -10q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5q-98 0 -169.5 -26 t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q44 0 91 -5t91.5 -19.5t84 -41.5t69 -70.5t46.5 -106.5t17 -150v-678zM504 164q81 0 143.5 33t104.5 79v160q-44 23 -103 37t-121 14q-48 0 -90 -9t-73.5 -2 [...]
-<glyph unicode="å" horiz-adv-x="1090" d="M956 0h-204v90q-20 -21 -48 -41.5t-63.5 -37t-79.5 -26.5t-96 -10q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5q-98 0 -169.5 -26 t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q44 0 91 -5t91.5 -19.5t84 -41.5t69 -70.5t46.5 -106.5t17 -150v-678zM504 164q81 0 143.5 33t104.5 79v160q-44 23 -103 37t-121 14q-48 0 -90 -9t-73.5 -2 [...]
-<glyph unicode="æ" horiz-adv-x="1757" d="M1214 -25q-118 0 -210.5 43t-155.5 123q-48 -49 -99 -81t-101 -51t-97 -26.5t-86 -7.5q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5 q-98 0 -169.5 -26t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q41 0 85.5 -5t87 -19.5t80.5 -41t67 -69.5q63 66 147.5 100.5t189.5 34.5q93 0 175 -29.5t143.5 -90t97 -152.5t35.5 -217q0 -34 -1.5 - [...]
-<glyph unicode="ç" horiz-adv-x="1061" d="M528 -489q-74 0 -123.5 21.5t-80.5 55.5l104 76q12 -15 31.5 -23t48.5 -8q46 0 74 25t28 66q0 38 -31.5 63t-78.5 25q-49 0 -82 -21l-41 51l110 140q-90 11 -161 53t-121 111t-76.5 164t-26.5 212q0 129 32 231t91.5 172.5t144 108t189.5 37.5q146 0 247.5 -65 t147.5 -205l-202 -70q-23 60 -70 97.5t-115 37.5q-60 0 -106 -24t-77.5 -68.5t-48 -108t-16.5 -143.5t16.5 -143.5t47 -107.5t75 -67.5t101.5 -23.5q72 0 123.5 40t73.5 114l202 -64q-38 -134 -126 -207t-222 -86l-49 -7 [...]
-<glyph unicode="è" horiz-adv-x="1120" d="M578 -25q-109 0 -197 37.5t-150 108t-95.5 172.5t-33.5 231t34 230.5t96.5 172t150 107.5t195.5 37q93 0 174.5 -29.5t143 -90t97 -152.5t35.5 -217q0 -34 -1.5 -72t-6.5 -66h-705q1 -65 23.5 -116.5t59.5 -88t84 -56t98 -19.5q65 0 120.5 27t96.5 79l139 -143 q-57 -68 -146 -110t-212 -42zM578 883q-50 0 -96 -14.5t-81.5 -46t-58.5 -82.5t-27 -124h500q-2 73 -23 124t-55 82.5t-75.5 46t-83.5 14.5zM621 1180l-365 319h291l291 -319h-217z" />
-<glyph unicode="é" horiz-adv-x="1120" d="M578 -25q-109 0 -197 37.5t-150 108t-95.5 172.5t-33.5 231t34 230.5t96.5 172t150 107.5t195.5 37q93 0 174.5 -29.5t143 -90t97 -152.5t35.5 -217q0 -34 -1.5 -72t-6.5 -66h-705q1 -65 23.5 -116.5t59.5 -88t84 -56t98 -19.5q65 0 120.5 27t96.5 79l139 -143 q-57 -68 -146 -110t-212 -42zM578 883q-50 0 -96 -14.5t-81.5 -46t-58.5 -82.5t-27 -124h500q-2 73 -23 124t-55 82.5t-75.5 46t-83.5 14.5zM539 1180h-217l290 319h291z" />
-<glyph unicode="ê" horiz-adv-x="1120" d="M578 -25q-109 0 -197 37.5t-150 108t-95.5 172.5t-33.5 231t34 230.5t96.5 172t150 107.5t195.5 37q93 0 174.5 -29.5t143 -90t97 -152.5t35.5 -217q0 -34 -1.5 -72t-6.5 -66h-705q1 -65 23.5 -116.5t59.5 -88t84 -56t98 -19.5q65 0 120.5 27t96.5 79l139 -143 q-57 -68 -146 -110t-212 -42zM578 883q-50 0 -96 -14.5t-81.5 -46t-58.5 -82.5t-27 -124h500q-2 73 -23 124t-55 82.5t-75.5 46t-83.5 14.5zM772 1180l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="ë" horiz-adv-x="1120" d="M578 -25q-109 0 -197 37.5t-150 108t-95.5 172.5t-33.5 231t34 230.5t96.5 172t150 107.5t195.5 37q93 0 174.5 -29.5t143 -90t97 -152.5t35.5 -217q0 -34 -1.5 -72t-6.5 -66h-705q1 -65 23.5 -116.5t59.5 -88t84 -56t98 -19.5q65 0 120.5 27t96.5 79l139 -143 q-57 -68 -146 -110t-212 -42zM578 883q-50 0 -96 -14.5t-81.5 -46t-58.5 -82.5t-27 -124h500q-2 73 -23 124t-55 82.5t-75.5 46t-83.5 14.5zM395 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27 [...]
-<glyph unicode="ì" horiz-adv-x="524" d="M156 0v1047h213v-1047h-213zM305 1180l-365 319h291l291 -319h-217z" />
-<glyph unicode="í" horiz-adv-x="524" d="M156 0v1047h213v-1047h-213zM223 1180h-217l290 319h291z" />
-<glyph unicode="î" horiz-adv-x="524" d="M156 0v1047h213v-1047h-213zM456 1180l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="ï" horiz-adv-x="524" d="M156 0v1047h213v-1047h-213zM79 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10zM448 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5 t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="ð" horiz-adv-x="1161" d="M862 1391l-96 -101q75 -61 130 -140.5t91 -174t54 -201.5t18 -223q0 -120 -29 -225t-88 -183t-149 -123t-213 -45q-103 0 -190.5 36t-151.5 100t-100 151.5t-36 190.5q0 106 36.5 194t99 152t145.5 99.5t176 35.5q47 0 90.5 -10.5t80 -29.5t65 -45.5t45.5 -57.5 q-25 118 -77.5 209t-129.5 153l-127 -133l-113 106l103 107q-51 20 -106.5 31t-115.5 14v180q104 -3 192.5 -24t164.5 -58l119 123zM578 733q-57 0 -105 -20t-83.5 -57t-55 -88.5t-19.5 -114.5q0 -59 20.5 -110t56.5 -8 [...]
-<glyph unicode="ñ" d="M801 0v625q0 66 -16 112.5t-44 76t-66 43t-83 13.5q-48 0 -89 -12.5t-71 -40.5t-47.5 -74.5t-17.5 -113.5v-629h-213v1047h213v-101q51 65 117 95t153 30q82 0 151 -23.5t119.5 -76.5t78.5 -138t28 -208v-625h-213zM751 1202q-54 0 -99.5 15.5t-85.5 33.5l-77 33.5 t-76 15.5q-38 0 -65.5 -21.5t-30.5 -68.5h-104q0 61 11.5 110.5t37.5 84.5t66.5 53.5t99.5 18.5q54 0 99.5 -15.5t85.5 -34t77 -34t76 -15.5q38 0 65.5 22t30.5 68h104q0 -61 -11.5 -110.5t-37.5 -84t-67 -53t-99 -18.5z" />
-<glyph unicode="ò" horiz-adv-x="1161" d="M580 -25q-110 0 -198.5 37t-150.5 107.5t-95.5 172t-33.5 230.5t33.5 231t95.5 172.5t150.5 108t198.5 37.5q109 0 198 -37.5t151 -108t96 -172.5t34 -231t-34 -230.5t-96 -172t-151 -107.5t-198 -37zM580 176q61 0 110.5 24t84 68.5t53 108.5t18.5 145t-18.5 145.5 t-53 109.5t-84 69t-110.5 24t-110 -24t-83.5 -69t-53 -109.5t-18.5 -145.5t18.5 -145t53 -108.5t83.5 -68.5t110 -24zM621 1180l-365 319h291l291 -319h-217z" />
-<glyph unicode="ó" horiz-adv-x="1161" d="M580 -25q-110 0 -198.5 37t-150.5 107.5t-95.5 172t-33.5 230.5t33.5 231t95.5 172.5t150.5 108t198.5 37.5q109 0 198 -37.5t151 -108t96 -172.5t34 -231t-34 -230.5t-96 -172t-151 -107.5t-198 -37zM580 176q61 0 110.5 24t84 68.5t53 108.5t18.5 145t-18.5 145.5 t-53 109.5t-84 69t-110.5 24t-110 -24t-83.5 -69t-53 -109.5t-18.5 -145.5t18.5 -145t53 -108.5t83.5 -68.5t110 -24zM539 1180h-217l290 319h291z" />
-<glyph unicode="ô" horiz-adv-x="1161" d="M580 -25q-110 0 -198.5 37t-150.5 107.5t-95.5 172t-33.5 230.5t33.5 231t95.5 172.5t150.5 108t198.5 37.5q109 0 198 -37.5t151 -108t96 -172.5t34 -231t-34 -230.5t-96 -172t-151 -107.5t-198 -37zM580 176q61 0 110.5 24t84 68.5t53 108.5t18.5 145t-18.5 145.5 t-53 109.5t-84 69t-110.5 24t-110 -24t-83.5 -69t-53 -109.5t-18.5 -145.5t18.5 -145t53 -108.5t83.5 -68.5t110 -24zM772 1180l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="õ" horiz-adv-x="1161" d="M580 -25q-110 0 -198.5 37t-150.5 107.5t-95.5 172t-33.5 230.5t33.5 231t95.5 172.5t150.5 108t198.5 37.5q109 0 198 -37.5t151 -108t96 -172.5t34 -231t-34 -230.5t-96 -172t-151 -107.5t-198 -37zM580 176q61 0 110.5 24t84 68.5t53 108.5t18.5 145t-18.5 145.5 t-53 109.5t-84 69t-110.5 24t-110 -24t-83.5 -69t-53 -109.5t-18.5 -145.5t18.5 -145t53 -108.5t83.5 -68.5t110 -24zM741 1202q-54 0 -99.5 15.5t-85.5 33.5l-77 33.5t-76 15.5q-38 0 -65.5 -21.5t-30.5 -68.5h-10 [...]
-<glyph unicode="ö" horiz-adv-x="1161" d="M580 -25q-110 0 -198.5 37t-150.5 107.5t-95.5 172t-33.5 230.5t33.5 231t95.5 172.5t150.5 108t198.5 37.5q109 0 198 -37.5t151 -108t96 -172.5t34 -231t-34 -230.5t-96 -172t-151 -107.5t-198 -37zM580 176q61 0 110.5 24t84 68.5t53 108.5t18.5 145t-18.5 145.5 t-53 109.5t-84 69t-110.5 24t-110 -24t-83.5 -69t-53 -109.5t-18.5 -145.5t18.5 -145t53 -108.5t83.5 -68.5t110 -24zM395 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 [...]
-<glyph unicode="÷" horiz-adv-x="1290" d="M164 600v205h962v-205h-962zM643 965q-29 0 -53 10t-42 27.5t-28 41t-10 50.5t10 50.5t28 41.5t42 28.5t53 10.5q28 0 53 -10.5t43 -28.5t28.5 -41.5t10.5 -50.5t-10.5 -50.5t-28.5 -41t-43 -27.5t-53 -10zM643 180q-29 0 -53 10t-42 27.5t-28 41t-10 50.5t10 51t28 41.5 t42 28t53 10.5q28 0 53 -10.5t43 -28t28.5 -41.5t10.5 -51t-10.5 -50.5t-28.5 -41t-43 -27.5t-53 -10z" />
-<glyph unicode="ø" horiz-adv-x="1161" d="M102 -25l113 164q-54 71 -83.5 166.5t-29.5 216.5q0 129 33.5 231t95.5 172.5t150.5 108t198.5 37.5q139 0 243 -59l41 59h195l-115 -164q56 -71 85.5 -167.5t29.5 -217.5q0 -129 -34 -230.5t-96 -172t-151 -107.5t-198 -37q-72 0 -132.5 15.5t-111.5 44.5l-41 -60h-193z M580 176q61 0 110.5 24t84 68.5t53 108.5t18.5 145q0 56 -9 104.5t-28 88.5l-352 -508q52 -31 123 -31zM315 522q0 -111 33 -190l354 508q-26 15 -57 22.5t-65 7.5q-61 0 -110 -24t-83.5 -69t-53 -109.5t-18.5 [...]
-<glyph unicode="ù" d="M791 0v100q-51 -65 -117.5 -95t-153.5 -30q-82 0 -151 23.5t-119.5 77t-78.5 138.5t-28 208v625h213v-625q0 -66 16 -112.5t44 -76.5t66 -43.5t83 -13.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213zM611 1180l-365 319h291l291 -319h-217z" />
-<glyph unicode="ú" d="M791 0v100q-51 -65 -117.5 -95t-153.5 -30q-82 0 -151 23.5t-119.5 77t-78.5 138.5t-28 208v625h213v-625q0 -66 16 -112.5t44 -76.5t66 -43.5t83 -13.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213zM529 1180h-217l290 319h291z" />
-<glyph unicode="û" d="M791 0v100q-51 -65 -117.5 -95t-153.5 -30q-82 0 -151 23.5t-119.5 77t-78.5 138.5t-28 208v625h213v-625q0 -66 16 -112.5t44 -76.5t66 -43.5t83 -13.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213zM762 1180l-192 161l-193 -161h-205l291 319h215 l289 -319h-205z" />
-<glyph unicode="ü" d="M791 0v100q-51 -65 -117.5 -95t-153.5 -30q-82 0 -151 23.5t-119.5 77t-78.5 138.5t-28 208v625h213v-625q0 -66 16 -112.5t44 -76.5t66 -43.5t83 -13.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213zM385 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5 t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10zM754 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27 [...]
-<glyph unicode="ý" horiz-adv-x="1102" d="M498 -397h-217l161 415l-391 1029h211l258 -685q11 -29 17 -48t12 -44q6 25 12 44t17 48l260 685h213zM508 1180h-217l290 319h291z" />
-<glyph unicode="þ" horiz-adv-x="1151" d="M618 -25q-46 0 -83.5 9t-68.5 25t-55.5 36.5t-43.5 44.5v-403l-213 -99v1770l213 100v-495q41 46 101 77t150 31q99 0 179 -37.5t136 -108t86 -172.5t30 -231t-31.5 -230.5t-88 -172t-136 -107.5t-175.5 -37zM588 176q121 0 184.5 89t63.5 257q0 76 -16.5 140t-48.5 110 t-78 72t-105 26q-75 0 -130.5 -30.5t-90.5 -87.5v-455q18 -24 40.5 -46t50 -38.5t60 -26.5t70.5 -10z" />
-<glyph unicode="ÿ" horiz-adv-x="1102" d="M498 -397h-217l161 415l-391 1029h211l258 -685q11 -29 17 -48t12 -44q6 25 12 44t17 48l260 685h213zM364 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5 t-50.5 -10zM733 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="Ā" horiz-adv-x="1425" d="M1118 0l-114 330h-580l-117 -330h-221l518 1434h219l516 -1434h-221zM764 1006l-26.5 79t-22.5 82q-10 -37 -23.5 -81.5t-25.5 -79.5l-177 -488h449zM375 1612v176h676v-176h-676z" />
-<glyph unicode="ā" horiz-adv-x="1090" d="M956 0h-204v90q-20 -21 -48 -41.5t-63.5 -37t-79.5 -26.5t-96 -10q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5q-98 0 -169.5 -26 t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q44 0 91 -5t91.5 -19.5t84 -41.5t69 -70.5t46.5 -106.5t17 -150v-678zM504 164q81 0 143.5 33t104.5 79v160q-44 23 -103 37t-121 14q-48 0 -90 -9t-73.5 - [...]
-<glyph unicode="Ă" horiz-adv-x="1425" d="M1118 0l-114 330h-580l-117 -330h-221l518 1434h219l516 -1434h-221zM764 1006l-26.5 79t-22.5 82q-10 -37 -23.5 -81.5t-25.5 -79.5l-177 -488h449zM713 1569q-99 0 -170.5 27.5t-118 74t-68.5 108t-22 130.5h180q0 -36 13.5 -66.5t39 -53t62.5 -35.5t84 -13t84 13 t63 35.5t39.5 53t13.5 66.5h181q0 -69 -22.5 -131t-69.5 -108.5t-119 -73.5t-170 -27z" />
-<glyph unicode="ă" horiz-adv-x="1090" d="M956 0h-204v90q-20 -21 -48 -41.5t-63.5 -37t-79.5 -26.5t-96 -10q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5q-98 0 -169.5 -26 t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q44 0 91 -5t91.5 -19.5t84 -41.5t69 -70.5t46.5 -106.5t17 -150v-678zM504 164q81 0 143.5 33t104.5 79v160q-44 23 -103 37t-121 14q-48 0 -90 -9t-73.5 - [...]
-<glyph unicode="Ą" horiz-adv-x="1425" d="M1120 -489q-55 0 -100.5 14t-78.5 41t-51 66t-18 89q0 91 62 162.5t184 116.5l-114 330h-580l-117 -330h-221l518 1434h219l516 -1434q-54 0 -106 -22t-93 -58t-66 -83t-25 -97q0 -53 29.5 -80t72.5 -27q29 0 50.5 12t33.5 27l104 -75q-31 -35 -84 -60.5t-135 -25.5z M764 1006l-26.5 79t-22.5 82q-10 -37 -23.5 -81.5t-25.5 -79.5l-177 -488h449z" />
-<glyph unicode="ą" horiz-adv-x="1090" d="M770 -489q-55 0 -100.5 14t-78.5 41t-51 66t-18 89q0 93 60.5 163.5t169.5 115.5v90q-20 -21 -48 -41.5t-63.5 -37t-79.5 -26.5t-96 -10q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58 q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5q-98 0 -169.5 -26t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q44 0 91 -5t91.5 -19.5t84 -41.5t69 -70.5t46.5 -106.5t17 -150v-678q-65 -13 -113.5 [...]
-<glyph unicode="Ć" horiz-adv-x="1331" d="M729 -25q-111 0 -198.5 30.5t-153.5 83t-112.5 124t-75.5 154t-42.5 172t-13.5 178.5q0 82 13.5 169t43 169.5t76.5 155.5t113 128t153 87t197 32q101 0 182 -24.5t141.5 -64.5t101.5 -91t63 -104l-195 -91q-26 41 -54.5 72.5t-63 53.5t-77.5 33t-98 11q-72 0 -127.5 -23 t-97 -63t-70.5 -93t-46.5 -112.5t-25.5 -122.5t-8 -122q0 -95 21.5 -191t67.5 -173t117 -125t169 -48q53 0 95.5 13.5t78 38t64 58.5t53.5 75l199 -76q-30 -70 -74.5 -128t-105 -99t-138 -64t-172.5 -23zM729 [...]
-<glyph unicode="ć" horiz-adv-x="1061" d="M559 -25q-105 0 -189.5 37t-144 107.5t-91.5 172t-32 230.5t32 231t91.5 172.5t144 108t189.5 37.5q146 0 247.5 -65t147.5 -205l-202 -70q-23 60 -70 97.5t-115 37.5q-60 0 -106 -24t-77.5 -68.5t-48 -108t-16.5 -143.5t16.5 -143.5t47 -107.5t75 -67.5t101.5 -23.5 q72 0 123.5 40t73.5 114l202 -64q-42 -146 -142.5 -220.5t-256.5 -74.5zM512 1180h-217l290 319h291z" />
-<glyph unicode="Ĉ" horiz-adv-x="1331" d="M729 -25q-111 0 -198.5 30.5t-153.5 83t-112.5 124t-75.5 154t-42.5 172t-13.5 178.5q0 82 13.5 169t43 169.5t76.5 155.5t113 128t153 87t197 32q101 0 182 -24.5t141.5 -64.5t101.5 -91t63 -104l-195 -91q-26 41 -54.5 72.5t-63 53.5t-77.5 33t-98 11q-72 0 -127.5 -23 t-97 -63t-70.5 -93t-46.5 -112.5t-25.5 -122.5t-8 -122q0 -95 21.5 -191t67.5 -173t117 -125t169 -48q53 0 95.5 13.5t78 38t64 58.5t53.5 75l199 -76q-30 -70 -74.5 -128t-105 -99t-138 -64t-172.5 -23zM921 [...]
-<glyph unicode="ĉ" horiz-adv-x="1061" d="M559 -25q-105 0 -189.5 37t-144 107.5t-91.5 172t-32 230.5t32 231t91.5 172.5t144 108t189.5 37.5q146 0 247.5 -65t147.5 -205l-202 -70q-23 60 -70 97.5t-115 37.5q-60 0 -106 -24t-77.5 -68.5t-48 -108t-16.5 -143.5t16.5 -143.5t47 -107.5t75 -67.5t101.5 -23.5 q72 0 123.5 40t73.5 114l202 -64q-42 -146 -142.5 -220.5t-256.5 -74.5zM745 1180l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="Ċ" horiz-adv-x="1331" d="M729 -25q-111 0 -198.5 30.5t-153.5 83t-112.5 124t-75.5 154t-42.5 172t-13.5 178.5q0 82 13.5 169t43 169.5t76.5 155.5t113 128t153 87t197 32q101 0 182 -24.5t141.5 -64.5t101.5 -91t63 -104l-195 -91q-26 41 -54.5 72.5t-63 53.5t-77.5 33t-98 11q-72 0 -127.5 -23 t-97 -63t-70.5 -93t-46.5 -112.5t-25.5 -122.5t-8 -122q0 -95 21.5 -191t67.5 -173t117 -125t169 -48q53 0 95.5 13.5t78 38t64 58.5t53.5 75l199 -76q-30 -70 -74.5 -128t-105 -99t-138 -64t-172.5 -23zM729 [...]
-<glyph unicode="ċ" horiz-adv-x="1061" d="M559 -25q-105 0 -189.5 37t-144 107.5t-91.5 172t-32 230.5t32 231t91.5 172.5t144 108t189.5 37.5q146 0 247.5 -65t147.5 -205l-202 -70q-23 60 -70 97.5t-115 37.5q-60 0 -106 -24t-77.5 -68.5t-48 -108t-16.5 -143.5t16.5 -143.5t47 -107.5t75 -67.5t101.5 -23.5 q72 0 123.5 40t73.5 114l202 -64q-42 -146 -142.5 -220.5t-256.5 -74.5zM553 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5 [...]
-<glyph unicode="Č" horiz-adv-x="1331" d="M729 -25q-111 0 -198.5 30.5t-153.5 83t-112.5 124t-75.5 154t-42.5 172t-13.5 178.5q0 82 13.5 169t43 169.5t76.5 155.5t113 128t153 87t197 32q101 0 182 -24.5t141.5 -64.5t101.5 -91t63 -104l-195 -91q-26 41 -54.5 72.5t-63 53.5t-77.5 33t-98 11q-72 0 -127.5 -23 t-97 -63t-70.5 -93t-46.5 -112.5t-25.5 -122.5t-8 -122q0 -95 21.5 -191t67.5 -173t117 -125t169 -48q53 0 95.5 13.5t78 38t64 58.5t53.5 75l199 -76q-30 -70 -74.5 -128t-105 -99t-138 -64t-172.5 -23zM835 [...]
-<glyph unicode="č" horiz-adv-x="1061" d="M559 -25q-105 0 -189.5 37t-144 107.5t-91.5 172t-32 230.5t32 231t91.5 172.5t144 108t189.5 37.5q146 0 247.5 -65t147.5 -205l-202 -70q-23 60 -70 97.5t-115 37.5q-60 0 -106 -24t-77.5 -68.5t-48 -108t-16.5 -143.5t16.5 -143.5t47 -107.5t75 -67.5t101.5 -23.5 q72 0 123.5 40t73.5 114l202 -64q-42 -146 -142.5 -220.5t-256.5 -74.5zM659 1180h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="Ď" horiz-adv-x="1403" d="M184 1434h402q125 0 224 -28.5t175 -78.5t130 -118.5t88.5 -148t50.5 -167.5t16 -176q0 -82 -14.5 -167.5t-48 -165t-86.5 -149.5t-130 -122.5t-179.5 -82.5t-233.5 -30h-394v1434zM612 201q82 0 146.5 21.5t113 58.5t82 87t55 107.5t31 119t9.5 122.5q0 97 -23.5 189.5 t-73.5 165t-128.5 117t-188.5 44.5h-238v-1032h215zM739 1569h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="ď" horiz-adv-x="1479" d="M537 -25q-99 0 -179.5 37.5t-137.5 108t-87.5 172.5t-30.5 231t32 230.5t89.5 172t137.5 107.5t176 37q86 0 146.5 -28t100.5 -70v407l213 99v-1479h-213v74q-42 -47 -100.5 -73t-146.5 -26zM567 176q72 0 127 31t90 88v461q-12 20 -32 40t-47.5 36.5t-62 27t-75.5 10.5 q-120 0 -186 -88.5t-66 -257.5q0 -76 17.5 -139.5t50 -110t79 -72.5t105.5 -26zM1428 1459v-16q0 -151 -50.5 -269.5t-150.5 -216.5l-66 49q14 28 25 78t19 111t12 129.5t4 134.5h207z" />
-<glyph unicode="Đ" horiz-adv-x="1403" d="M184 618h-133v197h133v619h402q125 0 224 -28.5t175 -78.5t130 -118.5t88.5 -148t50.5 -167.5t16 -176q0 -82 -14.5 -167.5t-48 -165t-86.5 -149.5t-130 -122.5t-179.5 -82.5t-233.5 -30h-394v618zM612 201q82 0 146.5 21.5t113 58.5t82 87t55 107.5t31 119t9.5 122.5 q0 97 -23.5 189.5t-73.5 165t-128.5 117t-188.5 44.5h-238v-418h310v-197h-310v-417h215z" />
-<glyph unicode="đ" horiz-adv-x="1151" d="M997 1110v-1110h-213v74q-42 -47 -100.5 -73t-146.5 -26q-99 0 -179.5 37t-137.5 105t-87.5 163.5t-30.5 213.5q0 117 32 212t89.5 161.5t137.5 102.5t176 36q86 0 146.5 -28.5t100.5 -70.5v203h-260v180h260v90l213 99v-189h123v-180h-123zM567 176q72 0 127 31t90 88 v395q-12 20 -32 40.5t-47.5 37t-62 27t-75.5 10.5q-60 0 -106.5 -21.5t-79 -62t-49.5 -98t-17 -129.5q0 -65 17.5 -122.5t50 -101t79 -69t105.5 -25.5z" />
-<glyph unicode="Ē" horiz-adv-x="1231" d="M184 0v1434h889v-201h-676v-393h437v-201h-437v-438h721v-201h-934zM291 1612v176h676v-176h-676z" />
-<glyph unicode="ē" horiz-adv-x="1120" d="M578 -25q-109 0 -197 37.5t-150 108t-95.5 172.5t-33.5 231t34 230.5t96.5 172t150 107.5t195.5 37q93 0 174.5 -29.5t143 -90t97 -152.5t35.5 -217q0 -34 -1.5 -72t-6.5 -66h-705q1 -65 23.5 -116.5t59.5 -88t84 -56t98 -19.5q65 0 120.5 27t96.5 79l139 -143 q-57 -68 -146 -110t-212 -42zM578 883q-50 0 -96 -14.5t-81.5 -46t-58.5 -82.5t-27 -124h500q-2 73 -23 124t-55 82.5t-75.5 46t-83.5 14.5zM242 1223v176h676v-176h-676z" />
-<glyph unicode="Ĕ" horiz-adv-x="1231" d="M184 0v1434h889v-201h-676v-393h437v-201h-437v-438h721v-201h-934zM629 1569q-99 0 -170.5 27.5t-118 74t-68.5 108t-22 130.5h180q0 -36 13.5 -66.5t39 -53t62.5 -35.5t84 -13t84 13t63 35.5t39.5 53t13.5 66.5h181q0 -69 -22.5 -131t-69.5 -108.5t-119 -73.5t-170 -27 z" />
-<glyph unicode="ĕ" horiz-adv-x="1120" d="M578 -25q-109 0 -197 37.5t-150 108t-95.5 172.5t-33.5 231t34 230.5t96.5 172t150 107.5t195.5 37q93 0 174.5 -29.5t143 -90t97 -152.5t35.5 -217q0 -34 -1.5 -72t-6.5 -66h-705q1 -65 23.5 -116.5t59.5 -88t84 -56t98 -19.5q65 0 120.5 27t96.5 79l139 -143 q-57 -68 -146 -110t-212 -42zM578 883q-50 0 -96 -14.5t-81.5 -46t-58.5 -82.5t-27 -124h500q-2 73 -23 124t-55 82.5t-75.5 46t-83.5 14.5zM580 1180q-99 0 -170.5 27.5t-118 74t-68.5 108t-22 130.5h180q0 -36 13.5 - [...]
-<glyph unicode="Ė" horiz-adv-x="1231" d="M184 0v1434h889v-201h-676v-393h437v-201h-437v-438h721v-201h-934zM629 1599q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="ė" horiz-adv-x="1120" d="M578 -25q-109 0 -197 37.5t-150 108t-95.5 172.5t-33.5 231t34 230.5t96.5 172t150 107.5t195.5 37q93 0 174.5 -29.5t143 -90t97 -152.5t35.5 -217q0 -34 -1.5 -72t-6.5 -66h-705q1 -65 23.5 -116.5t59.5 -88t84 -56t98 -19.5q65 0 120.5 27t96.5 79l139 -143 q-57 -68 -146 -110t-212 -42zM578 883q-50 0 -96 -14.5t-81.5 -46t-58.5 -82.5t-27 -124h500q-2 73 -23 124t-55 82.5t-75.5 46t-83.5 14.5zM580 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 2 [...]
-<glyph unicode="Ę" horiz-adv-x="1231" d="M831 -489q-55 0 -100.5 14t-78 41t-50.5 66t-18 89q0 45 16 86.5t45 77.5t71 65t93 50h-625v1434h889v-201h-676v-393h437v-201h-437v-438h721v-201h-55q-74 0 -130.5 -27t-95 -66.5t-58 -84.5t-19.5 -82q0 -53 29.5 -80t72.5 -27q29 0 50.5 12t33.5 27l105 -75 q-31 -35 -84.5 -60.5t-135.5 -25.5z" />
-<glyph unicode="ę" horiz-adv-x="1120" d="M610 -489q-55 0 -100.5 14t-78.5 41t-51 66t-18 89q0 81 48 146t133 108q-102 7 -184 47.5t-139 111t-87.5 169t-30.5 221.5q0 129 34 230.5t96.5 172t150 107.5t195.5 37q93 0 174.5 -29.5t143 -90t97 -152.5t35.5 -217q0 -34 -1.5 -72t-6.5 -66h-705q1 -65 23.5 -116.5 t59.5 -88t84 -56t98 -19.5q65 0 120.5 27t96.5 79l139 -143q-28 -35 -68 -63t-84 -54l-88 -52t-79 -57t-56.5 -70t-21.5 -91q0 -53 29.5 -80t72.5 -27q29 0 50.5 12t33.5 27l104 -75q-31 -35 -84 -60.5t-135 [...]
-<glyph unicode="Ě" horiz-adv-x="1231" d="M184 0v1434h889v-201h-676v-393h437v-201h-437v-438h721v-201h-934zM735 1569h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="ě" horiz-adv-x="1120" d="M578 -25q-109 0 -197 37.5t-150 108t-95.5 172.5t-33.5 231t34 230.5t96.5 172t150 107.5t195.5 37q93 0 174.5 -29.5t143 -90t97 -152.5t35.5 -217q0 -34 -1.5 -72t-6.5 -66h-705q1 -65 23.5 -116.5t59.5 -88t84 -56t98 -19.5q65 0 120.5 27t96.5 79l139 -143 q-57 -68 -146 -110t-212 -42zM578 883q-50 0 -96 -14.5t-81.5 -46t-58.5 -82.5t-27 -124h500q-2 73 -23 124t-55 82.5t-75.5 46t-83.5 14.5zM686 1180h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="Ĝ" horiz-adv-x="1413" d="M758 -25q-111 0 -200 29t-158.5 80t-120 121.5t-83 152.5t-48 173.5t-15.5 185.5q0 87 16.5 176t50.5 171.5t85 154.5t120.5 125t157.5 83.5t195 30.5q103 0 179.5 -22.5t134.5 -60.5t100 -87t75 -102l-192 -111q-25 35 -53.5 66.5t-64.5 55.5t-80 38t-99 14 q-78 0 -138.5 -23t-105 -62.5t-75.5 -92t-49.5 -111.5t-27 -121.5t-8.5 -121.5q0 -103 23 -199t72 -170.5t125.5 -119t183.5 -44.5q67 0 123 23.5t96 64.5t62 96t22 119v29h-258v201h477v-187q0 -135 -40 -238.5t-110.5 - [...]
-<glyph unicode="ĝ" horiz-adv-x="1151" d="M397 -246q115 7 190 27t119 50.5t61 71.5t17 89v92q-87 -109 -252 -109q-90 0 -168 34.5t-136.5 103.5t-92 171.5t-33.5 239.5q0 129 32 230.5t89.5 172t137.5 107.5t176 37q46 0 84 -8.5t68.5 -22.5t54 -31.5t40.5 -35.5v74h213v-967q0 -120 -24 -204t-82 -144 q-39 -40 -85.5 -65.5t-97 -41.5t-102.5 -23.5t-102 -11.5zM571 176q65 0 121.5 32t91.5 91v453q-16 26 -39.5 47.5t-51 37.5t-59 24.5t-63.5 8.5q-125 0 -190.5 -88.5t-65.5 -257.5q0 -80 18 -144.5t51 -109.5t80.5 -6 [...]
-<glyph unicode="Ğ" horiz-adv-x="1413" d="M758 -25q-111 0 -200 29t-158.5 80t-120 121.5t-83 152.5t-48 173.5t-15.5 185.5q0 87 16.5 176t50.5 171.5t85 154.5t120.5 125t157.5 83.5t195 30.5q103 0 179.5 -22.5t134.5 -60.5t100 -87t75 -102l-192 -111q-25 35 -53.5 66.5t-64.5 55.5t-80 38t-99 14 q-78 0 -138.5 -23t-105 -62.5t-75.5 -92t-49.5 -111.5t-27 -121.5t-8.5 -121.5q0 -103 23 -199t72 -170.5t125.5 -119t183.5 -44.5q67 0 123 23.5t96 64.5t62 96t22 119v29h-258v201h477v-187q0 -135 -40 -238.5t-110.5 - [...]
-<glyph unicode="ğ" horiz-adv-x="1151" d="M397 -246q115 7 190 27t119 50.5t61 71.5t17 89v92q-87 -109 -252 -109q-90 0 -168 34.5t-136.5 103.5t-92 171.5t-33.5 239.5q0 129 32 230.5t89.5 172t137.5 107.5t176 37q46 0 84 -8.5t68.5 -22.5t54 -31.5t40.5 -35.5v74h213v-967q0 -120 -24 -204t-82 -144 q-39 -40 -85.5 -65.5t-97 -41.5t-102.5 -23.5t-102 -11.5zM571 176q65 0 121.5 32t91.5 91v453q-16 26 -39.5 47.5t-51 37.5t-59 24.5t-63.5 8.5q-125 0 -190.5 -88.5t-65.5 -257.5q0 -80 18 -144.5t51 -109.5t80.5 -6 [...]
-<glyph unicode="Ġ" horiz-adv-x="1413" d="M758 -25q-111 0 -200 29t-158.5 80t-120 121.5t-83 152.5t-48 173.5t-15.5 185.5q0 87 16.5 176t50.5 171.5t85 154.5t120.5 125t157.5 83.5t195 30.5q103 0 179.5 -22.5t134.5 -60.5t100 -87t75 -102l-192 -111q-25 35 -53.5 66.5t-64.5 55.5t-80 38t-99 14 q-78 0 -138.5 -23t-105 -62.5t-75.5 -92t-49.5 -111.5t-27 -121.5t-8.5 -121.5q0 -103 23 -199t72 -170.5t125.5 -119t183.5 -44.5q67 0 123 23.5t96 64.5t62 96t22 119v29h-258v201h477v-187q0 -135 -40 -238.5t-110.5 - [...]
-<glyph unicode="ġ" horiz-adv-x="1151" d="M397 -246q115 7 190 27t119 50.5t61 71.5t17 89v92q-87 -109 -252 -109q-90 0 -168 34.5t-136.5 103.5t-92 171.5t-33.5 239.5q0 129 32 230.5t89.5 172t137.5 107.5t176 37q46 0 84 -8.5t68.5 -22.5t54 -31.5t40.5 -35.5v74h213v-967q0 -120 -24 -204t-82 -144 q-39 -40 -85.5 -65.5t-97 -41.5t-102.5 -23.5t-102 -11.5zM571 176q65 0 121.5 32t91.5 91v453q-16 26 -39.5 47.5t-51 37.5t-59 24.5t-63.5 8.5q-125 0 -190.5 -88.5t-65.5 -257.5q0 -80 18 -144.5t51 -109.5t80.5 -6 [...]
-<glyph unicode="Ģ" horiz-adv-x="1413" d="M758 -25q-111 0 -200 29t-158.5 80t-120 121.5t-83 152.5t-48 173.5t-15.5 185.5q0 87 16.5 176t50.5 171.5t85 154.5t120.5 125t157.5 83.5t195 30.5q103 0 179.5 -22.5t134.5 -60.5t100 -87t75 -102l-192 -111q-25 35 -53.5 66.5t-64.5 55.5t-80 38t-99 14 q-78 0 -138.5 -23t-105 -62.5t-75.5 -92t-49.5 -111.5t-27 -121.5t-8.5 -121.5q0 -103 23 -199t72 -170.5t125.5 -119t183.5 -44.5q67 0 123 23.5t96 64.5t62 96t22 119v29h-258v201h477v-187q0 -135 -40 -238.5t-110.5 - [...]
-<glyph unicode="ģ" horiz-adv-x="1151" d="M397 -246q115 7 190 27t119 50.5t61 71.5t17 89v92q-87 -109 -252 -109q-90 0 -168 34.5t-136.5 103.5t-92 171.5t-33.5 239.5q0 129 32 230.5t89.5 172t137.5 107.5t176 37q46 0 84 -8.5t68.5 -22.5t54 -31.5t40.5 -35.5v74h213v-967q0 -120 -24 -204t-82 -144 q-39 -40 -85.5 -65.5t-97 -41.5t-102.5 -23.5t-102 -11.5zM571 176q65 0 121.5 32t91.5 91v453q-16 26 -39.5 47.5t-51 37.5t-59 24.5t-63.5 8.5q-125 0 -190.5 -88.5t-65.5 -257.5q0 -80 18 -144.5t51 -109.5t80.5 -6 [...]
-<glyph unicode="Ĥ" horiz-adv-x="1448" d="M1051 0v639h-654v-639h-213v1434h213v-594h654v594h213v-1434h-213zM915 1569l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="ĥ" d="M801 0v625q0 66 -16 112.5t-44 76t-66 43t-83 13.5q-48 0 -89 -12.5t-71 -40.5t-47.5 -74.5t-17.5 -113.5v-629h-213v1380l213 99v-533q51 65 117 95t153 30q82 0 151 -23.5t119.5 -76.5t78.5 -138t28 -208v-625h-213zM782 1567l-192 161l-193 -161h-205l291 319h215 l289 -319h-205z" />
-<glyph unicode="Ħ" horiz-adv-x="1448" d="M1051 0v639h-654v-639h-213v1053h-184v200h184v181h213v-181h654v181h213v-181h184v-200h-184v-1053h-213zM1051 840v213h-654v-213h654z" />
-<glyph unicode="ħ" d="M367 1110v-225q51 65 117 95t153 30q82 0 151 -23.5t119.5 -77t78.5 -138.5t28 -208v-563h-213v563q0 66 -16 112.5t-44 76.5t-66 43.5t-83 13.5q-48 0 -89 -12.5t-71 -41t-47.5 -74.5t-17.5 -114v-567h-213v1110h-123v180h123v90l213 99v-189h290v-180h-290z" />
-<glyph unicode="Ĩ" horiz-adv-x="602" d="M195 0v1434h213v-1434h-213zM463 1591q-54 0 -99.5 15.5t-85.5 33.5l-77 33.5t-76 15.5q-38 0 -65.5 -21.5t-30.5 -68.5h-104q0 61 11.5 110.5t37.5 84.5t66.5 53.5t99.5 18.5q54 0 99.5 -15.5t85.5 -34t77 -34t76 -15.5q38 0 65.5 22t30.5 68h104q0 -61 -11.5 -110.5 t-37.5 -84t-67 -53t-99 -18.5z" />
-<glyph unicode="ĩ" horiz-adv-x="524" d="M156 0v1047h213v-1047h-213zM425 1202q-54 0 -99.5 15.5t-85.5 33.5l-77 33.5t-76 15.5q-38 0 -65.5 -21.5t-30.5 -68.5h-104q0 61 11.5 110.5t37.5 84.5t66.5 53.5t99.5 18.5q54 0 99.5 -15.5t85.5 -34t77 -34t76 -15.5q38 0 65.5 22t30.5 68h104q0 -61 -11.5 -110.5 t-37.5 -84t-67 -53t-99 -18.5z" />
-<glyph unicode="Ī" horiz-adv-x="602" d="M195 0v1434h213v-1434h-213zM-36 1612v176h676v-176h-676z" />
-<glyph unicode="ī" horiz-adv-x="524" d="M156 0v1047h213v-1047h-213zM-74 1223v176h676v-176h-676z" />
-<glyph unicode="Ĭ" horiz-adv-x="602" d="M195 0v1434h213v-1434h-213zM302 1569q-99 0 -170.5 27.5t-118 74t-68.5 108t-22 130.5h180q0 -36 13.5 -66.5t39 -53t62.5 -35.5t84 -13t84 13t63 35.5t39.5 53t13.5 66.5h181q0 -69 -22.5 -131t-69.5 -108.5t-119 -73.5t-170 -27z" />
-<glyph unicode="ĭ" horiz-adv-x="524" d="M156 0v1047h213v-1047h-213zM264 1180q-99 0 -170.5 27.5t-118 74t-68.5 108t-22 130.5h180q0 -36 13.5 -66.5t39 -53t62.5 -35.5t84 -13t84 13t63 35.5t39.5 53t13.5 66.5h181q0 -69 -22.5 -131t-69.5 -108.5t-119 -73.5t-170 -27z" />
-<glyph unicode="Į" horiz-adv-x="602" d="M211 -489q-56 0 -101.5 14t-78 41t-50.5 66t-18 89q0 93 61.5 164.5t170.5 114.5v1434h213v-1434q-73 -21 -124 -50.5t-83.5 -64t-47 -72t-14.5 -73.5q0 -53 29.5 -80t73.5 -27q29 0 50 12t34 27l104 -75q-31 -35 -84.5 -60.5t-134.5 -25.5z" />
-<glyph unicode="į" horiz-adv-x="524" d="M176 -489q-55 0 -100.5 14t-78.5 41t-51 66t-18 89q0 91 59.5 162.5t168.5 116.5v1047h213v-1047q-72 -21 -122 -50.5t-82 -64t-46.5 -72t-14.5 -73.5q0 -53 30 -80t73 -27q29 0 50 12t34 27l104 -75q-31 -35 -84 -60.5t-135 -25.5zM262 1210q-27 0 -50.5 10t-41 27.5 t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="İ" horiz-adv-x="602" d="M195 0v1434h213v-1434h-213zM302 1599q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="ı" horiz-adv-x="524" d="M156 0v1047h213v-1047h-213z" />
-<glyph unicode="IJ" horiz-adv-x="1686" d="M195 0v1434h213v-1434h-213zM1034 -25q-77 0 -146 17.5t-127.5 51.5t-104.5 84.5t-77 117.5l185 90q38 -81 107 -118.5t163 -37.5q51 0 98.5 13t84 49t59 100.5t22.5 167.5v924h213v-953q0 -100 -20.5 -176.5t-55 -133t-81.5 -94t-100 -60.5t-109.5 -32.5t-110.5 -9.5z " />
-<glyph unicode="ij" horiz-adv-x="1036" d="M156 0v1047h213v-1047h-213zM262 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10zM383 -281q81 18 135.5 39.5t87.5 51.5t47.5 70.5t14.5 96.5v1070 h213v-998q0 -46 -4 -91.5t-15 -88.5t-30 -81.5t-50 -70.5q-36 -38 -74 -64t-76 -43.5t-75 -29t-72 -18.5zM774 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 - [...]
-<glyph unicode="Ĵ" d="M506 -25q-77 0 -146 17.5t-127.5 51.5t-104.5 84.5t-77 117.5l185 90q38 -81 107 -118.5t163 -37.5q51 0 98.5 13t84 49t59 100.5t22.5 167.5v924h213v-953q0 -100 -20.5 -176.5t-55 -133t-81.5 -94t-100 -60.5t-109.5 -32.5t-110.5 -9.5zM1065 1569l-192 161l-193 -161 h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="ĵ" horiz-adv-x="524" d="M-129 -281q81 18 135.5 39.5t87.5 51.5t47.5 70.5t14.5 96.5v1070h213v-998q0 -46 -4 -91.5t-15 -88.5t-30 -81.5t-50 -70.5q-36 -38 -74 -64t-76 -43.5t-75 -29t-72 -18.5zM456 1180l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="Ķ" horiz-adv-x="1386" d="M1055 0l-404 750l-254 -308v-442h-213v1434h213v-695l553 695h246l-401 -504l510 -930h-250zM612 -496q12 11 26.5 26t28.5 33.5t26.5 40t21.5 44.5q-28 0 -52 9.5t-42 26t-28.5 39.5t-10.5 50q0 31 10.5 56t28.5 42.5t42 27t50 9.5q28 0 53 -10t43.5 -28.5t29.5 -46 t11 -63.5q0 -83 -43.5 -159.5t-124.5 -141.5z" />
-<glyph unicode="ķ" horiz-adv-x="1126" d="M154 0v1378l213 101v-756l309 324h262l-287 -291l393 -756h-233l-309 602l-135 -135v-467h-213zM465 -496q12 11 26.5 26t28.5 33.5t26.5 40t21.5 44.5q-28 0 -52 9.5t-42 26t-28.5 39.5t-10.5 50q0 31 10.5 56t28.5 42.5t42 27t50 9.5q28 0 53 -10t43.5 -28.5t29.5 -46 t11 -63.5q0 -83 -43.5 -159.5t-124.5 -141.5z" />
-<glyph unicode="ĸ" horiz-adv-x="1116" d="M799 0l-260 485l-172 -209v-276h-213v1047h213v-486l387 486h241l-317 -392l356 -655h-235z" />
-<glyph unicode="Ĺ" horiz-adv-x="1190" d="M184 0v1434h213v-1229h701v-205h-914zM300 1569h-217l290 319h291z" />
-<glyph unicode="ĺ" horiz-adv-x="541" d="M164 0v1378l213 101v-1479h-213zM237 1549h-217l290 319h291z" />
-<glyph unicode="Ļ" horiz-adv-x="1190" d="M184 0v1434h213v-1229h701v-205h-914zM536 -496q12 11 26.5 26t28.5 33.5t26.5 40t21.5 44.5q-28 0 -52 9.5t-42 26t-28.5 39.5t-10.5 50q0 31 10.5 56t28.5 42.5t42 27t50 9.5q28 0 53 -10t43.5 -28.5t29.5 -46t11 -63.5q0 -83 -43.5 -159.5t-124.5 -141.5z" />
-<glyph unicode="ļ" horiz-adv-x="541" d="M164 0v1378l213 101v-1479h-213zM169 -496q12 11 26.5 26t28.5 33.5t26.5 40t21.5 44.5q-28 0 -52 9.5t-42 26t-28.5 39.5t-10.5 50q0 31 10.5 56t28.5 42.5t42 27t50 9.5q28 0 53 -10t43.5 -28.5t29.5 -46t11 -63.5q0 -83 -43.5 -159.5t-124.5 -141.5z" />
-<glyph unicode="Ľ" horiz-adv-x="1190" d="M184 0v1434h213v-1229h701v-205h-914zM404 1569h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="ľ" horiz-adv-x="858" d="M164 0v1378l213 101v-1479h-213zM807 1459v-16q0 -151 -50.5 -269.5t-150.5 -216.5l-66 49q14 28 25 78t19 111t12 129.5t4 134.5h207z" />
-<glyph unicode="Ŀ" horiz-adv-x="1190" d="M184 0v1434h213v-1229h701v-205h-914zM887 586q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="ŀ" horiz-adv-x="758" d="M164 0v1378l213 101v-1479h-213zM650 517q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="Ł" horiz-adv-x="1190" d="M184 0v487l-143 -108v235l143 109v711h213v-547l269 205v-236l-269 -205v-446h701v-205h-914z" />
-<glyph unicode="ł" horiz-adv-x="541" d="M164 0v489l-123 -94v199l123 94v690l213 101v-629l123 94v-199l-123 -94v-651h-213z" />
-<glyph unicode="Ń" horiz-adv-x="1454" d="M1069 0l-620 973l-58 106l6 -106v-973h-213v1434h215l605 -957l59 -110l-6 108v959h213v-1434h-201zM748 1569h-217l290 319h291z" />
-<glyph unicode="ń" d="M801 0v625q0 66 -16 112.5t-44 76t-66 43t-83 13.5q-48 0 -89 -12.5t-71 -40.5t-47.5 -74.5t-17.5 -113.5v-629h-213v1047h213v-101q51 65 117 95t153 30q82 0 151 -23.5t119.5 -76.5t78.5 -138t28 -208v-625h-213zM549 1180h-217l290 319h291z" />
-<glyph unicode="Ņ" horiz-adv-x="1454" d="M1069 0l-620 973l-58 106l6 -106v-973h-213v1434h215l605 -957l59 -110l-6 108v959h213v-1434h-201zM633 -496q12 11 26.5 26t28.5 33.5t26.5 40t21.5 44.5q-28 0 -52 9.5t-42 26t-28.5 39.5t-10.5 50q0 31 10.5 56t28.5 42.5t42 27t50 9.5q28 0 53 -10t43.5 -28.5 t29.5 -46t11 -63.5q0 -83 -43.5 -159.5t-124.5 -141.5z" />
-<glyph unicode="ņ" d="M801 0v625q0 66 -16 112.5t-44 76t-66 43t-83 13.5q-48 0 -89 -12.5t-71 -40.5t-47.5 -74.5t-17.5 -113.5v-629h-213v1047h213v-101q51 65 117 95t153 30q82 0 151 -23.5t119.5 -76.5t78.5 -138t28 -208v-625h-213zM489 -496q12 11 26.5 26t28.5 33.5t26.5 40t21.5 44.5 q-28 0 -52 9.5t-42 26t-28.5 39.5t-10.5 50q0 31 10.5 56t28.5 42.5t42 27t50 9.5q28 0 53 -10t43.5 -28.5t29.5 -46t11 -63.5q0 -83 -43.5 -159.5t-124.5 -141.5z" />
-<glyph unicode="Ň" horiz-adv-x="1454" d="M1069 0l-620 973l-58 106l6 -106v-973h-213v1434h215l605 -957l59 -110l-6 108v959h213v-1434h-201zM854 1569h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="ň" d="M801 0v625q0 66 -16 112.5t-44 76t-66 43t-83 13.5q-48 0 -89 -12.5t-71 -40.5t-47.5 -74.5t-17.5 -113.5v-629h-213v1047h213v-101q51 65 117 95t153 30q82 0 151 -23.5t119.5 -76.5t78.5 -138t28 -208v-625h-213zM696 1180h-215l-289 319h205l193 -162l192 162h205z " />
-<glyph unicode="ʼn" horiz-adv-x="1542" d="M1186 0v625q0 66 -16 112.5t-44 76t-66 43t-83 13.5q-48 0 -89 -12.5t-71 -40.5t-47.5 -74.5t-17.5 -113.5v-629h-213v1047h213v-101q51 65 117 95t153 30q82 0 151 -23.5t119.5 -76.5t78.5 -138t28 -208v-625h-213zM84 946q14 12 33.5 34.5t38.5 51t33.5 61t19.5 66.5 q-27 2 -52 13.5t-43.5 30t-30 44t-11.5 56.5q0 35 12.5 63.5t33 49t48 31.5t57.5 11q32 0 61.5 -11.5t52 -34.5t35.5 -58t13 -82q0 -59 -17.5 -115t-47.5 -104.5t-70.5 -89t-85.5 -68.5z" />
-<glyph unicode="Ŋ" horiz-adv-x="1454" d="M760 -342q81 18 135.5 39.5t87.5 51.5t47 70.5t14 96.5v123l-595 934l-58 106l6 -106v-973h-213v1434h215l605 -957l59 -110l-6 108v959h213v-1446q0 -46 -5 -91.5t-17 -88.5t-32 -81.5t-51 -70.5q-36 -38 -75 -64t-78 -44t-77.5 -29t-72.5 -19z" />
-<glyph unicode="ŋ" d="M516 -342q81 18 135.5 39.5t87.5 51.5t47.5 70.5t14.5 96.5v709q0 66 -16 112.5t-44 76t-66 43t-83 13.5q-48 0 -89 -12.5t-71 -40.5t-47.5 -74.5t-17.5 -113.5v-629h-213v1047h213v-101q51 65 117 95t153 30q82 0 151 -23.5t119.5 -76.5t78.5 -138t28 -208v-637 q0 -46 -4 -91.5t-15 -88.5t-30 -81.5t-50 -70.5q-36 -38 -74 -64t-76 -44t-75 -29t-72 -19z" />
-<glyph unicode="Ō" horiz-adv-x="1516" d="M758 -25q-115 0 -206 30.5t-160.5 83t-118.5 124t-80 154t-45.5 172t-14.5 178.5q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5t206 -30.5t160.5 -83t118.5 -124t80 -154t45 -172t14 -177.5q0 -89 -14 -178.5t-45 -172t-80 -154t-118.5 -124t-160.5 -83 t-206 -30.5zM758 180q75 0 134 23.5t103.5 63.5t76 93.5t51.5 113t29 122t9 121.5q0 62 -9 126.5t-29 124t-51.5 111.5t-76 90.5t-103.5 61t-134 22.5t-134 -23.5t-104 -63.5t-76.5 -93.5t-51.5 -113t-29 -12 [...]
-<glyph unicode="ō" horiz-adv-x="1161" d="M580 -25q-110 0 -198.5 37t-150.5 107.5t-95.5 172t-33.5 230.5t33.5 231t95.5 172.5t150.5 108t198.5 37.5q109 0 198 -37.5t151 -108t96 -172.5t34 -231t-34 -230.5t-96 -172t-151 -107.5t-198 -37zM580 176q61 0 110.5 24t84 68.5t53 108.5t18.5 145t-18.5 145.5 t-53 109.5t-84 69t-110.5 24t-110 -24t-83.5 -69t-53 -109.5t-18.5 -145.5t18.5 -145t53 -108.5t83.5 -68.5t110 -24zM242 1223v176h676v-176h-676z" />
-<glyph unicode="Ŏ" horiz-adv-x="1516" d="M758 -25q-115 0 -206 30.5t-160.5 83t-118.5 124t-80 154t-45.5 172t-14.5 178.5q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5t206 -30.5t160.5 -83t118.5 -124t80 -154t45 -172t14 -177.5q0 -89 -14 -178.5t-45 -172t-80 -154t-118.5 -124t-160.5 -83 t-206 -30.5zM758 180q75 0 134 23.5t103.5 63.5t76 93.5t51.5 113t29 122t9 121.5q0 62 -9 126.5t-29 124t-51.5 111.5t-76 90.5t-103.5 61t-134 22.5t-134 -23.5t-104 -63.5t-76.5 -93.5t-51.5 -113t-29 -12 [...]
-<glyph unicode="ŏ" horiz-adv-x="1161" d="M580 -25q-110 0 -198.5 37t-150.5 107.5t-95.5 172t-33.5 230.5t33.5 231t95.5 172.5t150.5 108t198.5 37.5q109 0 198 -37.5t151 -108t96 -172.5t34 -231t-34 -230.5t-96 -172t-151 -107.5t-198 -37zM580 176q61 0 110.5 24t84 68.5t53 108.5t18.5 145t-18.5 145.5 t-53 109.5t-84 69t-110.5 24t-110 -24t-83.5 -69t-53 -109.5t-18.5 -145.5t18.5 -145t53 -108.5t83.5 -68.5t110 -24zM580 1180q-99 0 -170.5 27.5t-118 74t-68.5 108t-22 130.5h180q0 -36 13.5 -66.5t39 -53t62.5 [...]
-<glyph unicode="Ő" horiz-adv-x="1516" d="M758 -25q-115 0 -206 30.5t-160.5 83t-118.5 124t-80 154t-45.5 172t-14.5 178.5q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5t206 -30.5t160.5 -83t118.5 -124t80 -154t45 -172t14 -177.5q0 -89 -14 -178.5t-45 -172t-80 -154t-118.5 -124t-160.5 -83 t-206 -30.5zM758 180q75 0 134 23.5t103.5 63.5t76 93.5t51.5 113t29 122t9 121.5q0 62 -9 126.5t-29 124t-51.5 111.5t-76 90.5t-103.5 61t-134 22.5t-134 -23.5t-104 -63.5t-76.5 -93.5t-51.5 -113t-29 -12 [...]
-<glyph unicode="ő" horiz-adv-x="1161" d="M580 -25q-110 0 -198.5 37t-150.5 107.5t-95.5 172t-33.5 230.5t33.5 231t95.5 172.5t150.5 108t198.5 37.5q109 0 198 -37.5t151 -108t96 -172.5t34 -231t-34 -230.5t-96 -172t-151 -107.5t-198 -37zM580 176q61 0 110.5 24t84 68.5t53 108.5t18.5 145t-18.5 145.5 t-53 109.5t-84 69t-110.5 24t-110 -24t-83.5 -69t-53 -109.5t-18.5 -145.5t18.5 -145t53 -108.5t83.5 -68.5t110 -24zM406 1180h-218l291 319h291zM836 1180h-217l290 319h291z" />
-<glyph unicode="Œ" horiz-adv-x="1831" d="M936 0q-42 -12 -89 -18.5t-99 -6.5q-115 0 -206 30.5t-160.5 83t-118.5 124t-80 154t-45.5 172t-14.5 178.5q0 82 13 169t43 169.5t78 155.5t117.5 128t162 87t211.5 32q102 0 188 -24h778v-201h-573v-393h334v-201h-334v-438h618v-201h-823zM748 180q54 0 98.5 12 t81.5 33v983q-77 47 -180 47q-75 0 -134.5 -23.5t-104.5 -64t-76.5 -94t-51 -113.5t-28.5 -122.5t-9 -120.5q0 -95 21 -191t69 -173t125 -125t189 -48z" />
-<glyph unicode="œ" horiz-adv-x="1860" d="M1317 -25q-119 0 -211.5 43.5t-155.5 126.5q-63 -83 -156.5 -126.5t-213.5 -43.5q-110 0 -198.5 37t-150.5 107.5t-95.5 172t-33.5 230.5t33.5 231t95.5 172.5t150.5 108t198.5 37.5q120 0 213.5 -44t156.5 -128q62 84 155 128t212 44q93 0 175 -29.5t143 -90 t96.5 -152.5t35.5 -217q0 -34 -1.5 -72t-6.5 -66h-704q1 -65 23.5 -116.5t59 -88t83.5 -56t98 -19.5q65 0 121 27t96 79l139 -143q-57 -68 -146 -110t-212 -42zM1317 883q-50 0 -95.5 -14.5t-81.5 -46t-58.5 -82.5t-26.5 [...]
-<glyph unicode="Ŕ" horiz-adv-x="1384" d="M184 0v1434h604q132 0 221 -36t143 -95.5t76.5 -135.5t22.5 -157q0 -62 -18 -125t-55.5 -118.5t-96.5 -98t-141 -64.5l295 -604h-244l-282 586h-312v-586h-213zM793 791q67 0 112 18.5t73 49.5t40 70t12 81q0 37 -10.5 76t-37.5 71t-73 52t-116 20h-396v-438h396z M637 1569h-217l290 319h291z" />
-<glyph unicode="ŕ" horiz-adv-x="827" d="M154 0v1047h213v-127q37 80 96 115.5t137 35.5q104 0 176 -63l-31 -197q-29 20 -68.5 35.5t-96.5 15.5q-36 0 -73.5 -12.5t-68.5 -44t-51 -84t-20 -133.5v-588h-213zM453 1180h-217l290 319h291z" />
-<glyph unicode="Ŗ" horiz-adv-x="1384" d="M184 0v1434h604q132 0 221 -36t143 -95.5t76.5 -135.5t22.5 -157q0 -62 -18 -125t-55.5 -118.5t-96.5 -98t-141 -64.5l295 -604h-244l-282 586h-312v-586h-213zM793 791q67 0 112 18.5t73 49.5t40 70t12 81q0 37 -10.5 76t-37.5 71t-73 52t-116 20h-396v-438h396z M565 -496q12 11 26.5 26t28.5 33.5t26.5 40t21.5 44.5q-28 0 -52 9.5t-42 26t-28.5 39.5t-10.5 50q0 31 10.5 56t28.5 42.5t42 27t50 9.5q28 0 53 -10t43.5 -28.5t29.5 -46t11 -63.5q0 -83 -43.5 -159.5t-124.5 -141.5z" />
-<glyph unicode="ŗ" horiz-adv-x="827" d="M154 0v1047h213v-127q37 80 96 115.5t137 35.5q104 0 176 -63l-31 -197q-29 20 -68.5 35.5t-96.5 15.5q-36 0 -73.5 -12.5t-68.5 -44t-51 -84t-20 -133.5v-588h-213zM158 -536q12 11 26.5 26t28.5 33.5t26.5 40t21.5 44.5q-28 0 -52 9.5t-42 26t-28.5 39.5t-10.5 50 q0 31 10.5 56t28.5 42.5t42 27t50 9.5q28 0 53 -10t43.5 -28.5t29.5 -46t11 -63.5q0 -83 -43.5 -159.5t-124.5 -141.5z" />
-<glyph unicode="Ř" horiz-adv-x="1384" d="M184 0v1434h604q132 0 221 -36t143 -95.5t76.5 -135.5t22.5 -157q0 -62 -18 -125t-55.5 -118.5t-96.5 -98t-141 -64.5l295 -604h-244l-282 586h-312v-586h-213zM793 791q67 0 112 18.5t73 49.5t40 70t12 81q0 37 -10.5 76t-37.5 71t-73 52t-116 20h-396v-438h396z M764 1569h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="ř" horiz-adv-x="827" d="M154 0v1047h213v-127q37 80 96 115.5t137 35.5q104 0 176 -63l-31 -197q-29 20 -68.5 35.5t-96.5 15.5q-36 0 -73.5 -12.5t-68.5 -44t-51 -84t-20 -133.5v-588h-213zM584 1180h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="Ś" horiz-adv-x="1237" d="M854 1108q-13 31 -36 57.5t-56 46t-75.5 30.5t-94.5 11q-103 0 -161 -45t-58 -127q0 -51 32 -86.5t84 -62.5t118.5 -49.5t136 -49t136 -61t118.5 -84t84 -119t32 -166.5q0 -103 -37.5 -182.5t-103.5 -134t-155.5 -83t-192.5 -28.5q-96 0 -179.5 24.5t-151 70t-116.5 110 t-76 144.5l201 73q22 -48 54.5 -88t74.5 -68.5t92.5 -44.5t108.5 -16t106 14t82 42t53 69.5t19 95.5q0 64 -32 107.5t-84 75.5t-118.5 56l-136 49t-136 56t-118.5 76t-84 108t-32 154q0 76 30.5 144t88.5 119. [...]
-<glyph unicode="ś" horiz-adv-x="973" d="M477 -25q-62 0 -121 12t-110.5 34t-93.5 53.5t-70 70.5l151 129q16 -15 39 -33.5t53 -34.5t67 -27t81 -11q40 0 74 6t58.5 19t38 34.5t13.5 53.5q0 37 -25.5 63.5t-66.5 48t-93 41t-105 41.5q-49 21 -96 46t-83.5 60t-59 83.5t-22.5 117.5q0 70 28.5 124t78 90.5 t116 55.5t142.5 19q67 0 123.5 -13t102 -35t80.5 -50.5t61 -59.5l-154 -122q-14 14 -33.5 29.5t-45.5 28.5t-59 21.5t-75 8.5q-68 0 -107.5 -25t-39.5 -72q0 -30 21 -52.5t57 -41.5t82.5 -36.5l97.5 -37.5q54 -21 106 [...]
-<glyph unicode="Ŝ" horiz-adv-x="1237" d="M854 1108q-13 31 -36 57.5t-56 46t-75.5 30.5t-94.5 11q-103 0 -161 -45t-58 -127q0 -51 32 -86.5t84 -62.5t118.5 -49.5t136 -49t136 -61t118.5 -84t84 -119t32 -166.5q0 -103 -37.5 -182.5t-103.5 -134t-155.5 -83t-192.5 -28.5q-96 0 -179.5 24.5t-151 70t-116.5 110 t-76 144.5l201 73q22 -48 54.5 -88t74.5 -68.5t92.5 -44.5t108.5 -16t106 14t82 42t53 69.5t19 95.5q0 64 -32 107.5t-84 75.5t-118.5 56l-136 49t-136 56t-118.5 76t-84 108t-32 154q0 76 30.5 144t88.5 119. [...]
-<glyph unicode="ŝ" horiz-adv-x="973" d="M477 -25q-62 0 -121 12t-110.5 34t-93.5 53.5t-70 70.5l151 129q16 -15 39 -33.5t53 -34.5t67 -27t81 -11q40 0 74 6t58.5 19t38 34.5t13.5 53.5q0 37 -25.5 63.5t-66.5 48t-93 41t-105 41.5q-49 21 -96 46t-83.5 60t-59 83.5t-22.5 117.5q0 70 28.5 124t78 90.5 t116 55.5t142.5 19q67 0 123.5 -13t102 -35t80.5 -50.5t61 -59.5l-154 -122q-14 14 -33.5 29.5t-45.5 28.5t-59 21.5t-75 8.5q-68 0 -107.5 -25t-39.5 -72q0 -30 21 -52.5t57 -41.5t82.5 -36.5l97.5 -37.5q54 -21 106 [...]
-<glyph unicode="Ş" horiz-adv-x="1237" d="M614 -489q-74 0 -123.5 21.5t-80.5 55.5l104 76q12 -15 31.5 -23t48.5 -8q46 0 74 25t28 66q0 38 -31.5 63t-78.5 25q-49 0 -82 -21l-41 51l108 135q-86 7 -160.5 34t-135 72t-104.5 105.5t-69 135.5l201 73q22 -48 54.5 -88t74.5 -68.5t92.5 -44.5t108.5 -16t106 14 t82 42t53 69.5t19 95.5q0 64 -32 107.5t-84 75.5t-118.5 56l-136 49t-136 56t-118.5 76t-84 108t-32 154q0 76 30.5 144t88.5 119.5t143 81.5t194 30q91 0 165 -21.5t129.5 -59.5t92.5 -90t56 -113l-197 -66q-13 [...]
-<glyph unicode="ş" horiz-adv-x="973" d="M449 -489q-75 0 -124.5 21.5t-80.5 55.5l104 76q13 -15 32 -23t48 -8q46 0 74 25t28 66q0 38 -31.5 63t-78.5 25q-49 0 -82 -21l-41 51l111 138q-52 6 -100.5 20t-90.5 35t-76.5 48.5t-58.5 61.5l151 129q16 -15 39 -33.5t53 -34.5t67 -27t81 -11q40 0 74 6t58.5 19 t38 34.5t13.5 53.5q0 37 -25.5 63.5t-66.5 48t-93 41t-105 41.5q-49 21 -96 46t-83.5 60t-59 83.5t-22.5 117.5q0 70 28.5 124t78 90.5t116 55.5t142.5 19q67 0 123.5 -13t102 -35t80.5 -50.5t61 -59.5l-154 -122q- [...]
-<glyph unicode="Š" horiz-adv-x="1237" d="M854 1108q-13 31 -36 57.5t-56 46t-75.5 30.5t-94.5 11q-103 0 -161 -45t-58 -127q0 -51 32 -86.5t84 -62.5t118.5 -49.5t136 -49t136 -61t118.5 -84t84 -119t32 -166.5q0 -103 -37.5 -182.5t-103.5 -134t-155.5 -83t-192.5 -28.5q-96 0 -179.5 24.5t-151 70t-116.5 110 t-76 144.5l201 73q22 -48 54.5 -88t74.5 -68.5t92.5 -44.5t108.5 -16t106 14t82 42t53 69.5t19 95.5q0 64 -32 107.5t-84 75.5t-118.5 56l-136 49t-136 56t-118.5 76t-84 108t-32 154q0 76 30.5 144t88.5 119. [...]
-<glyph unicode="š" horiz-adv-x="973" d="M477 -25q-62 0 -121 12t-110.5 34t-93.5 53.5t-70 70.5l151 129q16 -15 39 -33.5t53 -34.5t67 -27t81 -11q40 0 74 6t58.5 19t38 34.5t13.5 53.5q0 37 -25.5 63.5t-66.5 48t-93 41t-105 41.5q-49 21 -96 46t-83.5 60t-59 83.5t-22.5 117.5q0 70 28.5 124t78 90.5 t116 55.5t142.5 19q67 0 123.5 -13t102 -35t80.5 -50.5t61 -59.5l-154 -122q-14 14 -33.5 29.5t-45.5 28.5t-59 21.5t-75 8.5q-68 0 -107.5 -25t-39.5 -72q0 -30 21 -52.5t57 -41.5t82.5 -36.5l97.5 -37.5q54 -21 106 [...]
-<glyph unicode="Ţ" horiz-adv-x="1208" d="M569 -489q-74 0 -123.5 21.5t-80.5 55.5l104 76q12 -15 31.5 -23t48.5 -8q46 0 74 25t28 66q0 38 -31.5 63t-78.5 25q-49 0 -82 -21l-41 51l127 158h-47v1229h-406v205h1024v-205h-405v-1229h-45l-64 -102q60 0 102.5 -14.5t70 -38.5t40 -56t12.5 -68q0 -46 -18 -84.5 t-51.5 -66.5t-81.5 -43.5t-107 -15.5z" />
-<glyph unicode="ţ" horiz-adv-x="776" d="M399 -489q-74 0 -123.5 21.5t-80.5 55.5l104 76q12 -15 31.5 -23t48.5 -8q46 0 74 25t28 66q0 38 -31.5 63t-78.5 25q-49 0 -82 -21l-41 51l117 148q-76 23 -109 87.5t-33 158.5v626h-162v185h162v329l213 103v-432h248v-185h-248v-571q0 -26 4.5 -48t16 -38t31.5 -24.5 t51 -8.5q76 0 155 49l-26 -198q-41 -19 -87 -31.5t-102 -16.5l-47 -77q60 0 102.5 -14.5t70 -38.5t40 -56t12.5 -68q0 -46 -18 -84.5t-51.5 -66.5t-81.5 -43.5t-107 -15.5z" />
-<glyph unicode="Ť" horiz-adv-x="1208" d="M711 1229v-1229h-213v1229h-406v205h1024v-205h-405zM709 1569h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="ť" horiz-adv-x="909" d="M457 -25q-62 0 -106.5 18.5t-73 52.5t-41.5 82.5t-13 107.5v626h-162v185h162v329l213 103v-432h248v-185h-248v-571q0 -26 4.5 -48t16 -38t31.5 -24.5t51 -8.5q76 0 155 49l-26 -198q-44 -22 -96 -35t-115 -13zM858 1720v-16q0 -151 -50.5 -269.5t-150.5 -216.5l-66 49 q14 28 25 78t19 111t12 129.5t4 134.5h207z" />
-<glyph unicode="Ŧ" horiz-adv-x="1188" d="M700 551v-551h-213v551h-247v201h247v477h-395v205h1004v-205h-396v-477h246v-201h-246z" />
-<glyph unicode="ŧ" horiz-adv-x="797" d="M467 -25q-62 0 -106.5 18.5t-73 52.5t-41.5 82.5t-13 107.5v204h-161v185h161v237h-161v185h161v329l213 103v-432h248v-185h-248v-237h248v-185h-248v-149q0 -26 4.5 -48t16 -38t31.5 -24.5t51 -8.5q76 0 156 49l-27 -198q-43 -22 -95.5 -35t-115.5 -13z" />
-<glyph unicode="Ũ" horiz-adv-x="1427" d="M713 -25q-111 0 -209.5 29t-171.5 95.5t-115.5 175t-42.5 268.5v891h213v-891q0 -108 25 -178t68.5 -111t103.5 -57.5t129 -16.5t128.5 16.5t104 57.5t69.5 111t25 178v891h213v-891q0 -158 -41 -266.5t-113.5 -175.5t-171.5 -96.5t-214 -29.5zM874 1591 q-54 0 -99.5 15.5t-85.5 33.5l-77 33.5t-76 15.5q-38 0 -65.5 -21.5t-30.5 -68.5h-104q0 61 11.5 110.5t37.5 84.5t66.5 53.5t99.5 18.5q54 0 99.5 -15.5t85.5 -34t77 -34t76 -15.5q38 0 65.5 22t30.5 68h104q0 -61 -11.5 -11 [...]
-<glyph unicode="ũ" d="M791 0v100q-51 -65 -117.5 -95t-153.5 -30q-82 0 -151 23.5t-119.5 77t-78.5 138.5t-28 208v625h213v-625q0 -66 16 -112.5t44 -76.5t66 -43.5t83 -13.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213zM731 1202q-54 0 -99.5 15.5t-85.5 33.5l-77 33.5 t-76 15.5q-38 0 -65.5 -21.5t-30.5 -68.5h-104q0 61 11.5 110.5t37.5 84.5t66.5 53.5t99.5 18.5q54 0 99.5 -15.5t85.5 -34t77 -34t76 -15.5q38 0 65.5 22t30.5 68h104q0 -61 -11.5 -110.5t-37.5 -84t-67 -53t-99 -18.5z" />
-<glyph unicode="Ū" horiz-adv-x="1427" d="M713 -25q-111 0 -209.5 29t-171.5 95.5t-115.5 175t-42.5 268.5v891h213v-891q0 -108 25 -178t68.5 -111t103.5 -57.5t129 -16.5t128.5 16.5t104 57.5t69.5 111t25 178v891h213v-891q0 -158 -41 -266.5t-113.5 -175.5t-171.5 -96.5t-214 -29.5zM375 1612v176h676v-176 h-676z" />
-<glyph unicode="ū" d="M791 0v100q-51 -65 -117.5 -95t-153.5 -30q-82 0 -151 23.5t-119.5 77t-78.5 138.5t-28 208v625h213v-625q0 -66 16 -112.5t44 -76.5t66 -43.5t83 -13.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213zM232 1223v176h676v-176h-676z" />
-<glyph unicode="Ŭ" horiz-adv-x="1427" d="M713 -25q-111 0 -209.5 29t-171.5 95.5t-115.5 175t-42.5 268.5v891h213v-891q0 -108 25 -178t68.5 -111t103.5 -57.5t129 -16.5t128.5 16.5t104 57.5t69.5 111t25 178v891h213v-891q0 -158 -41 -266.5t-113.5 -175.5t-171.5 -96.5t-214 -29.5zM713 1569 q-99 0 -170.5 27.5t-118 74t-68.5 108t-22 130.5h180q0 -36 13.5 -66.5t39 -53t62.5 -35.5t84 -13t84 13t63 35.5t39.5 53t13.5 66.5h181q0 -69 -22.5 -131t-69.5 -108.5t-119 -73.5t-170 -27z" />
-<glyph unicode="ŭ" d="M791 0v100q-51 -65 -117.5 -95t-153.5 -30q-82 0 -151 23.5t-119.5 77t-78.5 138.5t-28 208v625h213v-625q0 -66 16 -112.5t44 -76.5t66 -43.5t83 -13.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213zM570 1180q-99 0 -170.5 27.5t-118 74t-68.5 108 t-22 130.5h180q0 -36 13.5 -66.5t39 -53t62.5 -35.5t84 -13t84 13t63 35.5t39.5 53t13.5 66.5h181q0 -69 -22.5 -131t-69.5 -108.5t-119 -73.5t-170 -27z" />
-<glyph unicode="Ů" horiz-adv-x="1427" d="M713 -25q-111 0 -209.5 29t-171.5 95.5t-115.5 175t-42.5 268.5v891h213v-891q0 -108 25 -178t68.5 -111t103.5 -57.5t129 -16.5t128.5 16.5t104 57.5t69.5 111t25 178v891h213v-891q0 -158 -41 -266.5t-113.5 -175.5t-171.5 -96.5t-214 -29.5zM713 1632q38 0 65 27.5 t27 68.5q0 42 -27 69.5t-65 27.5q-39 0 -66 -27.5t-27 -69.5t27 -69t66 -27zM713 1479q-52 0 -97.5 19.5t-80 53.5t-54.5 79.5t-20 96.5q0 52 20 97t54.5 78.5t80 53t97.5 19.5q51 0 97 -19.5t80.5 -53t54.5 -78 [...]
-<glyph unicode="ů" d="M791 0v100q-51 -65 -117.5 -95t-153.5 -30q-82 0 -151 23.5t-119.5 77t-78.5 138.5t-28 208v625h213v-625q0 -66 16 -112.5t44 -76.5t66 -43.5t83 -13.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213zM570 1243q38 0 65 27.5t27 68.5q0 42 -27 69.5 t-65 27.5q-39 0 -66 -27.5t-27 -69.5t27 -69t66 -27zM570 1090q-52 0 -97.5 19.5t-80 53.5t-54.5 79.5t-20 96.5q0 52 20 97t54.5 78.5t80 53t97.5 19.5q51 0 97 -19.5t80.5 -53t54.5 -78.5t20 -97q0 -51 -20 -96.5t-54.5 -79.5t-80.5 - [...]
-<glyph unicode="Ű" horiz-adv-x="1427" d="M713 -25q-111 0 -209.5 29t-171.5 95.5t-115.5 175t-42.5 268.5v891h213v-891q0 -108 25 -178t68.5 -111t103.5 -57.5t129 -16.5t128.5 16.5t104 57.5t69.5 111t25 178v891h213v-891q0 -158 -41 -266.5t-113.5 -175.5t-171.5 -96.5t-214 -29.5zM539 1569h-218l291 319 h291zM969 1569h-217l290 319h291z" />
-<glyph unicode="ű" d="M791 0v100q-51 -65 -117.5 -95t-153.5 -30q-82 0 -151 23.5t-119.5 77t-78.5 138.5t-28 208v625h213v-625q0 -66 16 -112.5t44 -76.5t66 -43.5t83 -13.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213zM396 1180h-218l291 319h291zM826 1180h-217l290 319 h291z" />
-<glyph unicode="Ų" horiz-adv-x="1427" d="M768 -489q-55 0 -100.5 14t-78.5 41t-51 66t-18 89q0 81 46.5 145t129.5 109q-108 3 -203 33t-166 97t-112 174t-41 264v891h213v-891q0 -108 25 -178t68.5 -111t103.5 -57.5t129 -16.5t128.5 16.5t104 57.5t69.5 111t25 178v891h213v-891q0 -121 -23 -209.5t-64.5 -152 t-98.5 -105.5t-125 -70q-127 -51 -186.5 -120t-59.5 -146q0 -53 29.5 -80t73.5 -27q29 0 50 12t34 27l104 -75q-31 -35 -84 -60.5t-135 -25.5z" />
-<glyph unicode="ų" d="M811 -489q-55 0 -100.5 14t-78.5 41t-51 66t-18 89q0 91 59.5 162.5t168.5 116.5v100q-51 -65 -117.5 -95t-153.5 -30q-82 0 -151 23.5t-119.5 77t-78.5 138.5t-28 208v625h213v-625q0 -66 16 -112.5t44 -76.5t66 -43.5t83 -13.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114 v629h213v-1047q-72 -21 -122 -50.5t-82 -64t-46.5 -72t-14.5 -73.5q0 -53 29.5 -80t73.5 -27q29 0 50 12t34 27l104 -75q-31 -35 -84 -60.5t-135 -25.5z" />
-<glyph unicode="Ŵ" horiz-adv-x="1745" d="M1653 1434l-310 -1434h-192l-248 930q-15 57 -23.5 92.5t-11.5 62.5q-3 -27 -11 -62.5t-23 -92.5l-242 -930h-193l-307 1434h219l178 -883l20.5 -100t10.5 -66q3 28 12.5 65.5l24.5 100.5l225 883h179l231 -881q17 -62 25.5 -99t11.5 -65q3 28 9.5 66t18.5 98l181 881 h215zM1065 1569l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="ŵ" horiz-adv-x="1487" d="M1137 0h-189l-174 608l-10.5 37t-10 36.5t-8 32.5t-4.5 25q-2 -10 -4.5 -25.5t-6.5 -33.5t-9 -37l-10 -37l-172 -606h-189l-278 1047h209l147 -652q8 -34 12.5 -59.5l8.5 -46.5q3 21 8.5 48t13.5 58l174 652h197l174 -652q8 -31 13.5 -58t8.5 -48q3 21 8 46.5t13 59.5 l147 652h209zM936 1180l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="Ŷ" horiz-adv-x="1364" d="M575 0v561l-503 873h243l367 -656l367 656h243l-504 -871v-563h-213zM874 1569l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="ŷ" horiz-adv-x="1102" d="M498 -397h-217l161 415l-391 1029h211l258 -685q11 -29 17 -48t12 -44q6 25 12 44t17 48l260 685h213zM741 1180l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="Ÿ" horiz-adv-x="1364" d="M575 0v561l-503 873h243l367 -656l367 656h243l-504 -871v-563h-213zM497 1599q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10zM866 1599q-27 0 -50.5 10 t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="Ź" horiz-adv-x="1356" d="M113 0v172l827 1057h-780v205h1063v-172l-830 -1057h830v-205h-1110zM693 1569h-217l290 319h291z" />
-<glyph unicode="ź" horiz-adv-x="1047" d="M113 0v174l555 684h-514v189h770v-168l-555 -691h553v-188h-809zM488 1180h-217l290 319h291z" />
-<glyph unicode="Ż" horiz-adv-x="1356" d="M113 0v172l827 1057h-780v205h1063v-172l-830 -1057h830v-205h-1110zM693 1599q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="ż" horiz-adv-x="1047" d="M113 0v174l555 684h-514v189h770v-168l-555 -691h553v-188h-809zM529 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="Ž" horiz-adv-x="1356" d="M113 0v172l827 1057h-780v205h1063v-172l-830 -1057h830v-205h-1110zM799 1569h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="ž" horiz-adv-x="1047" d="M113 0v174l555 684h-514v189h770v-168l-555 -691h553v-188h-809zM635 1180h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="ſ" horiz-adv-x="623" d="M164 0v1214q0 72 20.5 119.5t54.5 75t77 38.5t87 11q62 0 112.5 -13t96.5 -36v-176q-48 20 -82.5 28.5t-66.5 8.5q-26 0 -43 -7t-26.5 -20t-13 -30t-3.5 -37v-1176h-213z" />
-<glyph unicode="ƒ" horiz-adv-x="748" d="M-35 -330q81 18 136.5 41.5t90.5 57t50.5 77.5t15.5 103v913h-156v185h156v167q0 72 21 119.5t54.5 75t76.5 38.5t88 11q62 0 112 -13t97 -36v-172q-48 17 -83 25t-67 8q-26 0 -43 -7t-26.5 -20t-13 -30t-3.5 -37v-129h236v-185h-236v-874q0 -46 -4 -91.5t-14.5 -88.5 t-29.5 -81.5t-50 -70.5q-36 -38 -74 -64t-76 -44t-75.5 -29t-71.5 -19z" />
-<glyph unicode="Ǎ" horiz-adv-x="1425" d="M1118 0l-114 330h-580l-117 -330h-221l518 1434h219l516 -1434h-221zM764 1006l-26.5 79t-22.5 82q-10 -37 -23.5 -81.5t-25.5 -79.5l-177 -488h449zM819 1569h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="ǎ" horiz-adv-x="1090" d="M956 0h-204v90q-20 -21 -48 -41.5t-63.5 -37t-79.5 -26.5t-96 -10q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5q-98 0 -169.5 -26 t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q44 0 91 -5t91.5 -19.5t84 -41.5t69 -70.5t46.5 -106.5t17 -150v-678zM504 164q81 0 143.5 33t104.5 79v160q-44 23 -103 37t-121 14q-48 0 -90 -9t-73.5 - [...]
-<glyph unicode="Ǐ" horiz-adv-x="602" d="M195 0v1434h213v-1434h-213zM408 1569h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="ǐ" horiz-adv-x="524" d="M156 0v1047h213v-1047h-213zM370 1180h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="Ǒ" horiz-adv-x="1516" d="M758 -25q-115 0 -206 30.5t-160.5 83t-118.5 124t-80 154t-45.5 172t-14.5 178.5q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5t206 -30.5t160.5 -83t118.5 -124t80 -154t45 -172t14 -177.5q0 -89 -14 -178.5t-45 -172t-80 -154t-118.5 -124t-160.5 -83 t-206 -30.5zM758 180q75 0 134 23.5t103.5 63.5t76 93.5t51.5 113t29 122t9 121.5q0 62 -9 126.5t-29 124t-51.5 111.5t-76 90.5t-103.5 61t-134 22.5t-134 -23.5t-104 -63.5t-76.5 -93.5t-51.5 -113t-29 -12 [...]
-<glyph unicode="ǒ" horiz-adv-x="1161" d="M580 -25q-110 0 -198.5 37t-150.5 107.5t-95.5 172t-33.5 230.5t33.5 231t95.5 172.5t150.5 108t198.5 37.5q109 0 198 -37.5t151 -108t96 -172.5t34 -231t-34 -230.5t-96 -172t-151 -107.5t-198 -37zM580 176q61 0 110.5 24t84 68.5t53 108.5t18.5 145t-18.5 145.5 t-53 109.5t-84 69t-110.5 24t-110 -24t-83.5 -69t-53 -109.5t-18.5 -145.5t18.5 -145t53 -108.5t83.5 -68.5t110 -24zM686 1180h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="Ǔ" horiz-adv-x="1427" d="M713 -25q-111 0 -209.5 29t-171.5 95.5t-115.5 175t-42.5 268.5v891h213v-891q0 -108 25 -178t68.5 -111t103.5 -57.5t129 -16.5t128.5 16.5t104 57.5t69.5 111t25 178v891h213v-891q0 -158 -41 -266.5t-113.5 -175.5t-171.5 -96.5t-214 -29.5zM819 1569h-215l-289 319 h205l193 -162l192 162h205z" />
-<glyph unicode="ǔ" d="M791 0v100q-51 -65 -117.5 -95t-153.5 -30q-82 0 -151 23.5t-119.5 77t-78.5 138.5t-28 208v625h213v-625q0 -66 16 -112.5t44 -76.5t66 -43.5t83 -13.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213zM676 1180h-215l-289 319h205l193 -162l192 162h205z " />
-<glyph unicode="Ǽ" horiz-adv-x="1737" d="M833 0v330h-442l-186 -330h-230l832 1434h801v-201h-562v-393h322v-201h-322v-438h607v-201h-820zM833 520v598q-8 -18 -18.5 -39l-21.5 -42.5l-22.5 -42l-21.5 -38.5l-247 -436h331zM1123 1569h-217l290 319h291z" />
-<glyph unicode="ǽ" horiz-adv-x="1757" d="M1214 -25q-118 0 -210.5 43t-155.5 123q-48 -49 -99 -81t-101 -51t-97 -26.5t-86 -7.5q-80 0 -148 22t-118 66.5t-78.5 110t-28.5 152.5q0 90 34 155.5t90 108t128.5 62.5t149.5 20q72 0 138.5 -15.5t119.5 -46.5v58q0 59 -13.5 100t-41 66.5t-69.5 37t-100 11.5 q-98 0 -169.5 -26t-129.5 -69l-63 175q32 22 73 41.5t90 34.5t106 23.5t122 8.5q41 0 85.5 -5t87 -19.5t80.5 -41t67 -69.5q63 66 147.5 100.5t189.5 34.5q93 0 175 -29.5t143.5 -90t97 -152.5t35.5 -217q0 -34 -1.5 [...]
-<glyph unicode="Ǿ" horiz-adv-x="1495" d="M133 -25l146 213q-42 54 -71.5 117t-48 131t-27.5 139t-9 142q0 88 14.5 177.5t45.5 172t80 154t118.5 124t160.5 83t206 30.5q103 0 186 -24.5t149 -67.5l64 92h215l-148 -215q42 -54 71.5 -116.5t49 -130.5t28.5 -138.5t9 -140.5q0 -89 -14.5 -178.5t-45.5 -172 t-79.5 -154t-118 -124t-160.5 -83t-206 -30.5q-104 0 -187 24.5t-149 68.5l-64 -93h-215zM344 717q0 -85 17 -170.5t55 -159.5l547 801q-44 31 -97 48t-118 17q-75 0 -134 -23.5t-104 -63.5t-76.5 -93.5t-51.5 -113t [...]
-<glyph unicode="ǿ" horiz-adv-x="1161" d="M102 -25l113 164q-54 71 -83.5 166.5t-29.5 216.5q0 129 33.5 231t95.5 172.5t150.5 108t198.5 37.5q139 0 243 -59l41 59h195l-115 -164q56 -71 85.5 -167.5t29.5 -217.5q0 -129 -34 -230.5t-96 -172t-151 -107.5t-198 -37q-72 0 -132.5 15.5t-111.5 44.5l-41 -60h-193z M580 176q61 0 110.5 24t84 68.5t53 108.5t18.5 145q0 56 -9 104.5t-28 88.5l-352 -508q52 -31 123 -31zM315 522q0 -111 33 -190l354 508q-26 15 -57 22.5t-65 7.5q-61 0 -110 -24t-83.5 -69t-53 -109.5t-18. [...]
-<glyph unicode="Ș" horiz-adv-x="1237" d="M854 1108q-13 31 -36 57.5t-56 46t-75.5 30.5t-94.5 11q-103 0 -161 -45t-58 -127q0 -51 32 -86.5t84 -62.5t118.5 -49.5t136 -49t136 -61t118.5 -84t84 -119t32 -166.5q0 -103 -37.5 -182.5t-103.5 -134t-155.5 -83t-192.5 -28.5q-96 0 -179.5 24.5t-151 70t-116.5 110 t-76 144.5l201 73q22 -48 54.5 -88t74.5 -68.5t92.5 -44.5t108.5 -16t106 14t82 42t53 69.5t19 95.5q0 64 -32 107.5t-84 75.5t-118.5 56l-136 49t-136 56t-118.5 76t-84 108t-32 154q0 76 30.5 144t88.5 119. [...]
-<glyph unicode="ș" horiz-adv-x="973" d="M477 -25q-62 0 -121 12t-110.5 34t-93.5 53.5t-70 70.5l151 129q16 -15 39 -33.5t53 -34.5t67 -27t81 -11q40 0 74 6t58.5 19t38 34.5t13.5 53.5q0 37 -25.5 63.5t-66.5 48t-93 41t-105 41.5q-49 21 -96 46t-83.5 60t-59 83.5t-22.5 117.5q0 70 28.5 124t78 90.5 t116 55.5t142.5 19q67 0 123.5 -13t102 -35t80.5 -50.5t61 -59.5l-154 -122q-14 14 -33.5 29.5t-45.5 28.5t-59 21.5t-75 8.5q-68 0 -107.5 -25t-39.5 -72q0 -30 21 -52.5t57 -41.5t82.5 -36.5l97.5 -37.5q54 -21 106 [...]
-<glyph unicode="Ț" horiz-adv-x="1208" d="M711 1229v-1229h-213v1229h-406v205h1024v-205h-405zM504 -496q12 11 26.5 26t28.5 33.5t26.5 40t21.5 44.5q-28 0 -52 9.5t-42 26t-28.5 39.5t-10.5 50q0 31 10.5 56t28.5 42.5t42 27t50 9.5q28 0 53 -10t43.5 -28.5t29.5 -46t11 -63.5q0 -83 -43.5 -159.5 t-124.5 -141.5z" />
-<glyph unicode="ț" horiz-adv-x="776" d="M457 -25q-62 0 -106.5 18.5t-73 52.5t-41.5 82.5t-13 107.5v626h-162v185h162v329l213 103v-432h248v-185h-248v-571q0 -26 4.5 -48t16 -38t31.5 -24.5t51 -8.5q76 0 155 49l-26 -198q-44 -22 -96 -35t-115 -13zM332 -496q12 11 26.5 26t28.5 33.5t26.5 40t21.5 44.5 q-28 0 -52 9.5t-42 26t-28.5 39.5t-10.5 50q0 31 10.5 56t28.5 42.5t42 27t50 9.5q28 0 53 -10t43.5 -28.5t29.5 -46t11 -63.5q0 -83 -43.5 -159.5t-124.5 -141.5z" />
-<glyph unicode="ȷ" horiz-adv-x="524" d="M-129 -281q81 18 135.5 39.5t87.5 51.5t47.5 70.5t14.5 96.5v1070h213v-998q0 -46 -4 -91.5t-15 -88.5t-30 -81.5t-50 -70.5q-36 -38 -74 -64t-76 -43.5t-75 -29t-72 -18.5z" />
-<glyph unicode="" horiz-adv-x="524" d="M-129 -281q81 18 135.5 39.5t87.5 51.5t47.5 70.5t14.5 96.5v1070h213v-998q0 -46 -4 -91.5t-15 -88.5t-30 -81.5t-50 -70.5q-36 -38 -74 -64t-76 -43.5t-75 -29t-72 -18.5z" />
-<glyph unicode="ˆ" horiz-adv-x="819" d="M602 1180l-192 161l-193 -161h-205l291 319h215l289 -319h-205z" />
-<glyph unicode="ˇ" horiz-adv-x="819" d="M516 1180h-215l-289 319h205l193 -162l192 162h205z" />
-<glyph unicode="ˉ" horiz-adv-x="819" d="M72 1223v176h676v-176h-676z" />
-<glyph unicode="˘" horiz-adv-x="819" d="M410 1180q-99 0 -170.5 27.5t-118 74t-68.5 108t-22 130.5h180q0 -36 13.5 -66.5t39 -53t62.5 -35.5t84 -13t84 13t63 35.5t39.5 53t13.5 66.5h181q0 -69 -22.5 -131t-69.5 -108.5t-119 -73.5t-170 -27z" />
-<glyph unicode="˙" horiz-adv-x="819" d="M410 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-<glyph unicode="˚" horiz-adv-x="819" d="M410 1243q38 0 65 27.5t27 68.5q0 42 -27 69.5t-65 27.5q-39 0 -66 -27.5t-27 -69.5t27 -69t66 -27zM410 1090q-52 0 -97.5 19.5t-80 53.5t-54.5 79.5t-20 96.5q0 52 20 97t54.5 78.5t80 53t97.5 19.5q51 0 97 -19.5t80.5 -53t54.5 -78.5t20 -97q0 -51 -20 -96.5 t-54.5 -79.5t-80.5 -53.5t-97 -19.5z" />
-<glyph unicode="˛" horiz-adv-x="819" d="M424 -489q-56 0 -101.5 14t-78 41t-50.5 66t-18 89q0 103 75 179.5t206 119.5h153q-69 -28 -118 -62t-80 -70.5t-45.5 -74t-14.5 -73.5q0 -53 29.5 -80t73.5 -27q29 0 50 12t34 27l104 -75q-31 -35 -84.5 -60.5t-134.5 -25.5z" />
-<glyph unicode="˜" horiz-adv-x="819" d="M571 1202q-54 0 -99.5 15.5t-85.5 33.5l-77 33.5t-76 15.5q-38 0 -65.5 -21.5t-30.5 -68.5h-104q0 61 11.5 110.5t37.5 84.5t66.5 53.5t99.5 18.5q54 0 99.5 -15.5t85.5 -34t77 -34t76 -15.5q38 0 65.5 22t30.5 68h104q0 -61 -11.5 -110.5t-37.5 -84t-67 -53t-99 -18.5z " />
-<glyph unicode="˝" horiz-adv-x="819" d="M236 1180h-218l291 319h291zM666 1180h-217l290 319h291z" />
-<glyph unicode="μ" d="M791 0v100q-51 -65 -115 -95t-135 -30q-122 0 -185 84v-368l-213 -101v1457h213v-625q0 -72 15 -119.5t42.5 -75.5t66 -39.5t85.5 -11.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213z" />
-<glyph unicode="π" horiz-adv-x="1350" d="M1040 -25q-62 0 -106.5 18.5t-72.5 52.5t-41 82.5t-13 107.5v589h-285v-77q0 -138 -9.5 -260t-39.5 -223.5t-85 -180t-146 -129.5l-181 141q49 30 95.5 69t82.5 102.5t58 159t22 239.5v159h-190v222h1135v-222h-244v-538q0 -26 4.5 -48t16 -38t31 -24.5t50.5 -8.5 q77 0 156 49l-31 -194q-43 -22 -93.5 -35t-113.5 -13z" />
-<glyph unicode="Ẁ" horiz-adv-x="1745" d="M1653 1434l-310 -1434h-192l-248 930q-15 57 -23.5 92.5t-11.5 62.5q-3 -27 -11 -62.5t-23 -92.5l-242 -930h-193l-307 1434h219l178 -883l20.5 -100t10.5 -66q3 28 12.5 65.5l24.5 100.5l225 883h179l231 -881q17 -62 25.5 -99t11.5 -65q3 28 9.5 66t18.5 98l181 881 h215zM914 1569l-365 319h291l291 -319h-217z" />
-<glyph unicode="ẁ" horiz-adv-x="1487" d="M1137 0h-189l-174 608l-10.5 37t-10 36.5t-8 32.5t-4.5 25q-2 -10 -4.5 -25.5t-6.5 -33.5t-9 -37l-10 -37l-172 -606h-189l-278 1047h209l147 -652q8 -34 12.5 -59.5l8.5 -46.5q3 21 8.5 48t13.5 58l174 652h197l174 -652q8 -31 13.5 -58t8.5 -48q3 21 8 46.5t13 59.5 l147 652h209zM785 1180l-365 319h291l291 -319h-217z" />
-<glyph unicode="Ẃ" horiz-adv-x="1745" d="M1653 1434l-310 -1434h-192l-248 930q-15 57 -23.5 92.5t-11.5 62.5q-3 -27 -11 -62.5t-23 -92.5l-242 -930h-193l-307 1434h219l178 -883l20.5 -100t10.5 -66q3 28 12.5 65.5l24.5 100.5l225 883h179l231 -881q17 -62 25.5 -99t11.5 -65q3 28 9.5 66t18.5 98l181 881 h215zM832 1569h-217l290 319h291z" />
-<glyph unicode="ẃ" horiz-adv-x="1487" d="M1137 0h-189l-174 608l-10.5 37t-10 36.5t-8 32.5t-4.5 25q-2 -10 -4.5 -25.5t-6.5 -33.5t-9 -37l-10 -37l-172 -606h-189l-278 1047h209l147 -652q8 -34 12.5 -59.5l8.5 -46.5q3 21 8.5 48t13.5 58l174 652h197l174 -652q8 -31 13.5 -58t8.5 -48q3 21 8 46.5t13 59.5 l147 652h209zM703 1180h-217l290 319h291z" />
-<glyph unicode="Ẅ" horiz-adv-x="1745" d="M1653 1434l-310 -1434h-192l-248 930q-15 57 -23.5 92.5t-11.5 62.5q-3 -27 -11 -62.5t-23 -92.5l-242 -930h-193l-307 1434h219l178 -883l20.5 -100t10.5 -66q3 28 12.5 65.5l24.5 100.5l225 883h179l231 -881q17 -62 25.5 -99t11.5 -65q3 28 9.5 66t18.5 98l181 881 h215zM688 1599q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10zM1057 1599q-27 0 -50.5 10t-41 [...]
-<glyph unicode="ẅ" horiz-adv-x="1487" d="M1137 0h-189l-174 608l-10.5 37t-10 36.5t-8 32.5t-4.5 25q-2 -10 -4.5 -25.5t-6.5 -33.5t-9 -37l-10 -37l-172 -606h-189l-278 1047h209l147 -652q8 -34 12.5 -59.5l8.5 -46.5q3 21 8.5 48t13.5 58l174 652h197l174 -652q8 -31 13.5 -58t8.5 -48q3 21 8 46.5t13 59.5 l147 652h209zM559 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10zM928 1210q-27 0 -50.5 [...]
-<glyph unicode="Ỳ" horiz-adv-x="1364" d="M575 0v561l-503 873h243l367 -656l367 656h243l-504 -871v-563h-213zM682 1569l-365 319h291l291 -319h-217z" />
-<glyph unicode="ỳ" horiz-adv-x="1102" d="M498 -397h-217l161 415l-391 1029h211l258 -685q11 -29 17 -48t12 -44q6 25 12 44t17 48l260 685h213zM590 1180l-365 319h291l291 -319h-217z" />
-<glyph unicode=" " horiz-adv-x="988" />
-<glyph unicode=" " horiz-adv-x="1976" />
-<glyph unicode=" " horiz-adv-x="988" />
-<glyph unicode=" " horiz-adv-x="1976" />
-<glyph unicode=" " horiz-adv-x="658" />
-<glyph unicode=" " horiz-adv-x="494" />
-<glyph unicode=" " horiz-adv-x="329" />
-<glyph unicode=" " horiz-adv-x="329" />
-<glyph unicode=" " horiz-adv-x="247" />
-<glyph unicode=" " horiz-adv-x="395" />
-<glyph unicode=" " horiz-adv-x="109" />
-<glyph unicode="‐" horiz-adv-x="809" d="M154 506v205h501v-205h-501z" />
-<glyph unicode="‑" horiz-adv-x="809" d="M154 506v205h501v-205h-501z" />
-<glyph unicode="‒" horiz-adv-x="809" d="M154 506v205h501v-205h-501z" />
-<glyph unicode="–" horiz-adv-x="1014" d="M102 496v184h809v-184h-809z" />
-<glyph unicode="—" horiz-adv-x="1178" d="M-10 496v184h1198v-184h-1198z" />
-<glyph unicode="―" horiz-adv-x="1178" d="M-10 496v184h1198v-184h-1198z" />
-<glyph unicode="‘" horiz-adv-x="457" d="M233 895q-32 0 -61.5 11.5t-51.5 34.5t-35 58t-13 82q0 59 17.5 114.5t47.5 104.5t70.5 89t85.5 69l80 -51q-14 -12 -33.5 -34.5t-38.5 -51t-34 -61t-19 -66.5q27 -2 51.5 -13t43.5 -30t30.5 -44t11.5 -56q0 -36 -12.5 -64.5t-33.5 -49t-48.5 -31.5t-57.5 -11z" />
-<glyph unicode="’" horiz-adv-x="457" d="M84 946q14 12 33.5 34.5t38.5 51t33.5 61t19.5 66.5q-27 2 -52 13.5t-43.5 30t-30 44t-11.5 56.5q0 35 12.5 63.5t33 49t48 31.5t57.5 11q32 0 61.5 -11.5t52 -34.5t35.5 -58t13 -82q0 -59 -17.5 -115t-47.5 -104.5t-70.5 -89t-85.5 -68.5z" />
-<glyph unicode="‚" horiz-adv-x="457" d="M84 -238q14 12 33.5 34.5t38.5 51t33.5 61t19.5 66.5q-27 3 -52 14t-43.5 30t-30 44t-11.5 56q0 35 12.5 63.5t33 49t48 31.5t57.5 11q32 0 61.5 -11.5t52 -34.5t35.5 -58t13 -82q0 -59 -17.5 -115t-47.5 -104.5t-70.5 -89t-85.5 -68.5z" />
-<glyph unicode="‛" horiz-adv-x="457" d="M293 895q-45 28 -85.5 68.5t-70.5 89t-47.5 104.5t-17.5 115q0 47 13 82t35 58t51.5 34.5t61.5 11.5q30 0 57.5 -11t48.5 -31.5t33.5 -49t12.5 -63.5q0 -31 -11.5 -56.5t-30.5 -44t-43.5 -30t-51.5 -13.5q4 -34 19 -66.5t34 -61t38.5 -51t33.5 -34.5z" />
-<glyph unicode="“" horiz-adv-x="913" d="M233 895q-32 0 -61.5 11.5t-51.5 34.5t-35 58t-13 82q0 59 17.5 114.5t47.5 104.5t70.5 89t85.5 69l80 -51q-14 -12 -33.5 -34.5t-38.5 -51t-34 -61t-19 -66.5q27 -2 51.5 -13t43.5 -30t30.5 -44t11.5 -56q0 -36 -12.5 -64.5t-33.5 -49t-48.5 -31.5t-57.5 -11zM690 895 q-32 0 -61.5 11.5t-52 34.5t-35.5 58t-13 82q0 59 17.5 114.5t48 104.5t71 89t85.5 69l79 -51q-14 -12 -33 -34.5t-38 -51t-34 -61t-19 -66.5q27 -2 51.5 -13t43.5 -30t30.5 -44t11.5 -56q0 -36 -12.5 -64.5t-3 [...]
-<glyph unicode="”" horiz-adv-x="913" d="M84 946q14 12 33.5 34.5t38.5 51t33.5 61t19.5 66.5q-27 2 -52 13.5t-43.5 30t-30 44t-11.5 56.5q0 35 12.5 63.5t33 49t48 31.5t57.5 11q32 0 61.5 -11.5t52 -34.5t35.5 -58t13 -82q0 -59 -17.5 -115t-47.5 -104.5t-70.5 -89t-85.5 -68.5zM541 946q14 12 33.5 34.5 t38 51t33.5 61t20 66.5q-27 2 -52 13.5t-44 30t-30.5 44t-11.5 56.5q0 35 12.5 63.5t33.5 49t48.5 31.5t57.5 11q32 0 61.5 -11.5t52 -34.5t35.5 -58t13 -82q0 -59 -17.5 -115t-48 -104.5t-70.5 -89t-85 -68.5z" />
-<glyph unicode="„" horiz-adv-x="913" d="M84 -238q14 12 33.5 34.5t38.5 51t33.5 61t19.5 66.5q-27 3 -52 14t-43.5 30t-30 44t-11.5 56q0 35 12.5 63.5t33 49t48 31.5t57.5 11q32 0 61.5 -11.5t52 -34.5t35.5 -58t13 -82q0 -59 -17.5 -115t-47.5 -104.5t-70.5 -89t-85.5 -68.5zM541 -238q14 12 33.5 34.5t38 51 t33.5 61t20 66.5q-27 3 -52 14t-44 30t-30.5 44t-11.5 56q0 35 12.5 63.5t33.5 49t48.5 31.5t57.5 11q32 0 61.5 -11.5t52 -34.5t35.5 -58t13 -82q0 -59 -17.5 -115t-48 -104.5t-70.5 -89t-85 -68.5z" />
-<glyph unicode="†" horiz-adv-x="1147" d="M659 899l21 -211l-64 -977h-81l-68 977l18 211l-239 -33h-113v205h113l237 -35l-16 166v256h213v-256l-18 -166l239 35h113v-205h-113z" />
-<glyph unicode="‡" horiz-adv-x="1147" d="M901 98l-239 35l18 -166v-256h-213v256l16 166l-237 -35h-113v205h113l239 -33l-18 211v207l18 211l-239 -33h-113v205h113l237 -35l-16 166v256h213v-256l-18 -166l239 35h113v-205h-113l-242 33l21 -211v-207l-21 -211l242 33h113v-205h-113z" />
-<glyph unicode="•" horiz-adv-x="831" d="M414 436q-62 0 -115.5 22t-92.5 60t-61 89t-22 110q0 58 22 109.5t61 90.5t92.5 61.5t115.5 22.5t116 -22.5t93.5 -61.5t62.5 -90.5t23 -109.5q0 -59 -23 -110t-62.5 -89t-93.5 -60t-116 -22z" />
-<glyph unicode="…" horiz-adv-x="1911" d="M317 -25q-28 0 -52.5 10t-42.5 27.5t-28 41t-10 50.5t10 51t28 42t42.5 28.5t52.5 10.5t53 -10.5t43.5 -28.5t29 -42t10.5 -51t-10.5 -50.5t-29 -41t-43.5 -27.5t-53 -10zM954 -25q-28 0 -52.5 10t-42.5 27.5t-28 41t-10 50.5t10 51t28 42t42.5 28.5t52.5 10.5t53 -10.5 t43.5 -28.5t29 -42t10.5 -51t-10.5 -50.5t-29 -41t-43.5 -27.5t-53 -10zM1591 -25q-28 0 -52.5 10t-42.5 27.5t-28 41t-10 50.5t10 51t28 42t42.5 28.5t52.5 10.5t53 -10.5t43 -28.5t28.5 -42t10.5 -51t-10.5 [...]
-<glyph unicode=" " horiz-adv-x="395" />
-<glyph unicode="‰" horiz-adv-x="2542" d="M428 729q-92 0 -156 33t-104 85t-58 117t-18 130q0 60 17.5 124t57.5 117.5t104 88t157 34.5q92 0 156 -32.5t104 -85t58 -117.5t18 -129q0 -59 -17 -123.5t-56.5 -118t-104 -88.5t-158.5 -35zM428 893q42 0 73 16t51.5 43.5t30 64t9.5 77.5q0 43 -10 80t-30.5 63.5 t-51 41.5t-72.5 15t-73 -16t-51.5 -43t-30 -63.5t-9.5 -77.5q0 -44 10 -81t30.5 -63.5t51 -41.5t72.5 -15zM479 -25h-194l979 1483h194zM1315 -25q-92 0 -156 33t-104 85t-58 117t-18 130q0 60 17.5 124.5t57.5 1 [...]
-<glyph unicode="′" horiz-adv-x="451" d="M113 1495h266l-127 -655h-180z" />
-<glyph unicode="″" horiz-adv-x="860" d="M113 1495h266l-127 -655h-180zM522 1495h266l-126 -655h-181z" />
-<glyph unicode="‹" horiz-adv-x="635" d="M373 0l-312 524l312 523h231l-311 -523l311 -524h-231z" />
-<glyph unicode="›" horiz-adv-x="635" d="M31 0l311 524l-311 523h231l311 -523l-311 -524h-231z" />
-<glyph unicode="⁄" horiz-adv-x="682" d="M-246 -25l979 1483h195l-979 -1483h-195z" />
-<glyph unicode=" " horiz-adv-x="494" />
-<glyph unicode="ⁿ" horiz-adv-x="940" d="M641 580v503q0 55 -12 93t-33 62t-50 35t-63 11q-37 0 -68.5 -10t-54.5 -33t-36 -61t-13 -93v-507h-188v858h188v-90q37 51 93 80.5t137 29.5q62 0 114.5 -19.5t90.5 -63t59.5 -113.5t21.5 -170v-512h-186z" />
-<glyph unicode="₤" horiz-adv-x="1206" d="M164 0v213q31 15 55 37.5t43 50.5t32.5 60t22.5 65h-163v180h182q-3 38 -13 71t-22 64h-147v181h75q-8 26 -11 56.5t-3 68.5q0 78 28.5 152t87 131.5t148 92.5t211.5 35q71 0 130 -9.5t111 -27.5t99.5 -45t95.5 -61l-84 -203l-56 47t-72.5 46t-96.5 34.5t-127 13.5 q-68 0 -116 -18t-78.5 -47t-45 -66t-14.5 -75q0 -35 7 -65.5t20 -59.5h405v-181h-333q9 -31 14.5 -64t7.5 -71h311v-180h-329q-37 -118 -131 -221h737v-205h-981z" />
-<glyph unicode="€" horiz-adv-x="1290" d="M496 444q15 -57 38.5 -105t58.5 -83.5t81 -55.5t106 -20q79 0 141.5 40t108.5 108l178 -111q-32 -54 -75 -99t-96.5 -76.5t-117.5 -49t-139 -17.5q-120 0 -208 37.5t-149 101t-98.5 149t-56.5 181.5h-188l76 181h90q-2 23 -2 46v46v30.5t2 32.5h-164l76 181h110 q20 97 60 186.5t104 158.5t155 110.5t214 41.5q96 0 187.5 -27t187.5 -96l-84 -202q-26 22 -55.5 43.5t-65.5 38.5t-78 27.5t-92 10.5q-68 0 -119 -23.5t-88.5 -63t-62.5 -92.5t-39 -113h528l-76 -181h-477q-2 -17 -2 [...]
-<glyph unicode="℅" horiz-adv-x="2126" d="M481 557q-90 0 -160 30.5t-119 88.5t-74.5 142t-25.5 190t26.5 189.5t76 141.5t119 88.5t155.5 30.5q78 0 134 -20t95.5 -52.5t64 -73t38.5 -81.5l-178 -60q-7 22 -19 42t-30 36t-41.5 25.5t-53.5 9.5q-48 0 -86 -19t-63.5 -55t-39 -87t-13.5 -115q0 -63 13.5 -114 t38 -86.5t60 -55t80.5 -19.5q68 0 103.5 33t54.5 92l172 -53q-35 -125 -115 -186.5t-213 -61.5zM1628 -25q-90 0 -162 30.5t-123 88t-78.5 141t-27.5 189.5t27.5 189.5t78.5 141.5t123 88.5t162 30.5t163 -30.5t12 [...]
-<glyph unicode="ℓ" horiz-adv-x="952" d="M502 -25q-48 0 -89.5 14t-73 44t-49.5 78.5t-18 117.5v226q-26 -16 -50.5 -27.5l-41.5 -19.5l-67 170q18 7 39.5 17.5l43 22l41.5 23t35 21.5v368q0 122 20.5 204.5t59 132t92.5 70.5t121 21q62 0 116 -19.5t93.5 -58t62.5 -96.5t23 -135q0 -75 -17.5 -139.5t-46.5 -119 t-67 -101t-80 -85.5t-84.5 -72l-79.5 -61v-266q0 -43 7 -69t21 -40.5t33.5 -19t45.5 -4.5q15 0 32.5 4.5t36 14.5t37 24.5t33.5 34.5l60 -139q-24 -30 -56 -55t-69.5 -43t-79 -28t-84.5 -10zM567 1284q-22 0 - [...]
-<glyph unicode="№" horiz-adv-x="2425" d="M1069 0l-620 973l-58 106l6 -106v-973h-213v1434h215l605 -957l59 -110l-6 108v959h213v-1434h-201zM1485 242v159h786v-159h-786zM1876 559q-90 0 -162.5 30.5t-123 88t-78 141t-27.5 189.5t27.5 189.5t78 141.5t123 88.5t162.5 30.5t163 -30.5t124.5 -88.5t79.5 -141.5 t28 -189.5t-28 -189.5t-79.5 -141t-124.5 -88t-163 -30.5zM1876 733q100 0 154.5 72.5t54.5 202.5q0 131 -54.5 203.5t-154.5 72.5q-97 0 -152 -72.5t-55 -203.5q0 -129 55 -202t152 -73z" />
-<glyph unicode="™" horiz-adv-x="1640" d="M424 1286v-717h-154v717h-229v148h610v-148h-227zM1346 569v516l-19 -51l-213 -497l-215 497l-20 58v-523h-154v865h158l215 -502l14 -41l14 41l215 502h156v-865h-151z" />
-<glyph unicode="Ω" horiz-adv-x="1536" d="M874 0v186q67 72 117 138.5t83.5 138.5t50 153t16.5 181q0 83 -19 164.5t-63 146.5t-115 105t-176 40t-176 -40t-115 -105t-63 -147t-19 -166q0 -99 16.5 -180t50 -152.5t83.5 -138t117 -138.5v-186h-498v205h291q-133 99 -207 247t-74 347q0 77 13.5 155.5t43 152 t75.5 137.5t112 111.5t153 75t197 27.5t197 -27.5t153 -75t112 -111.5t75.5 -137.5t43 -152t13.5 -155.5q0 -198 -75.5 -347t-207.5 -247h293v-205h-498z" />
-<glyph unicode="℮" horiz-adv-x="1597" d="M809 -25q-96 0 -186 23.5t-168.5 68t-143.5 109.5t-111.5 147.5t-72 181.5t-25.5 212q0 125 27.5 229.5t76 187.5t115.5 145t146.5 103t167.5 61.5t180 20.5q130 0 250 -47t213 -141t151 -233.5t66 -325.5h-1071v-449q24 -35 62.5 -66t88 -54t108.5 -36.5t126 -13.5 q146 0 272 67t224 218l104 -64q-52 -78 -111 -141.5t-131 -108.5t-160 -69.5t-198 -24.5zM1184 1182q-29 34 -69 63t-88.5 50.5t-104 33.5t-113.5 12q-121 0 -217 -36.5t-168 -104.5v-364l760 2v344z" />
-<glyph unicode="⅓" horiz-adv-x="1485" d="M20 -25l979 1483h195l-979 -1483h-195zM143 580v684h-113v125q32 0 55.5 3t40.5 10t29 17t21 25h135v-864h-168zM1112 -16q-119 0 -199 66.5t-114 185.5l168 45q8 -36 24 -62.5t36 -43t43.5 -24.5t47.5 -8q53 0 88 28.5t35 82.5q0 21 -7.5 41t-23.5 35.5t-41 25t-59 9.5 h-64v159h62q54 0 78 28t24 69q0 42 -25 70t-73 28t-75.5 -28t-43.5 -73l-164 60q37 91 110.5 146t176.5 55q64 0 114 -20t84.5 -54.5t53 -79.5t18.5 -96q0 -53 -27.5 -101t-72.5 -77q62 -28 96.5 -82t34.5 -1 [...]
-<glyph unicode="⅔" horiz-adv-x="1812" d="M348 -25l979 1483h195l-979 -1483h-195zM51 580q0 92 24 167.5t65.5 135.5t96 105t115.5 77q25 13 47 26t39 28t27 34t10 45q0 40 -30 70t-87 30q-58 0 -94.5 -35t-48.5 -98l-166 39q11 55 39 101.5t68.5 80.5t92 53t109.5 19q69 0 123.5 -19.5t92.5 -54.5t58.5 -83.5 t20.5 -106.5q0 -57 -18 -98t-49.5 -72t-74 -56t-91.5 -50q-71 -38 -109.5 -83.5t-60.5 -95.5h401v-159h-600zM1440 -16q-119 0 -199 66.5t-114 185.5l168 45q8 -36 24 -62.5t36 -43t43.5 -24.5t47.5 -8q53 0 88 [...]
-<glyph unicode="⅛" horiz-adv-x="1485" d="M20 -25l979 1483h195l-979 -1483h-195zM143 580v684h-113v125q32 0 55.5 3t40.5 10t29 17t21 25h135v-864h-168zM1110 -16q-69 0 -125.5 20t-97 56t-62.5 87.5t-22 114.5q0 68 34.5 121.5t89.5 87.5q-43 31 -65.5 73t-22.5 95q0 54 19.5 98t55 75.5t85.5 49t111 17.5 t111 -17.5t85.5 -49t54.5 -75.5t19 -98q0 -48 -22 -92.5t-64 -75.5q56 -34 89.5 -88.5t33.5 -120.5q0 -63 -22 -114.5t-62.5 -87.5t-97 -56t-125.5 -20zM1110 139q62 0 99.5 34.5t37.5 88.5q0 27 -10 50.5t-28 4 [...]
-<glyph unicode="⅜" horiz-adv-x="1731" d="M266 -25l979 1483h195l-979 -1483h-195zM338 563q-119 0 -199 66.5t-114 185.5l168 45q17 -72 60 -104.5t91 -32.5q53 0 88 28.5t35 82.5q0 21 -7.5 41t-23.5 35.5t-41 24.5t-59 9h-64v160h62q54 0 78 28t24 68q0 42 -25 70t-73 28t-75.5 -27.5t-43.5 -72.5l-164 59 q37 91 110.5 146t176.5 55q64 0 114 -20t84.5 -54.5t53 -79.5t18.5 -96q0 -52 -27.5 -100.5t-72.5 -77.5q62 -28 96.5 -82t34.5 -117q0 -61 -22.5 -110.5t-62.5 -84.5t-96.5 -54t-123.5 -19zM1356 -16q-69 0 -125 [...]
-<glyph unicode="⅝" horiz-adv-x="1731" d="M266 -25l979 1483h195l-979 -1483h-195zM326 565q-53 0 -97 8.5t-79.5 21.5t-62 29t-44.5 31l96 140l36 -24.5t41.5 -22.5t49.5 -16.5t60 -6.5q36 0 65.5 10.5t51 29.5t33 45t11.5 58q0 63 -35 95t-92 32q-48 0 -80 -25.5t-55 -68.5l-151 78l24 465h508v-160h-354 l-10 -164q28 17 61.5 26t67.5 9q73 0 127.5 -25t90 -65t53 -90.5t17.5 -102.5q0 -71 -24.5 -127.5t-68.5 -96.5t-105.5 -61.5t-134.5 -21.5zM1356 -16q-69 0 -125.5 20t-97 56t-62.5 87.5t-22 114.5q0 68 34.5 121. [...]
-<glyph unicode="⅞" horiz-adv-x="1546" d="M82 -25l979 1483h195l-979 -1483h-195zM190 580q1 109 16 203.5t42.5 180t66.5 164.5t88 156h-393v160h590v-150q-70 -81 -116 -176.5t-73 -191.5t-38.5 -186t-12.5 -160h-170zM1172 -16q-69 0 -125.5 20t-97 56t-62.5 87.5t-22 114.5q0 68 34.5 121.5t89.5 87.5 q-43 31 -65.5 73t-22.5 95q0 54 19.5 98t55 75.5t85.5 49t111 17.5t111 -17.5t85.5 -49t54.5 -75.5t19 -98q0 -48 -22 -92.5t-64 -75.5q56 -34 89.5 -88.5t33.5 -120.5q0 -63 -22 -114.5t-62.5 -87.5t-97 -56t-125.5 [...]
-<glyph unicode="∂" horiz-adv-x="1194" d="M451 1458q169 -18 289 -76t195.5 -156t110.5 -234t35 -310q0 -91 -13.5 -179t-42 -166.5t-74 -145t-109 -114.5t-146 -75t-186.5 -27q-101 0 -184 30.5t-142 86.5t-91 135.5t-32 178.5q0 114 41 205.5t110 156t158 99t185 34.5q61 0 109 -11t85.5 -30.5t66 -46.5 t52.5 -59q0 135 -39.5 229t-105 153t-150 87.5t-174.5 33.5zM571 696q-66 0 -120 -20t-92 -56t-59 -86.5t-21 -111.5q0 -57 17 -102t48.5 -76t75.5 -47.5t98 -16.5q70 0 127 21t97 58.5t61.5 89.5t21.5 114q0 117 -6 [...]
-<glyph unicode="∆" horiz-adv-x="1425" d="M86 0l518 1434h219l516 -1434h-1253zM764 1006q-5 16 -12 36.5l-14 42t-13 43t-10 39.5l-22.5 -80.5t-26.5 -80.5l-287 -801h670z" />
-<glyph unicode="∏" horiz-adv-x="1468" d="M1071 -287v1516h-674v-1516h-213v1721h1100v-1721h-213z" />
-<glyph unicode="∑" horiz-adv-x="1253" d="M92 -287v193l504 688l-475 647v193h1022v-205h-750l469 -635l-495 -676h815v-205h-1090z" />
-<glyph unicode="−" horiz-adv-x="1290" d="M164 600v205h962v-205h-962z" />
-<glyph unicode="∕" horiz-adv-x="682" d="M-246 -25l979 1483h195l-979 -1483h-195z" />
-<glyph unicode="∙" horiz-adv-x="457" d="M227 565q-33 0 -61 12t-48.5 32.5t-32 47.5t-11.5 58t11.5 58.5t32 48t48.5 32.5t61 12q32 0 60.5 -12t49.5 -32.5t33.5 -48t12.5 -58.5t-12.5 -58t-33.5 -47.5t-49.5 -32.5t-60.5 -12z" />
-<glyph unicode="√" horiz-adv-x="1139" d="M1077 1434l-436 -1434h-180l-369 700h225l211 -409l330 1143h219z" />
-<glyph unicode="∞" horiz-adv-x="2048" d="M1489 242q-85 0 -155 22.5t-127 62.5t-102.5 93t-80.5 115q-36 -62 -81 -115t-102 -93t-127 -62.5t-155 -22.5q-102 0 -191 31.5t-154.5 92.5t-103.5 149t-38 202t38 202.5t103.5 149.5t154.5 93t191 32q85 0 155 -23t127 -63t102 -94t81 -115q35 61 80 115t101.5 94 t126 63t153.5 23q103 0 193 -32t156 -93t104 -149.5t38 -202.5t-38 -202t-103.5 -149t-154.5 -92.5t-191 -31.5zM1479 442q60 0 112 18.5t90.5 53.5t60 86.5t21.5 116.5t-21.5 116.5t-60 87t-90.5 54t-112 18.5q [...]
-<glyph unicode="∫" horiz-adv-x="860" d="M297 -287q-66 0 -117.5 13t-97.5 36v168q48 -17 82.5 -24.5t66.5 -7.5q52 0 72.5 27t20.5 67v1222q0 72 20 119.5t53 75t76 38.5t90 11q66 0 117.5 -13t97.5 -36v-168q-48 17 -82.5 25t-66.5 8q-26 0 -44 -7t-28.5 -20t-15 -30t-4.5 -37v-1223q0 -72 -20 -119.5t-53 -75 t-76.5 -38.5t-90.5 -11z" />
-<glyph unicode="≈" horiz-adv-x="1290" d="M852 752q-65 0 -121 18.5t-105.5 40.5l-95 40.5t-90.5 18.5q-55 0 -86.5 -33t-36.5 -85h-153q0 64 15 122.5t48 103t85 71t126 26.5q65 0 121 -18.5t105.5 -41t95 -41t90.5 -18.5q56 0 87 33.5t36 85.5h153q0 -64 -15 -122.5t-47.5 -103t-84.5 -71t-127 -26.5zM852 332 q-65 0 -121 18.5t-105.5 41t-95 41t-90.5 18.5q-55 0 -86.5 -33.5t-36.5 -85.5h-153q0 64 15 122.5t48 103t85 71t126 26.5q65 0 121 -18.5t105.5 -40.5l95 -40.5t90.5 -18.5q56 0 87 33t36 85h153q0 -64 -15 [...]
-<glyph unicode="≠" horiz-adv-x="1290" d="M614 399l-82 -178h-225l82 178h-225v209h323l88 189h-411v209h508l84 178h225l-84 -178h229v-209h-325l-88 -189h413v-209h-512z" />
-<glyph unicode="≤" horiz-adv-x="1290" d="M1126 352l-962 350v181l962 350v-215l-671 -225l671 -226v-215zM164 0v205h962v-205h-962z" />
-<glyph unicode="≥" horiz-adv-x="1290" d="M164 352v215l672 226l-672 225v215l962 -350v-181zM164 0v205h962v-205h-962z" />
-<glyph unicode="◊" horiz-adv-x="1229" d="M524 -25l-411 742l411 741h181l411 -741l-411 -742h-181zM614 1188l-282 -471l282 -471l283 471z" />
-<glyph unicode="◌" horiz-adv-x="1319" d="M113 735q0 29 18.5 46.5t46.5 17.5t45 -17.5t17 -46.5q0 -28 -17 -44.5t-45 -16.5t-46.5 16.5t-18.5 44.5zM1079 735q0 29 17 46.5t45 17.5q26 0 45.5 -17.5t19.5 -46.5q0 -28 -19.5 -44.5t-45.5 -16.5q-28 0 -45 16.5t-17 44.5zM598 254q0 28 17 45.5t44 17.5 q28 0 45 -17.5t17 -45.5t-17 -46t-45 -18q-27 0 -44 18t-17 46zM598 1217q0 29 17 47t44 18q28 0 45 -18t17 -47q0 -28 -17 -45t-45 -17q-27 0 -44 17t-17 45zM938 1077q0 28 17 45t44 17q28 0 46 -17t18 -45q0 -29 -1 [...]
-<glyph unicode="" horiz-adv-x="1045" d="M0 1045h1045v-1045h-1045v1045z" />
-<glyph unicode="" horiz-adv-x="786" d="M393 -14q-92 0 -153 39.5t-97 103.5t-50.5 143.5t-14.5 159.5q0 50 6 103t20.5 102.5t38 93.5t58.5 77t82.5 52t109.5 19q92 0 153 -40t97 -103.5t51 -143.5t15 -160q0 -50 -6 -102.5t-20 -102t-37.5 -93.5t-58.5 -77t-83 -52t-111 -19zM393 150q42 0 70 26t44.5 66.5 t23 91t6.5 98.5q0 51 -7 102t-23.5 91t-44 65t-69.5 25t-69.5 -26t-44 -66.5t-23 -91t-6.5 -99.5q0 -51 7 -101.5t23.5 -90.5t44 -65t68.5 -25z" />
-<glyph unicode="" horiz-adv-x="410" d="M133 0v684h-113v125q32 0 55.5 3t40.5 10t29 17t21 25h135v-864h-168z" />
-<glyph unicode="" horiz-adv-x="731" d="M51 0q0 92 24 167.5t65.5 135.5t96 105t115.5 77q25 13 47 26t39 28.5t27 34.5t10 44q0 41 -30 71t-87 30q-58 0 -94.5 -35t-48.5 -98l-166 39q11 54 39 100.5t68.5 80.5t92 53.5t109.5 19.5q69 0 123.5 -20t92.5 -55t58.5 -83.5t20.5 -106.5q0 -57 -18 -97.5 t-49.5 -71.5t-74 -56.5t-91.5 -50.5q-71 -37 -109.5 -83t-60.5 -95h401v-160h-600z" />
-<glyph unicode="" horiz-adv-x="711" d="M338 -16q-119 0 -199 66.5t-114 185.5l168 45q8 -36 24 -62.5t36 -43t43.5 -24.5t47.5 -8q53 0 88 28.5t35 82.5q0 21 -7.5 41t-23.5 35.5t-41 25t-59 9.5h-64v159h62q54 0 78 28t24 69q0 42 -25 70t-73 28t-75.5 -28t-43.5 -73l-164 60q37 91 110.5 146t176.5 55 q64 0 114 -20t84.5 -54.5t53 -79.5t18.5 -96q0 -53 -27.5 -101t-72.5 -77q62 -28 96.5 -82t34.5 -117q0 -62 -22.5 -111.5t-62.5 -84t-96.5 -53.5t-123.5 -19z" />
-<glyph unicode="" horiz-adv-x="770" d="M449 0v176h-435v109l441 581h157v-532h111v-158h-111v-176h-163zM449 334v291l-224 -291h224z" />
-<glyph unicode="" horiz-adv-x="717" d="M326 -14q-53 0 -97 8.5t-79.5 21.5t-62 29t-44.5 31l96 139l36 -24.5t41.5 -22.5t49.5 -16.5t60 -6.5q36 0 65.5 10.5t51 30t33 45.5t11.5 58q0 63 -35 95t-92 32q-48 0 -80 -25.5t-55 -68.5l-151 77l24 465h508v-159h-354l-10 -164q28 17 61.5 25.5t67.5 8.5 q73 0 127.5 -25t90 -65t53 -90.5t17.5 -101.5q0 -71 -24.5 -128t-68.5 -96.5t-105.5 -61t-134.5 -21.5z" />
-<glyph unicode="" horiz-adv-x="725" d="M379 -14q-78 0 -136 26t-96 72t-56.5 109.5t-18.5 138.5q0 106 29 197.5t90 162.5t153.5 119t220.5 68v-160q-45 -8 -89 -23t-83 -39t-70 -58.5t-49 -82.5q25 14 61 22.5t77 8.5q58 0 108 -19.5t86.5 -56t57 -88.5t20.5 -117q0 -64 -22 -115.5t-62 -88t-96.5 -56.5 t-124.5 -20zM379 145q60 0 98.5 33t38.5 88q0 59 -38.5 90t-98.5 31t-96.5 -31t-36.5 -90q0 -55 36.5 -88t96.5 -33z" />
-<glyph unicode="" horiz-adv-x="621" d="M190 0q1 109 16 204t42.5 180.5t66.5 164.5t88 156h-393v159h590v-149q-70 -81 -116 -176.5t-73 -192t-38.5 -186.5t-12.5 -160h-170z" />
-<glyph unicode="" horiz-adv-x="748" d="M373 -16q-69 0 -125.5 20t-97 56t-62.5 87.5t-22 114.5q0 68 34.5 121.5t89.5 87.5q-43 31 -65.5 73t-22.5 95q0 54 19.5 98t55 75.5t85.5 49t111 17.5t111 -17.5t85.5 -49t54.5 -75.5t19 -98q0 -48 -22 -92.5t-64 -75.5q56 -34 89.5 -88.5t33.5 -120.5q0 -63 -22 -114.5 t-62.5 -87.5t-97 -56t-125.5 -20zM373 139q62 0 99.5 34.5t37.5 88.5q0 27 -10 50.5t-28 41t-43 27.5t-56 10q-30 0 -55 -10t-43.5 -27.5t-28.5 -41t-10 -50.5t10 -49.5t28.5 -39t43.5 -25.5t55 -9zM373 547q [...]
-<glyph unicode="" horiz-adv-x="725" d="M186 145q42 3 83 14t77 34t65.5 61.5t47.5 97.5q-23 -17 -58 -27t-73 -10q-64 0 -116.5 19.5t-90.5 56t-59 88.5t-21 117q0 64 22.5 116t63.5 89t97 57.5t124 20.5q80 0 139 -25t98 -70t58 -106.5t19 -134.5q0 -121 -29 -221t-88 -172.5t-148.5 -115t-210.5 -48.5v159z M346 475q65 0 102 32t37 91q0 56 -37.5 88.5t-99.5 32.5q-60 0 -97.5 -33t-37.5 -90q0 -59 35.5 -90t97.5 -31z" />
-<glyph unicode="" horiz-adv-x="786" d="M393 565q-62 0 -109 18.5t-82 50t-58.5 75t-38.5 93t-21 103.5t-6 107q0 75 14.5 154t50 144t96.5 106.5t154 41.5q92 0 153 -39.5t97 -103.5t51 -143.5t15 -159.5q0 -50 -6 -102.5t-20 -102t-37.5 -93.5t-58.5 -77t-83 -52.5t-111 -19.5zM393 729q42 0 70 26t44.5 67 t23 91.5t6.5 98.5q0 51 -7 101.5t-23.5 90.5t-44 65t-69.5 25t-69.5 -25.5t-44 -66t-23 -91t-6.5 -99.5q0 -52 7 -102.5t23.5 -90.5t44 -65t68.5 -25z" />
-<glyph unicode="" horiz-adv-x="410" d="M133 580v684h-113v125q32 0 55.5 3t40.5 10t29 17t21 25h135v-864h-168z" />
-<glyph unicode="" horiz-adv-x="731" d="M51 580q0 92 24 167.5t65.5 135.5t96 105t115.5 77q25 13 47 26t39 28t27 34t10 45q0 40 -30 70t-87 30q-58 0 -94.5 -35t-48.5 -98l-166 39q11 55 39 101.5t68.5 80.5t92 53t109.5 19q69 0 123.5 -19.5t92.5 -54.5t58.5 -83.5t20.5 -106.5q0 -57 -18 -98t-49.5 -72 t-74 -56t-91.5 -50q-71 -38 -109.5 -83.5t-60.5 -95.5h401v-159h-600z" />
-<glyph unicode="" horiz-adv-x="711" d="M338 563q-119 0 -199 66.5t-114 185.5l168 45q17 -72 60 -104.5t91 -32.5q53 0 88 28.5t35 82.5q0 21 -7.5 41t-23.5 35.5t-41 24.5t-59 9h-64v160h62q54 0 78 28t24 68q0 42 -25 70t-73 28t-75.5 -27.5t-43.5 -72.5l-164 59q37 91 110.5 146t176.5 55q64 0 114 -20 t84.5 -54.5t53 -79.5t18.5 -96q0 -52 -27.5 -100.5t-72.5 -77.5q62 -28 96.5 -82t34.5 -117q0 -61 -22.5 -110.5t-62.5 -84.5t-96.5 -54t-123.5 -19z" />
-<glyph unicode="" horiz-adv-x="770" d="M449 580v176h-435v108l441 582h157v-533h111v-157h-111v-176h-163zM449 913v291l-224 -291h224z" />
-<glyph unicode="" horiz-adv-x="717" d="M326 565q-53 0 -97 8.5t-79.5 21.5t-62 29t-44.5 31l96 140l36 -24.5t41.5 -22.5t49.5 -16.5t60 -6.5q36 0 65.5 10.5t51 29.5t33 45t11.5 58q0 63 -35 95t-92 32q-48 0 -80 -25.5t-55 -68.5l-151 78l24 465h508v-160h-354l-10 -164q28 17 61.5 26t67.5 9q73 0 127.5 -25 t90 -65t53 -90.5t17.5 -102.5q0 -71 -24.5 -127.5t-68.5 -96.5t-105.5 -61.5t-134.5 -21.5z" />
-<glyph unicode="" horiz-adv-x="725" d="M379 565q-78 0 -136 26t-96 72t-56.5 109.5t-18.5 138.5q0 107 29 198.5t90 162t153.5 118.5t220.5 68v-160q-45 -8 -89 -22.5t-83 -38.5t-70 -58.5t-49 -82.5q25 14 61 22t77 8q58 0 108 -19.5t86.5 -55.5t57 -88t20.5 -117q0 -64 -22 -116t-62 -88.5t-96.5 -56.5 t-124.5 -20zM379 725q60 0 98.5 32.5t38.5 88.5q0 59 -38.5 90t-98.5 31t-96.5 -31t-36.5 -90q0 -56 36.5 -88.5t96.5 -32.5z" />
-<glyph unicode="" horiz-adv-x="621" d="M190 580q1 109 16 203.5t42.5 180t66.5 164.5t88 156h-393v160h590v-150q-70 -81 -116 -176.5t-73 -191.5t-38.5 -186t-12.5 -160h-170z" />
-<glyph unicode="" horiz-adv-x="748" d="M373 563q-69 0 -125.5 20t-97 56.5t-62.5 88t-22 114.5q0 68 34.5 121.5t89.5 87.5q-43 31 -65.5 73t-22.5 95q0 54 19.5 98t55 75.5t85.5 48.5t111 17t111 -17t85.5 -48.5t54.5 -75.5t19 -98q0 -48 -22 -92.5t-64 -75.5q56 -34 89.5 -88.5t33.5 -120.5q0 -63 -22 -114.5 t-62.5 -88t-97 -56.5t-125.5 -20zM373 719q62 0 99.5 34.5t37.5 88.5q0 27 -10 50.5t-28 41t-43 27.5t-56 10q-30 0 -55 -10t-43.5 -27.5t-28.5 -41t-10 -50.5t10 -49.5t28.5 -39t43.5 -25.5t55 -9zM373 1126 [...]
-<glyph unicode="" horiz-adv-x="725" d="M186 725q42 3 83 13.5t77 33.5t65.5 62t47.5 98q-23 -17 -58 -27t-73 -10q-64 0 -116.5 19.5t-90.5 55.5t-59 88t-21 118q0 64 22.5 116t63.5 89t97 57t124 20q80 0 139 -25t98 -70t58 -106.5t19 -134.5q0 -120 -29 -220t-88 -172.5t-148.5 -115.5t-210.5 -49v160z M346 1055q65 0 102 32t37 91q0 55 -37.5 87.5t-99.5 32.5q-60 0 -97.5 -32.5t-37.5 -89.5q0 -59 35.5 -90t97.5 -31z" />
-<glyph unicode="" horiz-adv-x="819" d="M309 -496q12 11 26.5 26t28.5 33.5t26.5 40t21.5 44.5q-28 0 -52 9.5t-42 26t-28.5 39.5t-10.5 50q0 31 10.5 56t28.5 42.5t42 27t50 9.5q28 0 53 -10t43.5 -28.5t29.5 -46t11 -63.5q0 -83 -43.5 -159.5t-124.5 -141.5z" />
-<glyph unicode="" horiz-adv-x="819" d="M543 1495v-16q0 -151 -50.5 -269.5t-150.5 -216.5l-66 49q14 28 25 78t19 111t12 129.5t4 134.5h207z" />
-<glyph horiz-adv-x="1536" d="M874 0v186q67 72 117 138.5t83.5 138.5t50 153t16.5 181q0 83 -19 164.5t-63 146.5t-115 105t-176 40t-176 -40t-115 -105t-63 -147t-19 -166q0 -99 16.5 -180t50 -152.5t83.5 -138t117 -138.5v-186h-498v205h291q-133 99 -207 247t-74 347q0 77 13.5 155.5t43 152 t75.5 137.5t112 111.5t153 75t197 27.5t197 -27.5t153 -75t112 -111.5t75.5 -137.5t43 -152t13.5 -155.5q0 -198 -75.5 -347t-207.5 -247h293v-205h-498z" />
-<glyph d="M791 0v100q-51 -65 -115 -95t-135 -30q-122 0 -185 84v-368l-213 -101v1457h213v-625q0 -72 15 -119.5t42.5 -75.5t66 -39.5t85.5 -11.5q48 0 89 12.5t71.5 41t48 74.5t17.5 114v629h213v-1047h-213z" />
-<glyph horiz-adv-x="471" />
-<glyph horiz-adv-x="524" d="M156 0v1047h213v-1047h-213zM262 1210q-27 0 -50.5 10t-41 27.5t-27.5 41t-10 50.5t10 50.5t27.5 41t41 27.5t50.5 10t50.5 -10t41 -27.5t27.5 -41t10 -50.5t-10 -50.5t-27.5 -41t-41 -27.5t-50.5 -10z" />
-</font>
-</defs></svg>
\ No newline at end of file
diff --git a/pcsd/public/css/overpass_regular-web.ttf b/pcsd/public/css/overpass_regular-web.ttf
deleted file mode 100755
index 243b986..0000000
Binary files a/pcsd/public/css/overpass_regular-web.ttf and /dev/null differ
diff --git a/pcsd/public/css/overpass_regular-web.woff b/pcsd/public/css/overpass_regular-web.woff
deleted file mode 100755
index eb994e4..0000000
Binary files a/pcsd/public/css/overpass_regular-web.woff and /dev/null differ
diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
index 6ef49e2..69b6e53 100644
--- a/pcsd/public/js/nodes-ember.js
+++ b/pcsd/public/js/nodes-ember.js
@@ -72,10 +72,12 @@ Pcs = Ember.Application.createWithMixins({
}
var out =
'<table class="darkdatatable"><tr><th>OPTION</th><th>VALUE</th></tr>\n';
- var banned_options = ["SBD_OPTS", "SBD_WATCHDOG_DEV", "SBD_PACEMAKER"];
+ var banned_options = [
+ "SBD_OPTS", "SBD_WATCHDOG_DEV", "SBD_PACEMAKER", "SBD_DEVICE"
+ ];
$.each(this.get("sbd_config"), function(opt, val) {
if (banned_options.indexOf(opt) == -1) {
- out += '<tr><td>' + opt + '</td><td>' + val + '</td></tr>\n';
+ out += '<tr><td>' + htmlEncode(opt) + '</td><td>' + htmlEncode(val) + '</td></tr>\n';
}
});
return out + '</table>';
@@ -273,7 +275,7 @@ Pcs.GroupSelectorComponent = Ember.Component.extend({
group_list: [],
group_select_content: function() {
var list = [];
- $.each(this.get("group_list"), function(_, group) {
+ $.each(this.getWithDefault("group_list", []), function(_, group) {
list.push({
name: group,
value: group
@@ -809,7 +811,7 @@ Pcs.resourcesContainer.reopen({
groups_enum: function() {
var self = this;
var res = [];
- $.each(self.get("group_list"), function(_, group) {
+ $.each(self.getWithDefault("group_list", []), function(_, group) {
res.push({
name: group,
value: group
@@ -879,7 +881,7 @@ Pcs.ResourceObj = Ember.Object.extend({
}.property("status_val"),
show_status: function() {
return '<span style="' + this.get('status_style') + '">'
- + this.get('status') + (this.get("is_unmanaged") ? " (unmanaged)" : "")
+ + htmlEncode(this.get('status')) + (this.get("is_unmanaged") ? " (unmanaged)" : "")
+ '</span>';
}.property("status_style", "disabled"),
status_class: function() {
diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
index 7f8d429..b7ad72f 100644
--- a/pcsd/public/js/pcsd.js
+++ b/pcsd/public/js/pcsd.js
@@ -102,9 +102,8 @@ function create_group() {
return;
}
var not_primitives = resource_list.filter(function(resource_id) {
- return !Pcs.resourcesContainer.get_resource_by_id(resource_id).get(
- "is_primitive"
- );
+ var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
+ return !(resource_obj && resource_obj.get("is_primitive"));
});
if (not_primitives.length != 0) {
alert("Members of group have to be primitive resources. These resources" +
@@ -209,6 +208,29 @@ function add_node_dialog() {
});
}
+function add_sbd_device_textbox() {
+ var max_count = 3;
+ var device_inputs = $("#add_node_selector .add_node_sbd_device");
+ var count = device_inputs.length;
+ if (count < max_count) {
+ $(device_inputs[count-1]).after(
+ '<tr class="add_node_sbd_device"> \
+ <td> </td> \
+ <td> \
+ <input type="text" name="devices[]" /> \
+ <a href="#" onclick="sbd_device_remove_textbox(this); return false;"> \
+ (-) \
+ </a> \
+ </td> \
+ </tr>'
+ )
+ }
+}
+
+function sbd_device_remove_textbox(obj) {
+ $(obj).parents(".add_node_sbd_device").remove();
+}
+
function checkAddingNode(){
var nodeName = $("#add_node").children("form").find("[name='new_nodename']").val().trim();
if (nodeName == "") {
@@ -822,7 +844,7 @@ function auth_nodes_dialog(unauth_nodes, callback_success, callback_success_one)
dialog_obj.find('#auth_nodes_list').empty();
unauth_nodes.forEach(function(node) {
- dialog_obj.find('#auth_nodes_list').append("\t\t\t<tr><td>" + node + '</td><td><input type="password" name="' + node + '-pass"></td></tr>\n');
+ dialog_obj.find('#auth_nodes_list').append("\t\t\t<tr><td>" + htmlEncode(node) + '</td><td><input type="password" name="' + htmlEncode(node) + '-pass"></td></tr>\n');
});
}
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index 0b2c674..005d45e 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -9,11 +9,16 @@ require 'tempfile'
require 'pcs.rb'
require 'resource.rb'
+require 'settings.rb'
require 'config.rb'
require 'cfgsync.rb'
require 'cluster_entity.rb'
require 'permissions.rb'
require 'auth.rb'
+require 'pcsd_file'
+require 'pcsd_remove_file'
+require 'pcsd_action_command'
+require 'pcsd_exchange_format.rb'
# Commands for remote access
def remote(params, request, auth_user)
@@ -36,6 +41,7 @@ def remote(params, request, auth_user)
:set_configs => method(:set_configs),
:set_certs => method(:set_certs),
:pcsd_restart => method(:remote_pcsd_restart),
+ :pcsd_instance_signature => method(:pcsd_instance_signature),
:get_permissions => method(:get_permissions_remote),
:set_permissions => method(:set_permissions_remote),
:cluster_start => method(:cluster_start),
@@ -86,6 +92,9 @@ def remote(params, request, auth_user)
:booth_set_config => method(:booth_set_config),
:booth_save_files => method(:booth_save_files),
:booth_get_config => method(:booth_get_config),
+ :put_file => method(:put_file),
+ :remove_file => method(:remove_file),
+ :manage_services => method(:manage_services),
}
remote_cmd_with_pacemaker = {
@@ -763,7 +772,14 @@ end
def remote_pcsd_restart(params, request, auth_user)
pcsd_restart()
- return [200, 'success']
+ return JSON.generate({
+ :success => true,
+ :instance_signature => DAEMON_INSTANCE_SIGNATURE,
+ })
+end
+
+def pcsd_instance_signature(params, request, auth_user)
+ return [200, DAEMON_INSTANCE_SIGNATURE]
end
def get_sw_versions(params, request, auth_user)
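The new pcsd_instance_signature handler and the extended remote_pcsd_restart reply both expose DAEMON_INSTANCE_SIGNATURE, which is not defined in this hunk. The idea is that a caller records the signature, requests a restart, and then polls pcsd_instance_signature until a different value comes back, confirming that a new pcsd process has actually started. A minimal sketch of such a constant, assuming it is simply a value chosen once per process (the real definition lives elsewhere in pcsd and may differ):

    # hypothetical sketch only -- not part of this commit
    require 'securerandom'
    DAEMON_INSTANCE_SIGNATURE = SecureRandom.random_number(2**60)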
@@ -791,6 +807,12 @@ def remote_node_available(params, request, auth_user)
:pacemaker_remote => true,
})
end
+ if pacemaker_running?()
+ return JSON.generate({
+ :node_available => false,
+ :pacemaker_running => true,
+ })
+ end
return JSON.generate({:node_available => true})
end
@@ -808,8 +830,12 @@ def remote_add_node(params, request, auth_user, all=false)
if params[:new_ring1addr] != nil
node += ',' + params[:new_ring1addr]
end
+ device_list = []
+ if params[:devices].kind_of?(Array)
+ device_list = params[:devices]
+ end
retval, output = add_node(
- auth_user, node, all, auto_start, params[:watchdog]
+ auth_user, node, all, auto_start, params[:watchdog], device_list
)
end
@@ -1437,6 +1463,9 @@ def update_resource (params, request, auth_user)
end
resource_group = params[:resource_group]
end
+ # workaround for Error: this command is not sufficient for create remote
+ # connection, use 'pcs cluster node add-remote', use --force to override
+ cmd << "--force"
out, stderr, retval = run_cmd(auth_user, *cmd)
if retval != 0
return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out})
@@ -1545,8 +1574,7 @@ def get_avail_resource_agents(params, request, auth_user)
if not allowed_for_local_cluster(auth_user, Permissions::READ)
return 403, 'Permission denied'
end
- agents = getResourceAgents(auth_user)
- return JSON.generate(agents)
+ return JSON.generate(getResourceAgents(auth_user).map{|a| [a, {}]}.to_h)
end
def get_avail_fence_agents(params, request, auth_user)
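The get_avail_resource_agents change above alters the shape of the JSON reply from a plain list of agent names to a mapping of each name to an empty hash. A rough illustration, using made-up agent names (the real names come from getResourceAgents):

    # hypothetical illustration of the new reply shape
    # before: ["Dummy", "IPaddr2"]
    # after:  {"Dummy" => {}, "IPaddr2" => {}}
    JSON.generate(["Dummy", "IPaddr2"].map{|a| [a, {}]}.to_h)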
@@ -2323,6 +2351,21 @@ def check_sbd(param, request, auth_user)
:exist => File.exist?(watchdog)
}
end
+ begin
+ device_list = JSON.parse(param[:device_list])
+ if device_list and device_list.respond_to?('each')
+ out[:device_list] = []
+ device_list.each { |device|
+ out[:device_list] << {
+ :path => device,
+ :exist => File.exists?(device),
+ :block_device => File.blockdev?(device),
+ }
+ }
+ end
+ rescue JSON::ParserError
+ return [400, 'Invalid input data format']
+ end
return [200, JSON.generate(out)]
end
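The device_list handling added above takes a JSON-encoded list of paths and reports, for each one, whether it exists and whether it is a block device. With device_list set to '["/dev/sdb1", "/dev/missing"]', the relevant part of the reply would look roughly like this (the values are hypothetical and depend on the host):

    # hypothetical result of the loop above for two example paths
    out[:device_list] = [
      {:path => '/dev/sdb1',    :exist => true,  :block_device => true},
      {:path => '/dev/missing', :exist => false, :block_device => false},
    ]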
@@ -2341,18 +2384,14 @@ def set_sbd_config(param, request, auth_user)
file.flock(File::LOCK_EX)
file.write(config)
rescue => e
- msg = "Unable to save SBD configuration: #{e}"
- $logger.error(msg)
- return [400, msg]
+ return pcsd_error("Unable to save SBD configuration: #{e}")
ensure
if file
file.flock(File::LOCK_UN)
file.close()
end
end
- msg = 'SBD configuration saved.'
- $logger.info(msg)
- return [200, msg]
+ return pcsd_success('SBD configuration saved.')
end
def get_sbd_config(param, request, auth_user)
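The set_sbd_config hunk above and the hunks that follow collapse the repeated pattern of logging a message and returning a [status, message] pair into pcsd_success and pcsd_error helpers. Their definitions are not part of this excerpt; judging from the code they replace, they presumably amount to something like the following sketch (names taken from the diff, bodies inferred from the removed lines):

    # sketch inferred from the replaced code; the real helpers may differ
    def pcsd_success(msg)
      $logger.info(msg)
      return [200, msg]
    end

    def pcsd_error(msg)
      $logger.error(msg)
      return [400, msg]
    end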
@@ -2366,9 +2405,7 @@ def get_sbd_config(param, request, auth_user)
file.flock(File::LOCK_SH)
out = file.readlines()
rescue => e
- msg = "Unable to get SBD configuration: #{e}"
- $logger.error(msg)
- return [400, msg]
+ return pcsd_error("Unable to get SBD configuration: #{e}")
ensure
if file
file.flock(File::LOCK_UN)
@@ -2383,13 +2420,9 @@ def sbd_disable(param, request, auth_user)
return 403, 'Permission denied'
end
if disable_service(get_sbd_service_name())
- msg = 'SBD disabled'
- $logger.info(msg)
- return [200, msg]
+ return pcsd_success('SBD disabled')
else
- msg = 'Disabling SBD failed'
- $logger.error(msg)
- return [400, msg]
+ return pcsd_error("Disabling SBD failed")
end
end
@@ -2398,13 +2431,9 @@ def sbd_enable(param, request, auth_user)
return 403, 'Permission denied'
end
if enable_service(get_sbd_service_name())
- msg = 'SBD enabled'
- $logger.info(msg)
- return [200, msg]
+ return pcsd_success('SBD enabled')
else
- msg = 'Enabling SBD failed'
- $logger.error(msg)
- return [400, msg]
+ return pcsd_error("Enabling SBD failed")
end
end
@@ -2580,13 +2609,9 @@ def qdevice_client_disable(param, request, auth_user)
return 403, 'Permission denied'
end
if disable_service('corosync-qdevice')
- msg = 'corosync-qdevice disabled'
- $logger.info(msg)
- return [200, msg]
+ return pcsd_success('corosync-qdevice disabled')
else
- msg = 'Disabling corosync-qdevice failed'
- $logger.error(msg)
- return [400, msg]
+ return pcsd_error("Disabling corosync-qdevice failed")
end
end
@@ -2595,17 +2620,11 @@ def qdevice_client_enable(param, request, auth_user)
return 403, 'Permission denied'
end
if not is_service_enabled?('corosync')
- msg = 'corosync is not enabled, skipping'
- $logger.info(msg)
- return [200, msg]
+ return pcsd_success('corosync is not enabled, skipping')
elsif enable_service('corosync-qdevice')
- msg = 'corosync-qdevice enabled'
- $logger.info(msg)
- return [200, msg]
+ return pcsd_success('corosync-qdevice enabled')
else
- msg = 'Enabling corosync-qdevice failed'
- $logger.error(msg)
- return [400, msg]
+ return pcsd_error("Enabling corosync-qdevice failed")
end
end
@@ -2614,13 +2633,9 @@ def qdevice_client_stop(param, request, auth_user)
return 403, 'Permission denied'
end
if stop_service('corosync-qdevice')
- msg = 'corosync-qdevice stopped'
- $logger.info(msg)
- return [200, msg]
+ return pcsd_success('corosync-qdevice stopped')
else
- msg = 'Stopping corosync-qdevice failed'
- $logger.error(msg)
- return [400, msg]
+ return pcsd_error("Stopping corosync-qdevice failed")
end
end
@@ -2629,17 +2644,11 @@ def qdevice_client_start(param, request, auth_user)
return 403, 'Permission denied'
end
if not is_service_running?('corosync')
- msg = 'corosync is not running, skipping'
- $logger.info(msg)
- return [200, msg]
+ return pcsd_success('corosync is not running, skipping')
elsif start_service('corosync-qdevice')
- msg = 'corosync-qdevice started'
- $logger.info(msg)
- return [200, msg]
+ return pcsd_success('corosync-qdevice started')
else
- msg = 'Starting corosync-qdevice failed'
- $logger.error(msg)
- return [400, msg]
+ return pcsd_error("Starting corosync-qdevice failed")
end
end
@@ -2686,98 +2695,118 @@ def unmanage_resource(param, request, auth_user)
end
def booth_set_config(params, request, auth_user)
- unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
- return 403, 'Permission denied'
- end
begin
- unless params[:data_json]
- return [400, "Missing required parameter 'data_json'"]
+ check_permissions(auth_user, Permissions::WRITE)
+ data = check_request_data_for_json(params, auth_user)
+
+ PcsdExchangeFormat::validate_item_map_is_Hash('files', data)
+ PcsdExchangeFormat::validate_item_is_Hash('file', :config, data[:config])
+ if data[:authfile]
+ PcsdExchangeFormat::validate_item_is_Hash('file', :config, data[:config])
+ end
+
+ action_results = {
+ :config => PcsdExchangeFormat::run_action(
+ PcsdFile::TYPES,
+ "file",
+ :config,
+ data[:config].merge({
+ :type => "booth_config",
+ :rewrite_existing => true
+ })
+ )
+ }
+
+ if data[:authfile]
+ action_results[:authfile] = PcsdExchangeFormat::run_action(
+ PcsdFile::TYPES,
+ "file",
+ :authfile,
+ data[:authfile].merge({
+ :type => "booth_authfile",
+ :rewrite_existing => true
+ })
+ )
end
- data = JSON.parse(params[:data_json], {:symbolize_names => true})
- rescue JSON::ParserError
- return [400, 'Invalid input data format']
- end
- config = data[:config]
- authfile = data[:authfile]
- return [400, 'Invalid input data format'] unless (
- config and config[:name] and config[:data]
- )
- return [400, 'Invalid input data format'] if (
- authfile and (not authfile[:name] or not authfile[:data])
- )
- begin
- write_booth_config(config[:name], config[:data])
- if authfile
- write_booth_authfile(authfile[:name], authfile[:data])
+
+ success_codes = [:written, :rewritten]
+ failed_results = action_results.select{|key, result|
+ !success_codes.include?(result[:code])
+ }
+
+ if failed_results.empty?
+ return pcsd_success('Booth configuration saved.')
end
- rescue InvalidFileNameException => e
- return [400, "Invalid format of config/key file name '#{e.message}'"]
+
+ return pcsd_error("Unable to save booth configuration: #{
+ failed_results.reduce([]){|memo, (key, result)|
+ memo << "#{key}: #{result[:code]}: #{result[:message]}"
+ }.join(";")
+ }")
+ rescue PcsdRequestException => e
+ return e.code, e.message
+ rescue PcsdExchangeFormat::Error => e
+ return 400, "Invalid input data format: #{e.message}"
rescue => e
- msg = "Unable to save booth configuration: #{e.message}"
- $logger.error(msg)
- return [400, msg]
+ return pcsd_error("Unable to save booth configuration: #{e.message}")
end
- msg = 'Booth configuration saved.'
- $logger.info(msg)
- return [200, msg]
end
def booth_save_files(params, request, auth_user)
- unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
- return 403, 'Permission denied'
- end
begin
- data = JSON.parse(params[:data_json], {:symbolize_names => true})
- data.each { |file|
- unless file[:name] and file[:data]
- return [400, 'Invalid input data format']
- end
- if file[:name].include?('/')
- return [400, "Invalid file name format '#{file[:name]}'"]
- end
+ check_permissions(auth_user, Permissions::WRITE)
+ data = check_request_data_for_json(params, auth_user)
+ rewrite_existing = (
+ params.include?('rewrite_existing') || params.include?(:rewrite_existing)
+ )
+
+ action_results = Hash[data.each_with_index.map{|file, i|
+ PcsdExchangeFormat::validate_item_is_Hash('file', i, file)
+ [
+ i,
+ PcsdExchangeFormat::run_action(
+ PcsdFile::TYPES,
+ 'file',
+ i,
+ file.merge({
+ :rewrite_existing => rewrite_existing,
+ :type => file[:is_authfile] ? "booth_authfile" : "booth_config"
+ })
+ )
+ ]
+ }]
+
+ results = {:existing => [], :saved => [], :failed => {}}
+
+ code_result_map = {
+ :written => :saved,
+ :rewritten => :saved,
+ :same_content => :saved,
+ :conflict => :existing,
}
- rescue JSON::ParserError, NoMethodError
- return [400, 'Invalid input data format']
- end
- rewrite_existing = (
- params.include?('rewrite_existing') || params.include?(:rewrite_existing)
- )
- conflict_files = []
- data.each { |file|
- next unless File.file?(File.join(BOOTH_CONFIG_DIR, file[:name]))
- if file[:is_authfile]
- cur_data = read_booth_authfile(file[:name])
- else
- cur_data = read_booth_config(file[:name])
- end
- if cur_data != file[:data]
- conflict_files << file[:name]
- end
- }
+ action_results.each{|i, result|
+ name = data[i][:name]
+
+ if code_result_map.has_key?(result[:code])
+ results[code_result_map[result[:code]]] << name
+
+ elsif result[:code] == :unexpected
+ results[:failed][name] = result[:message]
- write_failed = {}
- saved_files = []
- data.each { |file|
- next if conflict_files.include?(file[:name]) and not rewrite_existing
- begin
- if file[:is_authfile]
- write_booth_authfile(file[:name], file[:data])
else
- write_booth_config(file[:name], file[:data])
+      results[:failed][name] = "Unknown process file result: "+
+ "code: '#{result[:code]}': message: '#{result[:message]}'"
+
end
- saved_files << file[:name]
- rescue => e
- msg = "Unable to save file (#{file[:name]}): #{e.message}"
- $logger.error(msg)
- write_failed[file[:name]] = e
- end
- }
- return [200, JSON.generate({
- :existing => conflict_files,
- :saved => saved_files,
- :failed => write_failed
- })]
+ }
+
+ return [200, JSON.generate(results)]
+ rescue PcsdRequestException => e
+ return e.code, e.message
+ rescue PcsdExchangeFormat::Error => e
+ return 400, "Invalid input data format: #{e.message}"
+ end
end
def booth_get_config(params, request, auth_user)
@@ -2825,6 +2854,73 @@ def booth_get_config(params, request, auth_user)
end
end
+def put_file(params, request, auth_user)
+ begin
+ check_permissions(auth_user, Permissions::WRITE)
+
+ files = check_request_data_for_json(params, auth_user)
+ PcsdExchangeFormat::validate_item_map_is_Hash('files', files)
+
+ return pcsd_success(
+ JSON.generate({"files" => Hash[files.map{|id, file_data|
+ PcsdExchangeFormat::validate_item_is_Hash('file', id, file_data)
+ [id, PcsdExchangeFormat::run_action(
+ PcsdFile::TYPES, 'file', id, file_data
+ )]
+ }]})
+ )
+ rescue PcsdRequestException => e
+ return e.code, e.message
+ rescue PcsdExchangeFormat::Error => e
+ return 400, "Invalid input data format: #{e.message}"
+ end
+end
+
+def remove_file(params, request, auth_user)
+ begin
+ check_permissions(auth_user, Permissions::WRITE)
+
+ files = check_request_data_for_json(params, auth_user)
+ PcsdExchangeFormat::validate_item_map_is_Hash('files', files)
+
+ return pcsd_success(
+ JSON.generate({"files" => Hash[files.map{|id, file_data|
+ PcsdExchangeFormat::validate_item_is_Hash('file', id, file_data)
+ [id, PcsdExchangeFormat::run_action(
+ PcsdRemoveFile::TYPES, 'file', id, file_data
+ )]
+ }]})
+ )
+ rescue PcsdRequestException => e
+ return e.code, e.message
+ rescue PcsdExchangeFormat::Error => e
+ return 400, "Invalid input data format: #{e.message}"
+ end
+end
+
+
+def manage_services(params, request, auth_user)
+ begin
+ check_permissions(auth_user, Permissions::WRITE)
+
+ actions = check_request_data_for_json(params, auth_user)
+ PcsdExchangeFormat::validate_item_map_is_Hash('actions', actions)
+
+ return pcsd_success(
+ JSON.generate({"actions" => Hash[actions.map{|id, action_data|
+ PcsdExchangeFormat::validate_item_is_Hash("action", id, action_data)
+ [id, PcsdExchangeFormat::run_action(
+ PcsdActionCommand::TYPES, "action", id, action_data
+ )]
+ }]})
+ )
+ rescue PcsdRequestException => e
+ return e.code, e.message
+ rescue PcsdExchangeFormat::Error => e
+ return 400, "Invalid input data format: #{e.message}"
+ end
+end
+
def _hash_to_argument_list(hash)
result = []
if hash.kind_of?(Hash)
@@ -2963,3 +3059,39 @@ def update_recipient(params, request, auth_user)
end
return [200, 'Recipient updated']
end
+
+def pcsd_success(msg)
+ $logger.info(msg)
+ return [200, msg]
+end
+
+def pcsd_error(msg)
+ $logger.error(msg)
+ return [400, msg]
+end
+
+class PcsdRequestException < StandardError
+ attr_accessor :code
+
+ def initialize(message = nil, code = 400)
+ super(message)
+ self.code = code
+ end
+end
+
+def check_permissions(auth_user, permission)
+  unless allowed_for_local_cluster(auth_user, permission)
+ raise PcsdRequestException.new('Permission denied', 403)
+ end
+end
+
+def check_request_data_for_json(params, auth_user)
+ unless params[:data_json]
+ raise PcsdRequestException.new("Missing required parameter 'data_json'")
+ end
+ begin
+ return JSON.parse(params[:data_json], {:symbolize_names => true})
+ rescue JSON::ParserError
+ raise PcsdRequestException.new('Invalid input data format')
+ end
+end
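
The pcsd_success / pcsd_error / PcsdRequestException helpers above centralize the log-and-return pattern that the handlers earlier in this file now share. A minimal standalone sketch of how a handler composes them, with a stub logger and a plain boolean in place of the real permission check (the handler name and inputs are illustrative only, not part of this commit):

require 'json'

# stub for pcsd's $logger so the sketch runs on its own
$logger = Object.new
def $logger.info(msg); puts "INFO  #{msg}"; end
def $logger.error(msg); puts "ERROR #{msg}"; end

class PcsdRequestException < StandardError
  attr_accessor :code
  def initialize(message = nil, code = 400)
    super(message)
    self.code = code
  end
end

def pcsd_success(msg); $logger.info(msg); [200, msg]; end
def pcsd_error(msg); $logger.error(msg); [400, msg]; end

def check_permissions(allowed)
  # stands in for allowed_for_local_cluster(auth_user, permission)
  raise PcsdRequestException.new('Permission denied', 403) unless allowed
end

def check_request_data_for_json(params)
  unless params[:data_json]
    raise PcsdRequestException.new("Missing required parameter 'data_json'")
  end
  JSON.parse(params[:data_json], :symbolize_names => true)
rescue JSON::ParserError
  raise PcsdRequestException.new('Invalid input data format')
end

# hypothetical handler following the same begin/rescue shape as the ones above
def demo_handler(params, allowed)
  check_permissions(allowed)
  data = check_request_data_for_json(params)
  pcsd_success("received #{data.length} item(s)")
rescue PcsdRequestException => e
  [e.code, e.message]
end

p demo_handler({:data_json => '{"a": 1}'}, true)  # [200, "received 1 item(s)"]
p demo_handler({}, true)                          # [400, "Missing required parameter 'data_json'"]
p demo_handler({:data_json => '{"a": 1}'}, false) # [403, "Permission denied"]
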
diff --git a/pcsd/resource.rb b/pcsd/resource.rb
index 8821f42..d2d7091 100644
--- a/pcsd/resource.rb
+++ b/pcsd/resource.rb
@@ -127,8 +127,12 @@ def getAllConstraints(constraints_dom)
e.elements.each('rule') { |rule|
rule_info = {
'rule_string' => rule_export.export(rule),
- 'rsc' => e.attributes['rsc'],
}
+ if e.attributes["rsc-pattern"]
+ rule_info["rsc-pattern"] = e.attributes["rsc-pattern"]
+ else
+ rule_info["rsc"] = e.attributes["rsc"]
+ end
rule.attributes.each { |name, value|
rule_info[name] = value unless name == 'boolean-op'
}
@@ -168,23 +172,16 @@ def getAllConstraints(constraints_dom)
end
def getResourceAgents(auth_user)
- resource_agent_list = {}
stdout, stderr, retval = run_cmd(
auth_user, PCS, "resource", "list", "--nodesc"
)
if retval != 0
$logger.error("Error running 'pcs resource list --nodesc")
$logger.error(stdout + stderr)
- return {}
+ return []
end
- agents = stdout
- agents.each { |a|
- ra = ResourceAgent.new
- ra.name = a.chomp
- resource_agent_list[ra.name] = ra
- }
- return resource_agent_list
+ return stdout.map{|agent_name| agent_name.chomp}
end
class Resource
@@ -239,48 +236,30 @@ class Resource
end
-class ResourceAgent
- attr_accessor :name, :resource_class, :required_options, :optional_options, :info
- def initialize(name=nil, required_options={}, optional_options={}, resource_class=nil)
- @name = name
- @required_options = required_options
- @optional_options = optional_options
- @resource_class = nil
- end
-
- def provider
- name.gsub(/::.*/,"")
- end
-
- def class
- name.gsub(/.*::(.*):.*/,"$1")
- end
-
- def type
- name.gsub(/.*:/,"")
- end
-
- def name
- @name
- end
-
- def to_json(options = {})
- JSON.generate({"type" => type})
- end
-
- def long_desc
- if info && info.length >= 2
- return info[1]
- end
- return ""
- end
-
- def short_desc
- if info && info.length >= 1
- return info[0]
+def get_resource_agent_name_structure(agent_name)
+ [
+ #only ocf contains a provider
+ /^(?<standard>ocf:[^:]+):(?<type>[^:]+)$/,
+    #a colon can occur in a systemd instance name after @ but it does not separate
+ #a provider and a type
+    /^(?<standard>systemd|service):(?<type>[^:@]+@.*)$/,
+ #others do not contain a provider
+ %r{
+ ^(?<standard>lsb|heartbeat|stonith|upstart|service|systemd|nagios)
+ :
+ (?<type>[^:]+)$
+ }x,
+ ].each{|expression|
+ match = expression.match(agent_name)
+ if match
+ return {
+ :full_name => agent_name,
+ :class_provider => match[:standard],
+ :type => match[:type],
+ }
end
- return ""
- end
+ }
+ return nil
end
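
get_resource_agent_name_structure replaces the old ResourceAgent class with a plain hash built from the regular expressions above. A self-contained sketch of the same parsing, with a few made-up agent names to show what each pattern yields:

# minimal standalone version of the parsing shown above; same regular
# expressions, sample agent names are illustrative only
def parse_agent_name(agent_name)
  [
    /^(?<standard>ocf:[^:]+):(?<type>[^:]+)$/,            # only ocf carries a provider
    /^(?<standard>systemd|service):(?<type>[^:@]+@.*)$/,  # systemd instances may contain ':' after '@'
    /^(?<standard>lsb|heartbeat|stonith|upstart|service|systemd|nagios):(?<type>[^:]+)$/,
  ].each do |expression|
    match = expression.match(agent_name)
    if match
      return {
        :full_name => agent_name,
        :class_provider => match[:standard],
        :type => match[:type],
      }
    end
  end
  nil
end

p parse_agent_name('ocf:heartbeat:IPaddr2')
# {:full_name=>"ocf:heartbeat:IPaddr2", :class_provider=>"ocf:heartbeat", :type=>"IPaddr2"}
p parse_agent_name('systemd:getty@tty1')
# {:full_name=>"systemd:getty@tty1", :class_provider=>"systemd", :type=>"getty@tty1"}
p parse_agent_name('lsb:network')
# {:full_name=>"lsb:network", :class_provider=>"lsb", :type=>"network"}
p parse_agent_name('not-an-agent')
# nil
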
diff --git a/pcsd/session.rb b/pcsd/session.rb
index c54a493..8b09ed8 100644
--- a/pcsd/session.rb
+++ b/pcsd/session.rb
@@ -1,3 +1,4 @@
+gem 'rack', '< 2.0.0'
require 'rack/session/pool'
class SessionPoolLifetime < Rack::Session::Pool
diff --git a/pcsd/settings.rb b/pcsd/settings.rb
index e0b1f8b..a400bc5 100644
--- a/pcsd/settings.rb
+++ b/pcsd/settings.rb
@@ -1,3 +1,4 @@
+PCS_EXEC = '/usr/sbin/pcs'
PCSD_EXEC_LOCATION = '/usr/lib/pcsd/'
PCSD_VAR_LOCATION = '/var/lib/pcsd/'
@@ -17,12 +18,14 @@ CIBADMIN = "/usr/sbin/cibadmin"
SBD_CONFIG = '/etc/sysconfig/sbd'
CIB_PATH='/var/lib/pacemaker/cib/cib.xml'
BOOTH_CONFIG_DIR='/etc/booth'
+PACEMAKER_AUTHKEY='/etc/pacemaker/authkey'
COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb"
COROSYNC_QDEVICE_NET_SERVER_CA_FILE = (
COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR + "/qnetd-cacert.crt"
)
COROSYNC_QDEVICE_NET_CLIENT_CERTS_DIR = "/etc/corosync/qdevice/net/nssdb"
+COROSYNC_AUTHKEY = "/etc/corosync/authkey"
SUPERUSER = 'hacluster'
ADMIN_GROUP = 'haclient'
diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian
index e3f2192..8790bd1 100644
--- a/pcsd/settings.rb.debian
+++ b/pcsd/settings.rb.debian
@@ -1,3 +1,4 @@
+PCS_EXEC = '/usr/sbin/pcs'
PCSD_EXEC_LOCATION = '/usr/share/pcsd/'
PCSD_VAR_LOCATION = '/var/lib/pcsd/'
@@ -17,12 +18,14 @@ CIBADMIN = "/usr/sbin/cibadmin"
SBD_CONFIG = "/etc/default/sbd"
CIB_PATH = "/var/lib/pacemaker/cib/cib.xml"
BOOTH_CONFIG_DIR='/etc/booth'
+PACEMAKER_AUTHKEY='/etc/pacemaker/authkey'
COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb"
COROSYNC_QDEVICE_NET_SERVER_CA_FILE = (
COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR + "/qnetd-cacert.crt"
)
COROSYNC_QDEVICE_NET_CLIENT_CERTS_DIR = "/etc/corosync/qdevice/net/nssdb"
+COROSYNC_AUTHKEY = "/etc/corosync/authkey"
SUPERUSER = 'hacluster'
ADMIN_GROUP = 'haclient'
diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb
index 7bbff46..24ee059 100644
--- a/pcsd/ssl.rb
+++ b/pcsd/ssl.rb
@@ -2,6 +2,7 @@ require 'rubygems'
require 'webrick'
require 'webrick/https'
require 'openssl'
+gem 'rack', '< 2.0.0'
require 'rack'
require 'socket'
@@ -65,35 +66,42 @@ def run_server(server, webrick_options, secondary_addrs)
primary_addr = webrick_options[:BindAddress]
port = webrick_options[:Port]
- ciphers = 'DEFAULT:!RC4:!3DES:@STRENGTH!'
+ ciphers = 'DEFAULT:!RC4:!3DES:@STRENGTH'
ciphers = ENV['PCSD_SSL_CIPHERS'] if ENV['PCSD_SSL_CIPHERS']
# no need to validate ciphers, ssl context will validate them for us
$logger.info("Listening on #{primary_addr} port #{port}")
- server.run(Sinatra::Application, webrick_options) { |server_instance|
- # configure ssl options
- server_instance.ssl_context.ciphers = ciphers
- # set listening addresses
- secondary_addrs.each { |addr|
- $logger.info("Adding listener on #{addr} port #{port}")
- server_instance.listen(addr, port)
- }
- # notify systemd we are running
- if ISSYSTEMCTL
- socket_name = ENV['NOTIFY_SOCKET']
- if socket_name
- if socket_name.start_with?('@')
- # abstract namespace socket
- socket_name[0] = "\0"
+ begin
+ server.run(Sinatra::Application, webrick_options) { |server_instance|
+ # configure ssl options
+ server_instance.ssl_context.ciphers = ciphers
+ # set listening addresses
+ secondary_addrs.each { |addr|
+ $logger.info("Adding listener on #{addr} port #{port}")
+ server_instance.listen(addr, port)
+ }
+ # notify systemd we are running
+ if ISSYSTEMCTL
+ if ENV['NOTIFY_SOCKET']
+ socket_name = ENV['NOTIFY_SOCKET'].dup
+ if socket_name.start_with?('@')
+ # abstract namespace socket
+ socket_name[0] = "\0"
+ end
+ $logger.info("Notifying systemd we are running (socket #{socket_name})")
+ sd_socket = Socket.new(Socket::AF_UNIX, Socket::SOCK_DGRAM)
+ sd_socket.connect(Socket.pack_sockaddr_un(socket_name))
+ sd_socket.send('READY=1', 0)
+ sd_socket.close()
end
- $logger.info("Notifying systemd we are running (socket #{socket_name})")
- sd_socket = Socket.new(Socket::AF_UNIX, Socket::SOCK_DGRAM)
- sd_socket.connect(Socket.pack_sockaddr_un(socket_name))
- sd_socket.send('READY=1', 0)
- sd_socket.close()
end
- end
- }
+ }
+ rescue Errno::EADDRNOTAVAIL, Errno::EADDRINUSE => e
+ $logger.error 'Unable to bind to specified address(es), exiting'
+ $logger.error e.message
+ rescue SocketError => e
+ $logger.error e.message
+ end
end
if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE)
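
A small but easily missed detail in the hunk above: ENV values are returned as frozen strings, so rewriting the first byte of an abstract-namespace socket name only works on a copy, which is why the new code takes ENV['NOTIFY_SOCKET'].dup before assigning socket_name[0] = "\0". A quick standalone illustration (the socket name is made up):

name = '@pcsd-notify'.freeze   # stands in for a frozen ENV['NOTIFY_SOCKET'] value
begin
  name[0] = "\0"               # in-place edit of a frozen string raises
rescue => e
  puts "rejected: #{e.class}"  # FrozenError (a RuntimeError on older Rubies)
end
copy = name.dup
copy[0] = "\0"                 # abstract namespace sockets start with a NUL byte
puts copy.bytes.first          # 0
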
diff --git a/pcsd/test/test_config.rb b/pcsd/test/test_config.rb
index 26ffaf9..441ac6d 100644
--- a/pcsd/test/test_config.rb
+++ b/pcsd/test/test_config.rb
@@ -122,12 +122,12 @@ class TestConfig < Test::Unit::TestCase
]
}'
cfg = PCSConfig.new(text)
- assert_equal(
- [[
- 'error',
- "Unable to parse pcs_settings file: 399: unexpected token at '\"rh71-node2\"\n ]\n }\n ]\n}'"
- ]],
- $logger.log
+ assert_equal(1, $logger.log.length)
+ assert_equal('error', $logger.log[0][0])
+ assert_match(
+ # the number is based on JSON gem version
+ /Unable to parse pcs_settings file: \d+: unexpected token/,
+ $logger.log[0][1]
)
assert_equal(fixture_empty_config, cfg.text)
end
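
The switch from assert_equal to assert_match above is needed because the character offset in the JSON gem's parse error varies with the gem version. A quick standalone check of the pattern against two made-up messages:

pattern = /Unable to parse pcs_settings file: \d+: unexpected token/
samples = [
  %q{Unable to parse pcs_settings file: 399: unexpected token at '"rh71-node2"'},
  %q{Unable to parse pcs_settings file: 457: unexpected token at '"rh71-node2"'},
]
samples.each { |msg| puts(pattern =~ msg ? 'match' : 'no match') }  # prints "match" twice
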
diff --git a/pcsd/views/_resource.erb b/pcsd/views/_resource.erb
index 86e5567..258e88f 100644
--- a/pcsd/views/_resource.erb
+++ b/pcsd/views/_resource.erb
@@ -59,19 +59,12 @@
<tr>
<td class="bold">Class/Provider</td>
<td>
- <%
- resources = @resource_agents.keys
-
- class_providers = []
- resources.each {|k|
- class_providers << k[0,k.rindex(':')]
- }
- class_providers.uniq!
- class_providers.sort!
- %>
<select id="resource_class_provider_selector">
- <% class_providers.each {|cp| %>
- <option <%= "selected" if cp == "ocf:heartbeat" %> value="<%=cp%>"><%=cp%></option>
+ <% @resource_agent_structures.map{|a| a[:class_provider]}.uniq.sort.each{|cp| %>
+ <option
+ <%= "selected" if cp == "ocf:heartbeat" %>
+ value="<%=cp%>"
+ ><%=cp%></option>
<% } %>
</select>
</td>
@@ -80,13 +73,13 @@
<td class="bold">Type</td>
<td>
<select id="all_ra_types" style="display:none;">
- <% resources.sort_by{|a|a.downcase}.each { |key| %>
- <option width=250px value="<%=key%>"><%=key[(key.rindex(':')+1)..-1]%></option>
+ <% @resource_agent_structures.sort_by{|a| a[:full_name].downcase}.each{|a| %>
+ <option value="<%=a[:full_name]%>"><%=a[:type]%></option>
<% } %>
</select>
<select id="add_ra_type" onchange="load_resource_form(this.value);">
- <% resources.sort_by{|a|a.downcase}.each { |key| %>
- <option width=250px <%= key == "ocf:heartbeat:IPaddr2" ? "selected" : "" %> value="<%=key%>"><%=key%></option>
+ <% @resource_agent_structures.each{|a| %>
+ <option width=250px value="<%=a[:full_name]%>"><%=a[:type]%></option>
<% } %>
</select>
</td>
diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
index bd7c234..ceb2b7d 100644
--- a/pcsd/views/main.erb
+++ b/pcsd/views/main.erb
@@ -978,6 +978,12 @@ Use the 'Add' button to submit the form.">
table_name="Optional Arguments"
parameters=agent.optional_parameters
}}
+ {{parameters-table
+ table_id="advanced_args"
+ table_id_suffix=table_id_suffix
+ table_name="Advanced Arguments"
+ parameters=agent.advanced_parameters
+ }}
{{#if resource}}
<input class="apply_changes" type="button" onclick="$(this).hide();create_resource($(this)
.parents('form'),true); return false;" value="Apply Changes" style="margin-top: 1em;" />
diff --git a/pcsd/views/nodes.erb b/pcsd/views/nodes.erb
index 8fccd25..3c3aeed 100644
--- a/pcsd/views/nodes.erb
+++ b/pcsd/views/nodes.erb
@@ -347,6 +347,15 @@
<input type="text" name="watchdog" placeholder="/dev/watchdog" />
</td>
</tr>
+ <tr class="add_node_sbd_device">
+ <td>
+ SBD device(s):
+ </td>
+ <td>
+ <input type="text" name="devices[]" />
+ <a href="#" onclick="add_sbd_device_textbox(); return false;">(+)</a>
+ </td>
+ </tr>
{{/if}}
{{#if Pcs.is_cman_with_udpu_transport}}
<tr>
diff --git a/pylintrc b/pylintrc
index 1b26fa1..3bdb564 100644
--- a/pylintrc
+++ b/pylintrc
@@ -1,10 +1,13 @@
[MESSAGES CONTROL]
#useful links:
#http://pylint-messages.wikidot.com/all-codes
-#https://docs.pylint.org/features.html
+#https://pylint.readthedocs.io/en/latest/reference_guide/features.html
#
#can be used for example with
+#find ./ -name '*.py' -exec pylint --reports=n \{\} --msg-template='{path}:{line}: {msg_id}: [{symbol}] {msg}' \;
+#or
#pylint --reports=n ./**/*.py --msg-template='{path}:{line}: {msg_id}: [{symbol}] {msg}'
+#(which gives duplicate-code problems)
#
#project adapted to pylint 1.5.4, astroid 1.4.4
#
@@ -61,21 +64,21 @@
#W0703: [broad-except] Catching too general exception %s
#W0710: [nonstandard-exception] Exception doesn't inherit from standard "Exception" class
#W1401: [anomalous-backslash-in-string] Anomalous backslash in string: \'%s\'. String constant might be missing an r prefix.
-disable=no-name-in-module, import-error, nonstandard-exception, unused-argument, redefined-outer-name, bare-except, anomalous-backslash-in-string, no-member, star-args, undefined-loop-variable, maybe-no-member, broad-except, too-few-public-methods, not-callable, protected-access, method-hidden, too-many-arguments, global-statement, unbalanced-tuple-unpacking, fixme, lost-exception, dangerous-default-value, too-many-return-statements, no-self-use, no-init, redefined-builtin, wildcard-imp [...]
+disable=no-name-in-module, import-error, nonstandard-exception, unused-argument, redefined-outer-name, bare-except, anomalous-backslash-in-string, no-member, star-args, undefined-loop-variable, maybe-no-member, broad-except, too-few-public-methods, not-callable, protected-access, method-hidden, too-many-arguments, global-statement, unbalanced-tuple-unpacking, fixme, lost-exception, dangerous-default-value, too-many-return-statements, no-self-use, no-init, redefined-builtin, wildcard-imp [...]
[DESIGN]
# Maximum number of locals for function / method body
-max-locals=48
+max-locals=47
# Maximum number of statements in function / method body
-max-statements=211
+max-statements=160
# Maximum number of branch for function / method body
-max-branches=68
+max-branches=63
# Maximum number of public methods for a class (see R0904).
-max-public-methods=113
-# Maximum number of boolean expressions in a if statement
+max-public-methods=115
+# Maximum number of boolean expressions in an if statement (default 5)
+# currently in pcs/utils.py and in pcs/resource.py
max-bool-expr=6
-
[LOGGING]
# Ignore imports when computing similarities.
# unfortunately don't work with from abc import (...similar lines...
@@ -91,12 +94,11 @@ dummy-variables-rgx=_$|dummy
[FORMAT]
-# Maximum number of lines in a module
-max-module-lines=4930
# Maximum number of characters on a single line.
-max-line-length=1291
+max-line-length=637
[ELIF]
# Maximum number of nested blocks for function / method body
-max-nested-blocks=7
+# currently in pcs/utils.py, pcs/resource.py, pcs/cluster.py, pcs/constraint.py
+max-nested-blocks=6
diff --git a/setup.py b/setup.py
index 8def987..1525aae 100644
--- a/setup.py
+++ b/setup.py
@@ -9,6 +9,7 @@ class CleanCommand(Command):
def initialize_options(self):
self.cwd = None
def finalize_options(self):
+ #pylint: disable=attribute-defined-outside-init
self.cwd = os.getcwd()
def run(self):
assert os.getcwd() == self.cwd, 'Must be in package root: %s' % self.cwd
@@ -16,14 +17,14 @@ class CleanCommand(Command):
setup(
name='pcs',
- version='0.9.155',
+ version='0.9.158',
description='Pacemaker Configuration System',
author='Chris Feist',
author_email='cfeist@redhat.com',
url='https://github.com/ClusterLabs/pcs',
packages=find_packages(),
package_data={'pcs':[
- 'bash_completion.sh',
+ 'bash_completion',
'pcs.8',
'pcs',
'test/resources/*.xml',
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/pcs.git