[Debian-ha-commits] [crmsh] 01/05: Imported Upstream version 2.2.1

Adrian Vondendriesch discostu-guest at moszumanska.debian.org
Tue May 3 18:30:34 UTC 2016


This is an automated email from the git hooks/post-receive script.

discostu-guest pushed a commit to branch master
in repository crmsh.

commit 18ff19edd5400c86725240b7a12824d8b92aedd6
Author: Adrian Vondendriesch <adrian.vondendriesch at credativ.de>
Date:   Tue May 3 20:03:58 2016 +0200

    Imported Upstream version 2.2.1
---
 .gitignore                               |    5 +
 .landscape.yaml                          |   14 +
 AUTHORS                                  |    6 +
 ChangeLog                                |   60 ++
 Makefile.am                              |    9 +-
 README.md                                |   30 +-
 configure.ac                             |    4 +-
 contrib/git-hook-pre-commit              |   14 +
 contrib/pygments_crmsh_lexers/ansiclr.py |    5 +-
 contrib/pygments_crmsh_lexers/crmsh.py   |   13 +-
 data-manifest                            |    2 +
 doc/crm.8.adoc                           |   37 +-
 doc/website-v1/man-1.2.adoc              |    2 +-
 doc/website-v1/man-2.0.adoc              |    2 +-
 hb_report/hb_report.in                   |   28 +-
 hb_report/utillib.sh                     |   25 +-
 modules/cache.py                         |    4 +-
 modules/cibconfig.py                     |  314 ++++----
 modules/cibstatus.py                     |    4 +-
 modules/clidisplay.py                    |   22 +-
 modules/cliformat.py                     |   14 +-
 modules/command.py                       |   25 +-
 modules/config.py                        |   21 +-
 modules/constants.py                     |    8 +-
 modules/corosync.py                      |   47 +-
 modules/crm_gv.py                        |   48 +-
 modules/crm_pssh.py                      |   32 +-
 modules/handles.py                       |    4 +-
 modules/help.py                          |    2 +-
 modules/history.py                       | 1190 ++++++------------------------
 modules/idmgmt.py                        |    8 +-
 modules/log_patterns.py                  |   73 +-
 modules/log_patterns_118.py              |   76 --
 modules/logparser.py                     |  627 ++++++++++++++++
 modules/logtime.py                       |  218 ++++++
 modules/main.py                          |   13 +-
 modules/ordereddict.py                   |  260 +++----
 modules/orderedset.py                    |   10 +-
 modules/pacemaker.py                     |   16 +-
 modules/parse.py                         |   38 +-
 modules/ra.py                            |   86 ++-
 modules/rsctest.py                       |   12 +-
 modules/schema.py                        |    6 +-
 modules/scripts.py                       |  134 ++--
 modules/term.py                          |   46 +-
 modules/tmpfiles.py                      |    8 +-
 modules/ui_cib.py                        |    2 +-
 modules/ui_configure.py                  |   23 +-
 modules/ui_context.py                    |   22 +-
 modules/ui_corosync.py                   |    4 +-
 modules/ui_history.py                    |   49 +-
 modules/ui_maintenance.py                |    3 +-
 modules/ui_node.py                       |   21 +-
 modules/ui_options.py                    |    2 +-
 modules/ui_script.py                     |   54 +-
 modules/ui_template.py                   |    2 +-
 modules/ui_utils.py                      |    1 -
 modules/utils.py                         |  177 +++--
 modules/xmlutil.py                       |   67 +-
 requirements.txt                         |    1 +
 scripts/apache/main.yml                  |    7 +-
 scripts/clvm-vg/main.yml                 |    2 +-
 scripts/clvm/main.yml                    |    2 +-
 scripts/db2/main.yml                     |    6 +-
 scripts/drbd/main.yml                    |    2 +-
 scripts/exportfs/main.yml                |   14 +-
 scripts/filesystem/main.yml              |    4 +-
 scripts/gfs2-base/main.yml               |   10 +-
 scripts/gfs2/main.yml                    |   28 +-
 scripts/health/collect.py                |    2 +-
 scripts/init/configure.py                |    2 +-
 scripts/libvirt/main.yml                 |    8 +-
 scripts/mailto/main.yml                  |    4 +-
 scripts/nfsserver/main.yml               |   24 +-
 scripts/ocfs2/main.yml                   |   72 +-
 scripts/raid-lvm/main.yml                |    8 +-
 scripts/sap-as/main.yml                  |   10 +-
 scripts/sap-ci/main.yml                  |   10 +-
 scripts/sap-db/main.yml                  |   10 +-
 scripts/sap-simple-stack-plus/main.yml   |   34 +-
 scripts/sap-simple-stack/main.yml        |   18 +-
 scripts/sbd/main.yml                     |   24 +-
 setup.py                                 |    6 +-
 test/list-undocumented-commands.py       |    2 +-
 test/profile-history.sh                  |   22 +
 test/testcases/edit.exp                  |    1 -
 test/testcases/history                   |    3 +
 test/testcases/history.exp               |  154 +++-
 test/testcases/ra.exp                    |    6 +-
 test/testcases/scripts.exp               |   44 +-
 test/unittests/test_bugs.py              |   32 +-
 test/unittests/test_cliformat.py         |   21 +-
 test/unittests/test_corosync.py          |   22 +-
 test/unittests/test_scripts.py           |   51 ++
 test/unittests/test_time.py              |   11 +-
 95 files changed, 2688 insertions(+), 2038 deletions(-)

diff --git a/.gitignore b/.gitignore
index b1cdedc..73bb6ec 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
 *.pyc
 *~
 #*.*#
+.#*
 doc/website-v1/gen
 Makefile.in
 autom4te.cache
@@ -12,3 +13,7 @@ patches/*
 .project
 .settings
 .pydevproject
+
+contrib/build/
+contrib/dist/
+contrib/pygments_crmsh_lexers.egg-info/
diff --git a/.landscape.yaml b/.landscape.yaml
new file mode 100644
index 0000000..fccc0a6
--- /dev/null
+++ b/.landscape.yaml
@@ -0,0 +1,14 @@
+max-line-length: 160
+python-targets:
+  - 2
+ignore-paths:
+  - contrib
+pylint:
+  disable:
+    - unused-argument
+    - unused-variable
+    - too-many-arguments
+    - too-many-statements
+    - too-many-locals
+    - too-many-branches
+    - cyclic-import
diff --git a/AUTHORS b/AUTHORS
index 86fd4ea..2e50425 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -2,21 +2,27 @@ NOTE:	The work of everyone on this project is dearly appreciated. If you
 	are not listed here but should be, please notify us!
 
 	afederic <afederic[at]gmail[dot]com>
+	Adam Spiers <aspiers[at]suse[dot]com>
+	Andrei Maruha <Andrei_Maruha[at]epam[dot]com>
 	Andrew Beekhof <andrew[at]beekhof[dot]net>
 	Borislav Borisov <borislav[dot]v[dot]borisov[at]gmail[dot]com>
 	Christian Seiler <christian[at]iwakd[dot]de>
+	Daniel Hoffend <dh[at]dotlan[dot]net>
 	Dejan Muhamedagic <dejan[at]suse[dot]de>
 	Federica Teodori <federica[dot]teodori[at]googlemail[dot]com>
 	Florian Haas <florian[dot]haas[at]linbit[dot]com>
 	Goldwyn Rodrigues <rgoldwyn[at]novell[dot]com>
 	Hideo Yamauchi <renayama19661014[at]ybb[dot]ne[dot]jp>
 	Holger Teutsch <holger[dot]teutsch[at]web[dot]de>
+	Kai Kang <kai[dot]kang[at]windriver[dot]com>
 	Kazunori INOUE <kazunori[dot]inoue3[at]gmail[dot]com>
 	Keisuke MORI <keisuke[dot]mori+ha[at]gmail[dot]com>
 	Kristoffer Gronlund <kgronlund[at]suse[dot]com>
 	Lars Ellenberg <lars[dot]ellenberg[at]linbit[dot]com>
 	Lars Marowsky-Brée <lmb[at]suse[dot]de>
+	Marc A. Smith <marc[at]astersmith[dot]com>
 	Michael Prokop <devnull[at]localhost>
+	Motaharu Kobu <mkubo[at]3ware[dot]co[dot]jp>
 	NAKAHIRA Kazutomo <nakahira[dot]kazutomo[at]oss[dot]ntt[dot]co[dot]jp>
 	nozawat <nozawat[at]gmail[dot]com>
 	renayama19661014 <renayama19661014[at]ybb[dot]ne[dot]jp>
diff --git a/ChangeLog b/ChangeLog
index f48ae06..a8a56b2 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,63 @@
+* Thu Apr 28 2016 Kristoffer Gronlund <kgronlund at suse.com> and many others
+- Release 2.2.1
+- medium: ui_node: Fix crash in node fence command (bsc#974902)
+- low: parse: Don't validate operation name in parser (bsc#975357)
+- low: constants: Add missing reload operation to parser
+- medium: ui_node: Fix "crm node fence" (bsc#974902) (#134)
+- low: corosync: Recycle node IDs when possible
+- low: scripts: Only print debug output locally unless there were remote actions
+- low: cibconfig: Don't mix up CLI name with XML tag
+- low: parser: ignore case for attr: prefix
+- medium: scripts: Use os.uname() to find hostname (#128)
+- low: history: Don't skip nodes without logs
+- low: logparser: Don't crash on nodes without logs
+- low: report: if present, use the subsecond part from syslog timestamps
+- low: report: add information about booth
+- low: history: update the syslog format matching
+- low: scripts: Need sudo if non-local call
+- medium: hb_report: Add timeout to SSH connection (bsc#971690)
+- low: scripts: Clean up various scripts
+- medium: main: Add -o|--opt to pass extra options for crmsh
+- low: command: handle stray regex characters in input
+- low: doc: make history example consistent with timeframe deprecation
+- medium: scripts: Don't require sudo for root
+- medium: scripts: inline scripts for call actions
+- medium: scripts: Simplify SBD script (bsc#968076) (fate#318320)
+- low: logparser: Add cib info to __meta for hawk
+- low: hb_report: Suggest user checks timeframe on empty logs (bsc#970823)
+- medium: hb_report: Use server attribute for remote nodes if set (bsc#970819)
+- high: history: Faster log parsing (bsc#920278)
+- low: log_patterns_118: Add captures to log patterns for tagging (bsc#970278)
+- medium: crm_pssh: Fix live refresh of journalctl logs (bsc#970931)
+- low: hb_report: Warn if generated report is empty (bsc#970823)
+- low: hb_report: Print covered time span at exit (bsc#970823)
+- low: logtime: Improve performance of syslog_ts (bsc#970278)
+- low: scripts: Fix error in service action
+- low: history: use os.listdir to list history sessions
+- medium: ui_node: Use stonith_admin -F to fence remote nodes (bsc#967907)
+- low: ui_node: Less cryptic query when fencing node
+- low: config: Messed up previous fix (#119)
+- low: config: Clean up libdir configuration (#119)
+- medium: config: make multiarch dependency a dynamic include (#119)
+- high: ui_configure: Fix commit force (#120)
+- medium: config: add multiarch support for libdir
+- Makefile.am: do not remove generated docs
+- medium: hb_report: Don't collect logs on non-nodes (bsc#959031)
+- medium: ui_configure: Only wait for DC if resources were stopped (#117)
+- low: Fix title style vs. sentence style in cluster scripts (bsc#892108)
+- medium: command: Disable fuzzy matcher for completion (#116)
+- Remove fix for Debian
+- Remove AM_CONDITIONAL for UNAME_IS_DEBIAN
+- medium: corosync: added optional parameter [name] to "corosync add-node" function
+- added the "push" method to the "configure load" command which will remove any configuration lines that are not given in the cib/xml configuration file to import
+- medium: constants: clone-min meta attribute (new in Pacemaker 1.1.14)
+- Low: maintenance: allow action to be forced
+- Medium: history: update patterns for resource (new lrmd)
+- medium: scripts: Updated SBD cluster script
+- high: scripts: fix broken cluster init script (bsc#963135)
+- high: scripts: Improved OCFS2 cluster script (bsc#953984)
+- high: history: Parse log lines without timestamp (bsc#955581)
+
 * Fri Jan 15 2016 Kristoffer Grönlund <kgronlund at suse.com> and many others
 - Release 2.2.0
 - medium: history: Fix live report refresh (bsc#950422) (bsc#927414)
diff --git a/Makefile.am b/Makefile.am
index d89affb..8ed7b9f 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -68,21 +68,16 @@ all-local:
 		--build-base $(shell readlink -f $(builddir))/build \
 		--verbose)
 
-# Fix for GNU/Linux
-if UNAME_IS_DEBIAN
-python_prefix = 
-else
-python_prefix = --prefix=$(DESTDIR)$(prefix)
-endif
+python_prefix = --prefix=$(prefix)
 
 install-exec-local:
 	-mkdir -p $(DESTDIR)$(pkgpythondir)
 	$(PYTHON) $(srcdir)/setup.py install \
+		--root $(DESTDIR) \
 		$(python_prefix) \
 		--record $(DESTDIR)$(pkgpythondir)/install_files.txt \
 		--verbose
 	$(INSTALL) -d -m 770 $(DESTDIR)/$(CRM_CACHE_DIR)
-	-rm -rf $(generated_docs) $(generated_mans)
 
 uninstall-local:
 	cat $(DESTDIR)$(pkgpythondir)/install_files.txt | xargs rm -rf
diff --git a/README.md b/README.md
index c7ced97..1f8791c 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,8 @@
 
 [![Build Status](https://travis-ci.org/ClusterLabs/crmsh.svg?branch=master)](https://travis-ci.org/ClusterLabs/crmsh)
 
+## Introduction
+
 crmsh is a command-line interface for High-Availability cluster
 management on GNU/Linux systems, and part of the Clusterlabs
 project. It simplifies the configuration, management and
@@ -12,17 +14,20 @@ crmsh can function both as an interactive shell with tab completion
 and inline documentation, and as a command-line tool. It can also be
 used in batch mode to execute commands from files.
 
-<br />
-##### More Information
+## Documentation
 
 * The website for crmsh is here: [crmsh @ Github.io](http://crmsh.github.io).
 * Documentation for the latest stable release is found at the [Github.io documentation](http://crmsh.github.io) page.
 
-
-<br />
 ## Installation
 
-Autoconf is used to take care of platform dependent locations. It is mainly inherited from the Pacemaker source.
+The GNU Autotools suite is used to configure the OCF root directory,
+the Asciidoc tool which is used to generate documentation and the
+default daemon user (usually hacluster).
+
+It then calls the python setuptools setup.py to actually process the
+Python module sources and install into the Python system site-packages
+directory.
 
 ```shell
 ./autogen.sh
@@ -31,8 +36,6 @@ make
 make install
 ```
 
-
-<br />
 ## Manifest
 
 ```shell
@@ -45,8 +48,6 @@ make install
 ./hb_report: log file collection and analysis tool
 ```
 
-
-<br />
 ## Development
 
 crmsh is implemented in Python. The source code for crmsh is kept in a
@@ -57,6 +58,15 @@ version, install git and run this command:
 git clone https://github.com/ClusterLabs/crmsh
 ```
 
-<br />
+There is a git `pre-commit` hook used to update the data-manifest
+which lists all the data files to be installed. To install this, run
+
+```shell
+cp contrib/git-hook-pre-commit .git/hooks/pre-commit
+chmod +x .git/hooks/pre-commit
+```
+
+## Community
+
 * Bugs and issues can be reported at the [crmsh issues @ Github.com](https://github.com/clusterlabs/crmsh/issues) page.
 * Any other questions or comments can be made on the [Clusterlabs users mailing list](http://clusterlabs.org/mailman/listinfo/users).
diff --git a/configure.ac b/configure.ac
index f378f26..fcc32be 100644
--- a/configure.ac
+++ b/configure.ac
@@ -8,7 +8,7 @@ dnl License: GNU General Public License (GPL)
 
 AC_PREREQ([2.53])
 
-AC_INIT([crmsh],[2.2.0],[users at clusterlabs.org])
+AC_INIT([crmsh],[2.2.1],[users at clusterlabs.org])
 
 AC_ARG_WITH(version,
     [  --with-version=version   Override package version (if you're a packager needing to pretend) ],
@@ -41,8 +41,6 @@ dnl command line
 dnl Wrap in m4_ifdef to avoid breaking on older platforms
 m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES])
 
-AM_CONDITIONAL([UNAME_IS_DEBIAN], [test x`uname -v | grep -oh Debian` = x"Debian"])
-
 AC_SUBST(OCF_ROOT_DIR)
 AC_SUBST(CRM_DAEMON_USER)
 
diff --git a/contrib/git-hook-pre-commit b/contrib/git-hook-pre-commit
new file mode 100755
index 0000000..f4bd35a
--- /dev/null
+++ b/contrib/git-hook-pre-commit
@@ -0,0 +1,14 @@
+#!/bin/sh
+#
+# An example hook script to verify what is about to be committed.
+# Called by "git commit" with no arguments.  The hook should
+# exit with non-zero status after issuing an appropriate message if
+# it wants to stop the commit.
+#
+# To enable this hook, rename this file to "pre-commit".
+
+root="$(git rev-parse --show-toplevel)"
+[ -d "$root" ] || exit 1
+
+./update-data-manifest.sh
+git add ./data-manifest
diff --git a/contrib/pygments_crmsh_lexers/ansiclr.py b/contrib/pygments_crmsh_lexers/ansiclr.py
index 42d975e..b75a888 100644
--- a/contrib/pygments_crmsh_lexers/ansiclr.py
+++ b/contrib/pygments_crmsh_lexers/ansiclr.py
@@ -9,9 +9,8 @@
     :license: BSD, see LICENSE for details.
 """
 
-from pygments.lexer import RegexLexer, include, bygroups
-from pygments.token import Generic, Comment, String, Text, Keyword, Name, \
-    Punctuation, Number
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Generic, Text
 
 __all__ = ['ANSIColorsLexer']
 
diff --git a/contrib/pygments_crmsh_lexers/crmsh.py b/contrib/pygments_crmsh_lexers/crmsh.py
index 59b7a74..31ac223 100644
--- a/contrib/pygments_crmsh_lexers/crmsh.py
+++ b/contrib/pygments_crmsh_lexers/crmsh.py
@@ -10,11 +10,8 @@
 """
 
 
-import re
-
-from pygments.lexer import RegexLexer, bygroups, words, include, default
-from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
-    Number, Punctuation, Literal, Whitespace
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Comment, Operator, Keyword, Name, String, Number, Punctuation, Whitespace
 
 __all__ = ['CrmshLexer']
 
@@ -47,8 +44,8 @@ class CrmshLexer(RegexLexer):
     acl_mod = (r'(?:tag|ref|reference|attribute|type|xpath)')
     bin_ops = (r'(?:lt|gt|lte|gte|eq|ne)')
     val_qual = (r'(?:string|version|number)')
-    rsc_role_action=(r'(?:Master|Started|Slave|Stopped|'
-        r'start|promote|demote|stop)')
+    rsc_role_action = (r'(?:Master|Started|Slave|Stopped|'
+                       r'start|promote|demote|stop)')
 
     tokens = {
         'root': [
@@ -68,7 +65,7 @@ class CrmshLexer(RegexLexer):
             (sub, Keyword),
             (acl, Keyword),
             # binary operators
-            (r'(?:%s:)?(%s)(?![\w#$-])' % (val_qual,bin_ops),
+            (r'(?:%s:)?(%s)(?![\w#$-])' % (val_qual, bin_ops),
                 Operator.Word),
             # other operators
             (bin_rel, Operator.Word),
diff --git a/data-manifest b/data-manifest
index d2cd550..9bc1880 100644
--- a/data-manifest
+++ b/data-manifest
@@ -72,6 +72,7 @@ test/descriptions
 test/evaltest.sh
 test/history-test.tar.bz2
 test/list-undocumented-commands.py
+test/profile-history.sh
 test/README.regression
 test/regression.sh
 test/run
@@ -147,6 +148,7 @@ test/unittests/scripts/inc2/main.yml
 test/unittests/scripts/legacy/main.yml
 test/unittests/scripts/templates/apache.xml
 test/unittests/scripts/templates/virtual-ip.xml
+test/unittests/scripts/unified/main.yml
 test/unittests/scripts/v2/main.yml
 test/unittests/scripts/vipinc/main.yml
 test/unittests/scripts/vip/main.yml
diff --git a/doc/crm.8.adoc b/doc/crm.8.adoc
index 305f334..ccd353b 100644
--- a/doc/crm.8.adoc
+++ b/doc/crm.8.adoc
@@ -1,5 +1,5 @@
 :man source:   crm
-:man version:  2.2.0
+:man version:  2.2.1
 :man manual:   crmsh documentation
 
 crm(8)
@@ -97,6 +97,11 @@ OPTIONS
     Extra directory where crm looks for cluster scripts, or a list of
     directories separated by semi-colons (e.g. +/dir1;/dir2;etc.+).
 
+*-o, --opt*='OPTION=VALUE'::
+     Set crmsh option temporarily. If the options are saved using
+     +options save+ then the value passed here will also be saved.
+     Multiple options can be set by using +-o+ multiple times.
+
 [[topics_Introduction,Introduction]]
 == Introduction
 
@@ -1211,7 +1216,7 @@ sets only the address for ring0.
 
 Usage:
 .........
-add-node <addr>
+add-node <addr> [name]
 .........
 
 [[cmdhelp_corosync_del-node,Remove a corosync node]]
@@ -2936,8 +2941,11 @@ group vm-and-services vm vm-sshd meta container="vm"
 
 Load a part of configuration (or all of it) from a local file or
 a network URL. The +replace+ method replaces the current
-configuration with the one from the source. The +update+ tries to
-import the contents into the current configuration.
+configuration with the one from the source. The +update+ method
+tries to import the contents into the current configuration. The
++push+ method imports the contents into the current configuration
+and removes any lines that are not present in the given
+configuration.
 The file may be a CLI file or an XML file.
 
 If the URL is `-`, the configuration is read from standard input.
@@ -2946,12 +2954,13 @@ Usage:
 ...............
 load [xml] <method> URL
 
-method :: replace | update
+method :: replace | update | push
 ...............
 Example:
 ...............
 load xml update myfirstcib.xml
 load xml replace http://storage.big.com/cibs/bigcib.xml
+load xml push smallcib.xml
 ...............
 
 [[cmdhelp_configure_location,a location preference]]
@@ -4335,7 +4344,7 @@ command.
 
 Example:
 ...............
-crm(live)history# timeframe "Jul 18 12:00" "Jul 18 12:30"
+crm(live)history# limit "Jul 18 12:00" "Jul 18 12:30"
 crm(live)history# session save strange_restart
 crm(live)history# session pack
 Report saved in .../strange_restart.tar.bz2
@@ -4765,6 +4774,22 @@ transition log
 transition save 0 enigma-22
 ...............
 
+[[cmdhelp_history_transitions,List transitions]]
+==== `transitions`
+
+A transition represents a change in cluster configuration or
+state. This command lists the transitions in the current timeframe.
+
+Usage:
+...............
+transitions
+...............
+Example:
+...............
+transitions
+...............
+
+
 [[cmdhelp_history_wdiff,cluster states/transitions difference]]
 ==== `wdiff`
 
diff --git a/doc/website-v1/man-1.2.adoc b/doc/website-v1/man-1.2.adoc
index 3bc4523..b74f529 100644
--- a/doc/website-v1/man-1.2.adoc
+++ b/doc/website-v1/man-1.2.adoc
@@ -2982,7 +2982,7 @@ If you think you may have found a bug or just need clarification
 from developers or your support, the `session pack` command can
 help create a report. This is an example:
 ...............
-    crm(live)history# timeframe "Jul 18 12:00" "Jul 18 12:30"
+    crm(live)history# limit "Jul 18 12:00" "Jul 18 12:30"
     crm(live)history# session save strange_restart
     crm(live)history# session pack
     Report saved in .../strange_restart.tar.bz2
diff --git a/doc/website-v1/man-2.0.adoc b/doc/website-v1/man-2.0.adoc
index a59e6d0..7cec97c 100644
--- a/doc/website-v1/man-2.0.adoc
+++ b/doc/website-v1/man-2.0.adoc
@@ -3801,7 +3801,7 @@ help create a report.
 
 Example:
 ...............
-crm(live)history# timeframe "Jul 18 12:00" "Jul 18 12:30"
+crm(live)history# limit "Jul 18 12:00" "Jul 18 12:30"
 crm(live)history# session save strange_restart
 crm(live)history# session pack
 Report saved in .../strange_restart.tar.bz2
diff --git a/hb_report/hb_report.in b/hb_report/hb_report.in
index cf34857..9c1bef1 100755
--- a/hb_report/hb_report.in
+++ b/hb_report/hb_report.in
@@ -25,7 +25,7 @@ LOGD_CF=`findlogdcf @sysconfdir@ $HA_DIR`
 export LOGD_CF
 
 SSH_PASSWORD_NODES=""
-: ${SSH_OPTS="-o StrictHostKeyChecking=no -o EscapeChar=none"}
+: ${SSH_OPTS="-o StrictHostKeyChecking=no -o EscapeChar=none -o ConnectTimeout=15"}
 LOG_PATTERNS="CRIT: ERROR:"
 # PEINPUTS_PATT="peng.*PEngine Input stored"
 
@@ -533,7 +533,6 @@ FROM_TIME=$FROM_TIME
 TO_TIME=$TO_TIME
 USER_NODES="$USER_NODES"
 NODES="$NODES"
-MASTER_NODE="$MASTER_NODE"
 HA_LOG=$HA_LOG
 UNIQUE_MSG=$UNIQUE_MSG
 SANITIZE="$SANITIZE"
@@ -555,9 +554,6 @@ is_collector() {
 is_node() {
 	test "$THIS_IS_NODE"
 }
-is_master() {
-	! is_collector && test "$WE" = "$MASTER_NODE"
-}
 start_slave_collector() {
 	local node=$1
 
@@ -745,6 +741,7 @@ sys_info() {
 	${HA_NOARCHBIN}/hb_report -V # our info
 	echo "resource-agents: `grep 'Build version:' @OCF_ROOT_DIR@/lib/heartbeat/ocf-shellfuncs`"
 	crm_info
+	booth --version
 	pkg_versions $PACKAGES
 	skip_lvl 1 || verify_packages $PACKAGES
 	echo "Platform: `uname`"
@@ -1111,7 +1108,7 @@ collect_journal() {
 			warning "$outf already exists"
 		fi
 		debug "journalctl from: '$1' until: '$2' from_time '$from_time' to_time: '$to_time' > $outf"
-		journalctl --since "$from_time" --until "$to_time" --no-pager | tail -n +2 > $outf
+		journalctl -o short-iso --since "$from_time" --until "$to_time" --no-pager | tail -n +2 > $outf
 	fi
 }
 #
@@ -1159,10 +1156,17 @@ finalword() {
 	else
 		echo "The report is saved in $DESTDIR/$DEST"
 	fi
+	echo "Report timespan: $(time2str "$FROM_TIME") - $(time2str "$TO_TIME")"
 	echo " "
 	echo "Thank you for taking time to create this report."
 }
 
+check_if_log_is_empty() {
+	if ! find "$1" -iname $HALOG_F | grep -q .; then
+		warning "Report contains no logs; did you get the right timeframe?"
+	fi
+}
+
 [ $# -eq 0 ] && usage
 
 # check for the major prereq for a) parameter parsing and b)
@@ -1283,6 +1287,7 @@ pacemaker-pygui pacemaker-pymgmt pymgmt-client
 openais libopenais2 libopenais3 corosync libcorosync4
 resource-agents cluster-glue libglue2 ldirectord libqb0
 heartbeat heartbeat-common heartbeat-resources libheartbeat2
+booth
 ocfs2-tools ocfs2-tools-o2cb ocfs2console
 ocfs2-kmp-default ocfs2-kmp-pae ocfs2-kmp-xen ocfs2-kmp-debug ocfs2-kmp-trace
 drbd drbd-kmp-xen drbd-kmp-pae drbd-kmp-default drbd-kmp-debug drbd-kmp-trace
@@ -1358,7 +1363,6 @@ export CONFIGURATIONS
 
 THIS_IS_NODE=""
 if ! is_collector; then
-	MASTER_NODE=$WE
 	NODES=`getnodes`
 	debug "nodes: `echo $NODES`"
 fi
@@ -1415,10 +1419,11 @@ fi
 # part 4: find the logs and cut out the segment for the period
 #
 
-# if the master is also a node, getlog is going to be invoked
-# from the collector
-(is_master && is_node) ||
-	getlog
+is_node && getlog
+
+#
+# Start slave collectors
+#
 
 if ! is_collector; then
 	for node in $NODES; do
@@ -1448,6 +1453,7 @@ else
 	fi
 	analyze $WORKDIR > $WORKDIR/$ANALYSIS_F &
 	events $WORKDIR &
+	check_if_log_is_empty $WORKDIR
 	mktemplate > $WORKDIR/$DESCRIPTION_F
 	[ "$NO_DESCRIPTION" ] || {
 		echo press enter to edit the problem description...
diff --git a/hb_report/utillib.sh b/hb_report/utillib.sh
index ff54df8..5ed57e6 100644
--- a/hb_report/utillib.sh
+++ b/hb_report/utillib.sh
@@ -517,18 +517,19 @@ crmconfig() {
 		CIB_file=$1/$CIB_F crm configure show >$1/$CIB_TXT_F 2>&1
 }
 get_crm_nodes() {
-	cibadmin -Ql -o nodes |
-	awk '
-	/<node / {
-		for( i=1; i<=NF; i++ )
-			if( $i~/^uname=/ ) {
-				sub("uname=.","",$i);
-				sub("\".*","",$i);
-				print $i;
-				next;
-			}
-	}
-	'
+	python <<EOF
+from lxml import etree
+import subprocess
+cib = etree.fromstring(subprocess.check_output(['/usr/sbin/cibadmin', '-Ql']))
+for node in cib.xpath('/cib/configuration/nodes/node'):
+    name = node.get('uname') or node.get('id')
+    if node.get('type') == 'remote':
+        srv = cib.xpath("//primitive[@id='%s']/instance_attributes/nvpair[@name='server']" % (name))
+        if srv:
+            print(srv[0].get('value'))
+            continue
+    print(name)
+EOF
 }
 get_live_nodes() {
 	if [ `id -u` = 0 ] && which fping >/dev/null 2>&1; then
diff --git a/modules/cache.py b/modules/cache.py
index 493e755..bcefd8e 100644
--- a/modules/cache.py
+++ b/modules/cache.py
@@ -1,10 +1,10 @@
 # Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic at suse.de>
 # See COPYING for license information.
+#
+# Cache stuff. A naive implementation.
 
 import time
 
-"Cache stuff. A naive implementation."
-
 
 _max_cache_age = 600  # seconds
 _stamp = time.time()
diff --git a/modules/cibconfig.py b/modules/cibconfig.py
index fa3b87f..be1b9ee 100644
--- a/modules/cibconfig.py
+++ b/modules/cibconfig.py
@@ -8,29 +8,30 @@ import sys
 import re
 import fnmatch
 import time
-from collections import defaultdict
+import collections
 from . import config
 from . import options
 from . import constants
 from . import tmpfiles
-from .parse import CliParser
 from . import clidisplay
-from .cibstatus import cib_status
 from . import idmgmt
-from .ra import get_ra, get_properties_list, get_pe_meta
 from . import schema
-from .crm_gv import gv_types
+from . import utils
+from . import cibverify
+from . import parse
+from . import ordereddict
+from . import orderedset
+from . import cibstatus
+from . import crm_gv
+from .ra import get_ra, get_properties_list, get_pe_meta, get_properties_meta
 from .msg import common_warn, common_err, common_debug, common_info, err_buf
 from .msg import common_error, constraint_norefobj_err, cib_parse_err, no_object_err
 from .msg import missing_obj_err, common_warning, update_err, unsupported_err, empty_cib_err
 from .msg import invalid_id_err, cib_ver_unsupported_err
-from . import utils
 from .utils import ext_cmd, safe_open_w, pipe_string, safe_close_w, crm_msec
 from .utils import ask, lines2cli, olist
 from .utils import page_string, cibadmin_can_patch, str2tmp
 from .utils import run_ptest, is_id_valid, edit_file, get_boolean, filter_string
-from .ordereddict import odict
-from .orderedset import oset
 from .xmlutil import is_child_rsc, rsc_constraint, sanitize_cib, rename_id, get_interesting_nodes
 from .xmlutil import is_pref_location, get_topnode, new_cib, get_rscop_defaults_meta_node
 from .xmlutil import rename_rscref, is_ms, silly_constraint, is_container, fix_comments
@@ -47,7 +48,6 @@ from .cliformat import get_score, nvpairs2list, abs_pos_score, cli_acl_roleref,
 from .cliformat import cli_nvpair, cli_acl_rule, rsc_set_constraint, get_kind, head_id_format
 from .cliformat import cli_operations, simple_rsc_constraint, cli_rule, cli_format
 from .cliformat import cli_acl_role, cli_acl_permission
-from . import cibverify
 
 
 def show_unrecognized_elems(cib_elem):
@@ -127,7 +127,7 @@ def fill_nvpairs(name, node, attrs, id_hint):
         nvpair = etree.SubElement(node, "nvpair", name=n)
         if v is not None:
             nvpair.set("value", v)
-        idmgmt.set(nvpair, None, nvpair_pfx)
+        idmgmt.set_id(nvpair, None, nvpair_pfx)
     return node
 
 
@@ -170,7 +170,7 @@ def mkxmlnvpairs(name, attrs, id_hint):
         node.set("id", id_hint)
     else:
         # operations don't need no id
-        idmgmt.set(node, None, id_hint, id_required=notops)
+        idmgmt.set_id(node, None, id_hint, id_required=notops)
     return fill_nvpairs(name, node, attrs, id_hint)
 
 
@@ -245,8 +245,8 @@ class CibObjectSet(object):
         rc, self.obj_set = cib_factory.mkobj_set(*self.args)
         self.search_rc = rc
         self.all_set = cib_factory.get_all_obj_set()
-        self.obj_ids = oset([o.obj_id for o in self.obj_set])
-        self.all_ids = oset([o.obj_id for o in self.all_set])
+        self.obj_ids = orderedset.oset([o.obj_id for o in self.obj_set])
+        self.all_ids = orderedset.oset([o.obj_id for o in self.all_set])
         self.locked_ids = self.all_ids - self.obj_ids
 
     def _open_url(self, src):
@@ -261,7 +261,7 @@ class CibObjectSet(object):
         try:
             ret = open(src)
             return ret
-        except IOError, e:
+        except IOError as e:
             common_err("could not open %s: %s" % (src, e))
         return False
 
@@ -300,9 +300,9 @@ class CibObjectSet(object):
                         continue
                 rc = True
             os.unlink(tmp)
-        except OSError, e:
+        except OSError as e:
             common_debug("unlink(%s) failure: %s" % (tmp, e))
-        except IOError, msg:
+        except IOError as msg:
             common_err(msg)
         return rc
 
@@ -318,26 +318,26 @@ class CibObjectSet(object):
             return self.search_rc
         return self._edit_save(s)
 
-    def _filter_save(self, filter, s):
+    def _filter_save(self, fltr, s):
         '''
         Pipe string s through a filter. Parse/save the output.
         If no changes are done, return silently.
         '''
-        rc, outp = filter_string(filter, s)
+        rc, outp = filter_string(fltr, s)
         if rc != 0:
             return False
         if hash(outp) == hash(s):
             return True
         return self.save(outp)
 
-    def filter(self, filter):
+    def filter(self, fltr):
         with clidisplay.nopretty():
-            s = self.repr(format=-1)
+            s = self.repr(format_mode=-1)
         # don't allow filter if one or more elements were not
         # found
         if not self.search_rc:
             return self.search_rc
-        return self._filter_save(filter, s)
+        return self._filter_save(fltr, s)
 
     def save_to_file(self, fname):
         f = safe_open_w(fname)
@@ -357,10 +357,10 @@ class CibObjectSet(object):
     def _get_gv_obj(self, gtype):
         if not self.obj_set:
             return True, None
-        if gtype not in gv_types:
+        if gtype not in crm_gv.gv_types:
             common_err("graphviz type %s is not supported" % gtype)
             return False, None
-        gv_obj = gv_types[gtype]()
+        gv_obj = crm_gv.gv_types[gtype]()
         set_graph_attrs(gv_obj, ".")
         return True, gv_obj
 
@@ -403,7 +403,7 @@ class CibObjectSet(object):
 
     def import_file(self, method, fname):
         '''
-        method: update or replace
+        method: update or replace or push
         '''
         if not cib_factory.is_cib_sane():
             return False
@@ -413,16 +413,19 @@ class CibObjectSet(object):
         s = f.read()
         if f != sys.stdin:
             f.close()
-        return self.save(s, no_remove=True, method=method)
+        if method == 'push':
+            return self.save(s, remove=True, method='update')
+        else:
+            return self.save(s, remove=False, method=method)
 
-    def repr(self, format=format):
+    def repr(self, format_mode=0):
         '''
         Return a string with objects's representations (either
         CLI or XML).
         '''
         return ''
 
-    def save(self, s, no_remove=False, method='replace'):
+    def save(self, s, remove=True, method='replace'):
         '''
         For each object:
             - try to find a corresponding object in obj_set
@@ -434,7 +437,7 @@ class CibObjectSet(object):
         '''
         pass
 
-    def __check_unique_clash(self, set_obj_all):
+    def _check_unique_clash(self, set_obj_all):
         'Check whether resource parameters with attribute "unique" clash'
         def process_primitive(prim, clash_dict):
             '''
@@ -469,7 +472,7 @@ class CibObjectSet(object):
                          if o.obj_type == "primitive"])
         if not check_set:
             return 0
-        clash_dict = defaultdict(list)
+        clash_dict = collections.defaultdict(list)
         for obj in set_obj_all.obj_set:
             node = obj.node
             if is_primitive(node):
@@ -489,7 +492,7 @@ class CibObjectSet(object):
         '''
         Test objects for sanity. This is about semantics.
         '''
-        rc = self.__check_unique_clash(set_obj_all)
+        rc = self._check_unique_clash(set_obj_all)
         for obj in sorted(self.obj_set, key=lambda x: x.obj_id):
             rc |= obj.check_sanity()
         return rc
@@ -504,15 +507,15 @@ class CibObjectSetCli(CibObjectSet):
     def __init__(self, *args):
         CibObjectSet.__init__(self, *args)
 
-    def repr_nopretty(self, format=1):
+    def repr_nopretty(self, format_mode=1):
         with clidisplay.nopretty():
-            return self.repr(format=format)
+            return self.repr(format_mode=format_mode)
 
-    def repr(self, format=1):
+    def repr(self, format_mode=1):
         "Return a string containing cli format of all objects."
         if not self.obj_set:
             return ''
-        return '\n'.join(obj.repr_cli(format=format)
+        return '\n'.join(obj.repr_cli(format_mode=format_mode)
                          for obj in processing_sort_cli(list(self.obj_set)))
 
     def _pre_edit(self, s):
@@ -538,7 +541,7 @@ class CibObjectSetCli(CibObjectSet):
             return node[0].get('id')
         return node.get('id')
 
-    def save(self, s, no_remove=False, method='replace'):
+    def save(self, s, remove=True, method='replace'):
         '''
         Save a user supplied cli format configuration.
         On errors user is typically asked to review the
@@ -551,7 +554,7 @@ class CibObjectSetCli(CibObjectSet):
         diff = CibDiff(self)
         rc = True
         err_buf.start_tmp_lineno()
-        cp = CliParser()
+        cp = parse.CliParser()
         for cli_text in lines2cli(s):
             err_buf.incr_lineno()
             node = cp.parse(cli_text)
@@ -566,7 +569,7 @@ class CibObjectSetCli(CibObjectSet):
         if not rc:
             return rc
 
-        rc = diff.apply(cib_factory, mode='cli', no_remove=no_remove, method=method)
+        rc = diff.apply(cib_factory, mode='cli', remove=remove, method=method)
         if not rc:
             self._initialize()
         return rc
@@ -579,7 +582,7 @@ class CibObjectSetRaw(CibObjectSet):
     def __init__(self, *args):
         CibObjectSet.__init__(self, *args)
 
-    def repr(self, format="ignored"):
+    def repr(self, format_mode="ignored"):
         "Return a string containing xml of all objects."
         cib_elem = cib_factory.obj_set2cib(self.obj_set)
         s = etree.tostring(cib_elem, pretty_print=True)
@@ -590,10 +593,10 @@ class CibObjectSetRaw(CibObjectSet):
             return "fencing_topology"
         return node.get("id")
 
-    def save(self, s, no_remove=False, method='replace'):
+    def save(self, s, remove=True, method='replace'):
         try:
             cib_elem = etree.fromstring(s)
-        except etree.ParseError, msg:
+        except etree.ParseError as msg:
             cib_parse_err(msg, s)
             return False
         sanitize_cib(cib_elem)
@@ -605,7 +608,7 @@ class CibObjectSetRaw(CibObjectSet):
             rc = diff.add(node)
         if not rc:
             return rc
-        rc = diff.apply(cib_factory, mode='xml', no_remove=no_remove, method=method)
+        rc = diff.apply(cib_factory, mode='xml', remove=remove, method=method)
         if not rc:
             self._initialize()
         return rc
@@ -614,7 +617,7 @@ class CibObjectSetRaw(CibObjectSet):
         if not self.obj_set:
             return True
         with clidisplay.nopretty():
-            cib = self.repr(format=-1)
+            cib = self.repr(format_mode=-1)
         rc = cibverify.verify(cib)
 
         if rc not in (0, 1):
@@ -625,7 +628,7 @@ class CibObjectSetRaw(CibObjectSet):
         if not cib_factory.is_cib_sane():
             return False
         cib_elem = cib_factory.obj_set2cib(self.obj_set)
-        status = cib_status.get_status()
+        status = cibstatus.cib_status.get_status()
         if status is None:
             common_err("no status section found")
             return False
@@ -675,7 +678,7 @@ def fix_node_ids(node, oldnode):
     def recurse(node, oldnode, prefix):
         refnode = lookup_node(node, oldnode)
         if needs_id(node):
-            idmgmt.set(node, refnode, prefix, id_required=(node.tag not in idless))
+            idmgmt.set_id(node, refnode, prefix, id_required=(node.tag not in idless))
         prefix = next_prefix(node, refnode, prefix)
         for c in node.iterchildren():
             if not is_comment(c):
@@ -744,14 +747,14 @@ def id_for_node(node, id_hint=None):
         if node.tag == 'op':
             if id_hint is None:
                 id_hint = node.get("rsc")
-            idmgmt.set(node, None, id_hint)
+            idmgmt.set_id(node, None, id_hint)
             obj_id = node.get('id')
         else:
             defid = default_id_for_tag(root.tag)
             if defid is not None:
                 try:
                     node.set('id', defid)
-                except TypeError, e:
+                except TypeError as e:
                     raise ValueError('Internal error: %s (%s)' % (e, etree.tostring(node)))
                 obj_id = node.get('id')
                 idmgmt.save(obj_id)
@@ -792,7 +795,7 @@ def parse_cli_to_xml(cli, oldnode=None, validation=None):
     input: CLI text
     output: XML, obj_type, obj_id
     """
-    parser = CliParser()
+    parser = parse.CliParser()
     if validation is not None:
         for p in parser.parsers.values():
             p.validation = validation
@@ -842,7 +845,7 @@ class CibObject(object):
         self.updated = True
         self.propagate_updated()
 
-    def _dump_state(self):
+    def dump_state(self):
         'Print object status'
         print self.state_fmt % (self.obj_id,
                                 self.origin,
@@ -850,12 +853,12 @@ class CibObject(object):
                                 self.parent and self.parent.obj_id or "",
                                 len(self.children))
 
-    def _repr_cli_xml(self, format):
-        with clidisplay.nopretty(format < 0):
+    def _repr_cli_xml(self, format_mode):
+        with clidisplay.nopretty(format_mode < 0):
             h = clidisplay.keyword("xml")
             l = etree.tostring(self.node, pretty_print=True).split('\n')
             l = [x for x in l if x]  # drop empty lines
-            return "%s %s" % (h, cli_format(l, break_lines=(format > 0), xml=True))
+            return "%s %s" % (h, cli_format(l, break_lines=(format_mode > 0), xml=True))
 
     def _gv_rsc_id(self):
         if self.parent and self.parent.obj_type in constants.clonems_tags:
@@ -884,20 +887,20 @@ class CibObject(object):
         '''
         pass
 
-    def _repr_cli_head(self, format):
+    def _repr_cli_head(self, format_mode):
         'implemented in subclasses'
         pass
 
-    def repr_cli(self, format=1):
+    def repr_cli(self, format_mode=1):
         '''
         CLI representation for the node.
         _repr_cli_head and _repr_cli_child in subclasess.
         '''
         if self.nocli:
-            return self._repr_cli_xml(format)
+            return self._repr_cli_xml(format_mode)
         l = []
-        with clidisplay.nopretty(format < 0):
-            head_s = self._repr_cli_head(format)
+        with clidisplay.nopretty(format_mode < 0):
+            head_s = self._repr_cli_head(format_mode)
             # everybody must have a head
             if not head_s:
                 return None
@@ -910,10 +913,10 @@ class CibObject(object):
                 if is_comment(c):
                     comments.append(c.text)
                     continue
-                s = self._repr_cli_child(c, format)
+                s = self._repr_cli_child(c, format_mode)
                 if s:
                     l.append(s)
-            return self._cli_format_and_comment(l, comments, break_lines=(format > 0))
+            return self._cli_format_and_comment(l, comments, format_mode=format_mode)
 
     def _attr_set_str(self, node):
         '''
@@ -953,7 +956,7 @@ class CibObject(object):
             ret = ret[:-1]
         return ret
 
-    def _repr_cli_child(self, c, format):
+    def _repr_cli_child(self, c, format_mode):
         if c.tag in self.set_names:
             return self._attr_set_str(c)
 
@@ -993,13 +996,15 @@ class CibObject(object):
         self.set_id()
         return self.node
 
-    def _cli_format_and_comment(self, l, comments, break_lines):
+    def _cli_format_and_comment(self, l, comments, format_mode):
         '''
         Format and add comment (if any).
         '''
-        s = cli_format(l, break_lines=break_lines)
+        s = cli_format(l, break_lines=(format_mode > 0))
         cs = '\n'.join(comments)
-        return (comments and format >= 0) and '\n'.join([cs, s]) or s
+        if len(comments) and format_mode >= 0:
+            return '\n'.join([cs, s])
+        return s
 
     def move_comments(self):
         '''
@@ -1068,7 +1073,7 @@ class CibObject(object):
         if self.node is None:
             return True
         with clidisplay.nopretty():
-            cli_text = self.repr_cli(format=0)
+            cli_text = self.repr_cli(format_mode=0)
         if not cli_text:
             common_debug("validation failed: %s" % (etree.tostring(self.node)))
             return False
@@ -1146,7 +1151,7 @@ class CibObject(object):
 
     def find_child_in_node(self, child):
         for c in self.node.iterchildren():
-            if c.tag == child.obj_type and \
+            if c.tag == child.node.tag and \
                     c.get("id") == child.obj_id:
                 return c
         return None
@@ -1207,7 +1212,7 @@ class CibNode(CibObject):
         "utilization": "utilization",
     }
 
-    def _repr_cli_head(self, format):
+    def _repr_cli_head(self, format_mode):
         uname = self.node.get("uname")
         s = clidisplay.keyword(self.obj_type)
         if self.obj_id != uname:
@@ -1215,10 +1220,10 @@ class CibNode(CibObject):
                 s = "%s %s:" % (s, self.obj_id)
             else:
                 s = '%s $id="%s"' % (s, self.obj_id)
-        s = '%s %s' % (s, clidisplay.id(uname))
-        type = self.node.get("type")
-        if type and type != constants.node_default_type:
-            s = '%s:%s' % (s, type)
+        s = '%s %s' % (s, clidisplay.ident(uname))
+        node_type = self.node.get("type")
+        if node_type and node_type != constants.node_default_type:
+            s = '%s:%s' % (s, node_type)
         return s
 
     def repr_gv(self, gv_obj, from_grp=False):
@@ -1261,7 +1266,7 @@ class Op(object):
     def __init__(self, op_name, prim, node=None):
         self.prim = prim
         self.node = node
-        self.attr_d = odict()
+        self.attr_d = ordereddict.odict()
         self.attr_d["name"] = op_name
         if self.node is not None:
             self.xml2dict()
@@ -1305,7 +1310,7 @@ class Op(object):
                 self.node.set(n, v)
             else:
                 inst_attr[n] = v
-        idmgmt.set(self.node, None, self.prim)
+        idmgmt.set_id(self.node, None, self.prim)
         if inst_attr:
             nia = mkxmlnvpairs("instance_attributes", inst_attr, self.node.get("id"))
             self.node.append(nia)
@@ -1323,7 +1328,7 @@ class CibPrimitive(CibObject):
         "utilization": "utilization",
     }
 
-    def _repr_cli_head(self, format):
+    def _repr_cli_head(self, format_mode):
         if self.obj_type == "primitive":
             template_ref = self.node.get("template")
         else:
@@ -1333,14 +1338,14 @@ class CibPrimitive(CibObject):
         else:
             rsc_spec = mk_rsc_type(self.node)
         s = clidisplay.keyword(self.obj_type)
-        id = clidisplay.id(self.obj_id)
-        return "%s %s %s" % (s, id, rsc_spec)
+        ident = clidisplay.ident(self.obj_id)
+        return "%s %s %s" % (s, ident, rsc_spec)
 
-    def _repr_cli_child(self, c, format):
+    def _repr_cli_child(self, c, format_mode):
         if c.tag in self.set_names:
             return self._attr_set_str(c)
         elif c.tag == "operations":
-            return cli_operations(c, break_lines=(format > 0))
+            return cli_operations(c, break_lines=(format_mode > 0))
 
     def _append_op(self, op_node):
         try:
@@ -1359,7 +1364,7 @@ class CibPrimitive(CibObject):
             return None
         # create an xml node
         if 'id' not in node.attrib:
-            idmgmt.set(node, None, self.obj_id)
+            idmgmt.set_id(node, None, self.obj_id)
         valid_attrs = olist(schema.get('attr', 'op', 'a'))
         inst_attr = {}
         for attr in node.attrib.keys():
@@ -1522,7 +1527,7 @@ class CibContainer(CibObject):
         "meta_attributes": "meta",
     }
 
-    def _repr_cli_head(self, format):
+    def _repr_cli_head(self, format_mode):
         children = []
         for c in self.node.iterchildren():
             if (self.obj_type == "group" and is_primitive(c)) or \
@@ -1531,8 +1536,8 @@ class CibContainer(CibObject):
             elif self.obj_type in constants.clonems_tags and is_child_rsc(c):
                 children.append(clidisplay.rscref(c.get("id")))
         s = clidisplay.keyword(self.obj_type)
-        id = clidisplay.id(self.obj_id)
-        return "%s %s %s" % (s, id, ' '.join(children))
+        ident = clidisplay.ident(self.obj_id)
+        return "%s %s %s" % (s, ident, ' '.join(children))
 
     def check_sanity(self):
         '''
@@ -1575,7 +1580,7 @@ def _check_if_constraint_ref_is_child(obj):
     a container.
     """
     rc = 0
-    for rscid in obj._referenced_resources():
+    for rscid in obj.referenced_resources():
         tgt = cib_factory.find_object(rscid)
         if not tgt:
             common_warn("%s: resource %s does not exist" % (obj.obj_id, rscid))
@@ -1595,7 +1600,7 @@ class CibLocation(CibObject):
     Location constraint.
     '''
 
-    def _repr_cli_head(self, format):
+    def _repr_cli_head(self, format_mode):
         rsc = None
         if "rsc" in self.node.keys():
             rsc = self.node.get("rsc")
@@ -1609,8 +1614,8 @@ class CibLocation(CibObject):
             common_err("%s: unknown rsc_location format" % self.obj_id)
             return None
         s = clidisplay.keyword(self.obj_type)
-        id = clidisplay.id(self.obj_id)
-        s = "%s %s %s" % (s, id, rsc)
+        ident = clidisplay.ident(self.obj_id)
+        s = "%s %s %s" % (s, ident, rsc)
 
         known_attrs = ['role', 'resource-discovery']
         for attr in known_attrs:
@@ -1624,7 +1629,7 @@ class CibLocation(CibObject):
             s = "%s %s: %s" % (s, score, pref_node)
         return s
 
-    def _repr_cli_child(self, c, format):
+    def _repr_cli_child(self, c, format_mode):
         if c.tag == "rule":
             return "%s %s" % \
                 (clidisplay.keyword("rule"), cli_rule(c))
@@ -1638,7 +1643,7 @@ class CibLocation(CibObject):
             return utils.get_check_rc()
         rc = 0
         uname = self.node.get("node")
-        if uname and uname.lower() not in [id.lower() for id in cib_factory.node_id_list()]:
+        if uname and uname.lower() not in [ident.lower() for ident in cib_factory.node_id_list()]:
             common_warn("%s: referenced node %s does not exist" % (self.obj_id, uname))
             rc = 1
         pattern = self.node.get("rsc-pattern")
@@ -1665,7 +1670,7 @@ class CibLocation(CibObject):
             rc = rc2
         return rc
 
-    def _referenced_resources(self):
+    def referenced_resources(self):
         ret = self.node.xpath('.//resource_set/resource_ref/@id')
         return ret or [self.node.get("rsc")]
 
@@ -1735,9 +1740,9 @@ class CibSimpleConstraint(CibObject):
     Colocation and order constraints.
     '''
 
-    def _repr_cli_head(self, format):
+    def _repr_cli_head(self, format_mode):
         s = clidisplay.keyword(self.obj_type)
-        id = clidisplay.id(self.obj_id)
+        ident = clidisplay.ident(self.obj_id)
         score = get_score(self.node) or get_kind(self.node)
         if self.node.find("resource_set") is not None:
             col = rsc_set_constraint(self.node, self.obj_type)
@@ -1753,7 +1758,7 @@ class CibSimpleConstraint(CibObject):
             node_attr = self.node.get("node-attribute")
             if node_attr:
                 col.append("node-attribute=%s" % node_attr)
-        s = "%s %s " % (s, id)
+        s = "%s %s " % (s, ident)
         if score != '':
             s += "%s: " % (clidisplay.score(score))
         return s + ' '.join(col)
@@ -1805,7 +1810,7 @@ class CibSimpleConstraint(CibObject):
                 self.node.get("first"),
                 self.node.get("then")])
 
-    def _referenced_resources(self):
+    def referenced_resources(self):
         ret = self.node.xpath('.//resource_set/resource_ref/@id')
         if ret:
             return ret
@@ -1828,9 +1833,9 @@ class CibRscTicket(CibSimpleConstraint):
     rsc_ticket constraint.
     '''
 
-    def _repr_cli_head(self, format):
+    def _repr_cli_head(self, format_mode):
         s = clidisplay.keyword(self.obj_type)
-        id = clidisplay.id(self.obj_id)
+        ident = clidisplay.ident(self.obj_id)
         ticket = clidisplay.ticket(self.node.get("ticket"))
         if self.node.find("resource_set") is not None:
             col = rsc_set_constraint(self.node, self.obj_type)
@@ -1841,7 +1846,7 @@ class CibRscTicket(CibSimpleConstraint):
         a = self.node.get("loss-policy")
         if a:
             col.append("loss-policy=%s" % a)
-        return "%s %s %s: %s" % (s, id, ticket, ' '.join(col))
+        return "%s %s %s: %s" % (s, ident, ticket, ' '.join(col))
 
 
 class CibProperty(CibObject):
@@ -1849,11 +1854,11 @@ class CibProperty(CibObject):
     Cluster properties.
     '''
 
-    def _repr_cli_head(self, format):
+    def _repr_cli_head(self, format_mode):
         return "%s %s" % (clidisplay.keyword(self.obj_type),
                           head_id_format(self.obj_id))
 
-    def _repr_cli_child(self, c, format):
+    def _repr_cli_child(self, c, format_mode):
         if c.tag == "rule":
             return ' '.join((clidisplay.keyword("rule"),
                              cli_rule(c)))
@@ -1916,9 +1921,9 @@ class CibFencingOrder(CibObject):
         ''' Cannot rename this one. '''
         return False
 
-    def _repr_cli_head(self, format):
+    def _repr_cli_head(self, format_mode):
         s = clidisplay.keyword(self.obj_type)
-        d = odict()
+        d = ordereddict.odict()
         for c in self.node.iterchildren("fencing-level"):
             if "target-attribute" in c.attrib:
                 target = (c.get("target-attribute"), c.get("target-value"))
@@ -1927,7 +1932,7 @@ class CibFencingOrder(CibObject):
             if target not in d:
                 d[target] = {}
             d[target][c.get("index")] = c.get("devices")
-        dd = odict()
+        dd = ordereddict.odict()
         for target in d.keys():
             sorted_keys = sorted([int(i) for i in d[target].keys()])
             dd[target] = [d[target][str(x)] for x in sorted_keys]
@@ -1945,9 +1950,9 @@ class CibFencingOrder(CibObject):
                 return tgt + ":"
         return cli_format([s] + ["%s %s" % (fmt_target(x), ' '.join(dd[x]))
                                  for x in dd.keys()],
-                          break_lines=(format > 0))
+                          break_lines=(format_mode > 0))
 
-    def _repr_cli_child(self, c, format):
+    def _repr_cli_child(self, c, format_mode):
         pass  # no children here
 
     def check_sanity(self):
@@ -1960,7 +1965,7 @@ class CibFencingOrder(CibObject):
         rc = 0
         nl = self.node.findall("fencing-level")
         for target in [x.get("target") for x in nl if x.get("target") is not None]:
-            if target.lower() not in [id.lower() for id in cib_factory.node_id_list()]:
+            if target.lower() not in [ident.lower() for ident in cib_factory.node_id_list()]:
                 common_warn("%s: target %s not a node" % (self.obj_id, target))
                 rc = 1
         stonith_rsc_l = [x.obj_id for x in
@@ -1985,16 +1990,16 @@ class CibAcl(CibObject):
 
     '''
 
-    def _repr_cli_head(self, format):
+    def _repr_cli_head(self, format_mode):
         s = clidisplay.keyword(self.obj_type)
-        id = clidisplay.id(self.obj_id)
-        return "%s %s" % (s, id)
+        ident = clidisplay.ident(self.obj_id)
+        return "%s %s" % (s, ident)
 
-    def _repr_cli_child(self, c, format):
+    def _repr_cli_child(self, c, format_mode):
         if c.tag in constants.acl_rule_names:
-            return cli_acl_rule(c, format)
+            return cli_acl_rule(c, format_mode)
         elif c.tag == "role_ref":
-            return cli_acl_roleref(c, format)
+            return cli_acl_roleref(c, format_mode)
         elif c.tag == "role":
             return cli_acl_role(c)
         elif c.tag == "acl_permission":
@@ -2011,7 +2016,7 @@ class CibTag(CibObject):
 
     def _repr_cli_head(self, fmt):
         return ' '.join([clidisplay.keyword(self.obj_type),
-                         clidisplay.id(self.obj_id)] +
+                         clidisplay.ident(self.obj_id)] +
                         [clidisplay.rscref(c.get('id'))
                          for c in self.node.iterchildren() if not is_comment(c)])
 
@@ -2064,7 +2069,7 @@ cib_object_map = {
 
 
 # generate a translation cli -> tag
-backtrans = odict((item[0], key) for key, item in cib_object_map.iteritems())
+backtrans = ordereddict.odict((item[0], key) for key, item in cib_object_map.iteritems())
 
 
 def default_id_for_tag(tag):
@@ -2094,9 +2099,9 @@ class CibDiff(object):
     '''
     def __init__(self, objset):
         self.objset = objset
-        self._node_set = oset()
+        self._node_set = orderedset.oset()
         self._nodes = {}
-        self._rsc_set = oset()
+        self._rsc_set = orderedset.oset()
         self._resources = {}
 
     def add(self, item):
@@ -2139,12 +2144,12 @@ class CibDiff(object):
         return False
 
     def _obj_nodes(self):
-        return oset([n for n in self.objset.obj_ids
-                     if self._is_node(n)])
+        return orderedset.oset([n for n in self.objset.obj_ids
+                                if self._is_node(n)])
 
     def _obj_resources(self):
-        return oset([n for n in self.objset.obj_ids
-                     if self._is_resource(n)])
+        return orderedset.oset([n for n in self.objset.obj_ids
+                                if self._is_resource(n)])
 
     def _is_edit_valid(self, id_set, existing):
         '''
@@ -2168,7 +2173,7 @@ class CibDiff(object):
             rc = False
         return rc
 
-    def apply(self, factory, mode='cli', no_remove=False, method='replace'):
+    def apply(self, factory, mode='cli', remove=True, method='replace'):
         rc = True
 
         edited_nodes = self._nodes.copy()
@@ -2176,11 +2181,11 @@ class CibDiff(object):
 
         def calc_sets(input_set, existing):
             rc = True
-            if not no_remove:
+            if remove:
                 rc = self._is_edit_valid(input_set, existing)
                 del_set = existing - (input_set)
             else:
-                del_set = oset()
+                del_set = orderedset.oset()
             mk_set = (input_set) - existing
             upd_set = (input_set) & existing
             return rc, mk_set, upd_set, del_set
@@ -2432,11 +2437,11 @@ class CibFactory(object):
     def showobjects(self):
         self._state_header()
         for obj in self.cib_objects:
-            obj._dump_state()
+            obj.dump_state()
         if self.remove_queue:
             print "Remove queue:"
             for obj in self.remove_queue:
-                obj._dump_state()
+                obj.dump_state()
 
     def commit(self, force=False, replace=False):
         'Commit the configuration to the CIB.'
@@ -2808,19 +2813,19 @@ class CibFactory(object):
                 return obj
         return None
 
-    def find_xml_node(self, tag, id, strict=True):
+    def find_xml_node(self, tag, ident, strict=True):
         "Find a xml node of this type with this id."
         try:
             if tag in constants.defaults_tags:
-                expr = '//%s/meta_attributes[@id="%s"]' % (tag, id)
+                expr = '//%s/meta_attributes[@id="%s"]' % (tag, ident)
             elif tag == 'fencing-topology':
                 expr = '//fencing-topology' % tag
             else:
-                expr = '//%s[@id="%s"]' % (tag, id)
+                expr = '//%s[@id="%s"]' % (tag, ident)
             return self.cib_elem.xpath(expr)[0]
         except IndexError:
             if strict:
-                common_warn("strange, %s element %s not found" % (tag, id))
+                common_warn("strange, %s element %s not found" % (tag, ident))
             return None
 
     #
@@ -2901,13 +2906,13 @@ class CibFactory(object):
                 obj.set_updated()
         return rc
 
-    def is_id_refd(self, attr_list_type, id):
+    def is_id_refd(self, attr_list_type, ident):
         '''
         Is this ID referenced anywhere?
         Used from cliformat
         '''
         try:
-            return self.id_refs[id] == attr_list_type
+            return self.id_refs[ident] == attr_list_type
         except KeyError:
             return False
 
@@ -2942,11 +2947,24 @@ class CibFactory(object):
                         return n.get('value')
         return None
 
-    def get_property(self, property):
+    def get_property(self, prop):
         '''
         Get the value of the given cluster property.
         '''
-        return self._get_attr_value("property", property)
+        return self._get_attr_value("property", prop)
+
+    def get_property_w_default(self, prop):
+        '''
+        Get the value of the given property. If it is
+        not set, return the default value.
+        '''
+        v = self.get_property(prop)
+        if v is None:
+            try:
+                v = get_properties_meta().param_default(prop)
+            except:
+                pass
+        return v
 
     def get_op_default(self, attr):
         '''
@@ -3003,20 +3021,20 @@ class CibFactory(object):
         if args[0] == "NOOBJ":
             return True, []
         rc = True
-        obj_set = oset([])
+        obj_set = orderedset.oset([])
         for spec in args:
             if spec == "changed":
-                obj_set |= oset(self.modified_elems())
+                obj_set |= orderedset.oset(self.modified_elems())
             elif spec.startswith("type:"):
-                obj_set |= oset(self.get_elems_on_type(spec))
+                obj_set |= orderedset.oset(self.get_elems_on_type(spec))
             elif spec.startswith("tag:"):
-                obj_set |= oset(self.get_elems_on_tag(spec))
+                obj_set |= orderedset.oset(self.get_elems_on_tag(spec))
             elif spec.startswith("related:"):
                 name = spec[len("related:"):]
-                obj_set |= oset(self.find_objects(name) or [])
+                obj_set |= orderedset.oset(self.find_objects(name) or [])
                 obj = self.find_object(name)
                 if obj is not None:
-                    obj_set |= oset(self.related_elements(obj))
+                    obj_set |= orderedset.oset(self.related_elements(obj))
             else:
                 objs = self.find_objects(spec) or []
                 for obj in objs:
@@ -3093,7 +3111,7 @@ class CibFactory(object):
         if child.parent and child.parent.obj_id != obj_id:
             common_err("%s already in use at %s" % (child_id, child.parent.obj_id))
             return False
-        if child.obj_type not in constants.children_tags:
+        if child.node.tag not in constants.children_tags:
             common_err("%s may contain a primitive or a group; %s is %s" %
                        (parent_tag, child_id, child.obj_type))
             return False
@@ -3293,17 +3311,17 @@ class CibFactory(object):
                 return False
             test_l.append(obj)
 
-        for id in upd_set:
-            if edit_d[id].tag == 'node':
-                obj = self.find_node(id)
+        for ident in upd_set:
+            if edit_d[ident].tag == 'node':
+                obj = self.find_node(ident)
             else:
-                obj = self.find_resource(id)
+                obj = self.find_resource(ident)
             if not obj:
-                common_debug("%s not found!" % (id))
+                common_debug("%s not found!" % (ident))
                 return False
-            node, _, _ = postprocess_cli(edit_d[id], oldnode=obj.node)
+            node, _, _ = postprocess_cli(edit_d[ident], oldnode=obj.node)
             if node is None:
-                common_debug("postprocess_cli failed: %s" % (id))
+                common_debug("postprocess_cli failed: %s" % (ident))
                 return False
             if not self.update_from_cli(obj, node, method):
                 common_debug("update_from_cli failed: %s, %s, %s" %
@@ -3336,14 +3354,14 @@ class CibFactory(object):
             if not obj:
                 return False
             test_l.append(obj)
-        for id in upd_set:
-            if edit_d[id].tag == 'node':
-                obj = self.find_node(id)
+        for ident in upd_set:
+            if edit_d[ident].tag == 'node':
+                obj = self.find_node(ident)
             else:
-                obj = self.find_resource(id)
+                obj = self.find_resource(ident)
             if not obj:
                 return False
-            if not self.update_from_node(obj, edit_d[id]):
+            if not self.update_from_node(obj, edit_d[ident]):
                 return False
             test_l.append(obj)
         if not self.delete(*list(del_set)):
diff --git a/modules/cibstatus.py b/modules/cibstatus.py
index 8034b5e..14600f9 100644
--- a/modules/cibstatus.py
+++ b/modules/cibstatus.py
@@ -12,10 +12,10 @@ from . import utils
 from . import config
 
 
-def get_tag_by_id(node, tag, id):
+def get_tag_by_id(node, tag, ident):
     "Find a doc node which matches tag and id."
     for n in node.xpath(".//%s" % tag):
-        if n.get("id") == id:
+        if n.get("id") == ident:
             return n
     return None
 
diff --git a/modules/clidisplay.py b/modules/clidisplay.py
index f41ad70..fdcc50d 100644
--- a/modules/clidisplay.py
+++ b/modules/clidisplay.py
@@ -5,6 +5,8 @@
 Display output for various syntax elements.
 """
 
+from contextlib import contextmanager
+
 from . import config
 
 
@@ -22,16 +24,14 @@ def disable_pretty():
     _pretty = False
 
 
-class nopretty(object):
-    def __init__(self, cond=True):
-        self.cond = cond
-
-    def __enter__(self):
-        if self.cond:
-            disable_pretty()
-
-    def __exit__(self, type, value, traceback):
-        if self.cond:
+@contextmanager
+def nopretty(cond=True):
+    if cond:
+        disable_pretty()
+    try:
+        yield
+    finally:
+        if cond:
             enable_pretty()
 
 
@@ -98,7 +98,7 @@ def help_block(s):
     return _colorize(s, config.color.help_block)
 
 
-def id(s):
+def ident(s):
     return _colorize(s, config.color.identifier)
 
 
diff --git a/modules/cliformat.py b/modules/cliformat.py
index 106325b..3eb17d9 100644
--- a/modules/cliformat.py
+++ b/modules/cliformat.py
@@ -22,8 +22,8 @@ def cli_format(pl, break_lines=True, xml=False):
 def head_id_format(nodeid):
     "Special format for property list / node id"
     if utils.noquotes(nodeid):
-        return "%s:" % (clidisplay.id(nodeid))
-    return '%s="%s"' % (clidisplay.id('$id'),
+        return "%s:" % (clidisplay.ident(nodeid))
+    return '%s="%s"' % (clidisplay.ident('$id'),
                         clidisplay.attr_value(nodeid))
 
 
@@ -154,9 +154,9 @@ def binary_op_format(op):
 
 def exp2cli(node):
     operation = node.get("operation")
-    type = node.get("type")
-    if type:
-        operation = "%s:%s" % (type, operation)
+    typ = node.get("type")
+    if typ:
+        operation = "%s:%s" % (typ, operation)
     attribute = node.get("attribute")
     value = node.get("value")
     if not value:
@@ -350,7 +350,7 @@ def acl_spec_format(xml_spec, v):
     return v_f and '%s:%s' % (key_f, v_f) or key_f
 
 
-def cli_acl_rule(node, format=1):
+def cli_acl_rule(node, format_mode=1):
     l = []
     acl_rule_name = node.tag
     l.append(clidisplay.keyword(acl_rule_name))
@@ -361,7 +361,7 @@ def cli_acl_rule(node, format=1):
     return ' '.join(l)
 
 
-def cli_acl_roleref(node, format=1):
+def cli_acl_roleref(node, format_mode=1):
     return "%s:%s" % (clidisplay.keyword("role"),
                       clidisplay.attr_value(node.get("id")))
 
diff --git a/modules/command.py b/modules/command.py
index 45b115a..0b1d387 100644
--- a/modules/command.py
+++ b/modules/command.py
@@ -205,12 +205,15 @@ def fuzzy_get(items, s):
     import re
 
     def fuzzy_match(rx):
-        matcher = re.compile(rx, re.I)
-        matches = [c
-                   for m, c in items.iteritems()
-                   if matcher.match(m)]
-        if len(matches) == 1:
-            return matches[0]
+        try:
+            matcher = re.compile(rx, re.I)
+            matches = [c
+                       for m, c in items.iteritems()
+                       if matcher.match(m)]
+            if len(matches) == 1:
+                return matches[0]
+        except re.error as e:
+            raise ValueError(e)
         return None
 
     # prefix match
@@ -379,7 +382,11 @@ Examples:
         If none is found, a fuzzy matcher is used to
         pick a close match
         '''
-        return fuzzy_get(self._children, child)
+        from . import options
+        if options.shell_completion:
+            return self._children.get(child)
+        else:
+            return fuzzy_get(self._children, child)
 
     def is_sublevel(self, child):
         '''
@@ -389,6 +396,10 @@ Examples:
         return sub and sub.type == 'level'
 
     @classmethod
+    def children(self):
+        return self._children
+
+    @classmethod
     def init_ui(cls):
         def get_if_command(attr):
             "Return the named attribute if it's a command"
diff --git a/modules/config.py b/modules/config.py
index 25eaea7..19a4ad4 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -13,14 +13,31 @@ except ImportError:
 from . import userdir
 
 
+def configure_libdir():
+    '''
+    sysconfig is only available in 2.7 and above
+    MULTIARCH is a debian specific configuration variable
+    '''
+    dirs = ('/usr/lib64', '/usr/libexec', '/usr/lib',
+            '/usr/local/lib64', '/usr/local/libexec', '/usr/local/lib')
+    try:
+        import sysconfig
+        multiarch = sysconfig.get_config_var('MULTIARCH')
+        if multiarch:
+            dirs += ('/usr/lib/%s' % multiarch,
+                     '/usr/local/lib/%s' % multiarch)
+    except ImportError:
+        pass
+    return dirs
+
+
 _SYSTEMWIDE = '/etc/crm/crm.conf'
 _PERUSER = os.getenv("CRM_CONFIG_FILE") or os.path.join(userdir.CONFIG_HOME, 'crm.conf')
 
 _PATHLIST = {
     'datadir': ('/usr/share', '/usr/local/share', '/opt'),
     'cachedir': ('/var/cache', '/opt/cache'),
-    'libdir': ('/usr/lib64', '/usr/libexec', '/usr/lib',
-               '/usr/local/lib64', '/usr/local/libexec', '/usr/local/lib'),
+    'libdir': configure_libdir(),
     'varlib': ('/var/lib', '/opt/var/lib'),
     'wwwdir': ('/srv/www', '/var/www')
 }
diff --git a/modules/constants.py b/modules/constants.py
index 0902492..928ae98 100644
--- a/modules/constants.py
+++ b/modules/constants.py
@@ -114,9 +114,10 @@ op_cli_names = ("monitor",
                 "migrate_from",
                 "promote",
                 "demote",
-                "notify")
-ra_operations = ("probe", "monitor", "start", "stop",
-                 "promote", "demote", "notify", "migrate_to", "migrate_from")
+                "notify",
+                "reload")
+ra_operations = tuple(["probe"] + list(op_cli_names))
+
 subpfx_list = {
     "instance_attributes": "instance_attributes",
     "meta_attributes": "meta_attributes",
@@ -212,6 +213,7 @@ group_meta_attributes = ("container", )
 clone_meta_attributes = (
     "ordered", "notify", "interleave", "globally-unique",
     "clone-max", "clone-node-max", "clone-state", "description",
+    "clone-min",
 )
 ms_meta_attributes = (
     "master-max", "master-node-max", "description",
diff --git a/modules/corosync.py b/modules/corosync.py
index cba061f..6401f52 100644
--- a/modules/corosync.py
+++ b/modules/corosync.py
@@ -327,17 +327,22 @@ def diff_configuration(nodes, checksum=False):
         utils.remote_diff(local_path, nodes)
 
 
-def next_nodeid(parser):
+def get_free_nodeid(parser):
     ids = parser.get_all('nodelist.node.nodeid')
     if not ids:
         return 1
-    return max([int(i) for i in ids]) + 1
+    ids = [int(i) for i in ids]
+    max_id = max(ids) + 1
+    for i in xrange(1, max_id):
+        if i not in ids:
+            return i
+    return max_id
 
 
 def get_ip(node):
     try:
         return socket.gethostbyname(node)
-    except:
+    except socket.error:
         return None
 
 
@@ -368,35 +373,45 @@ def set_value(path, value):
     f.close()
 
 
-def add_node(name):
+def add_node(addr, name=None):
     '''
     Add node to corosync.conf
     '''
     coronodes = None
     nodes = None
+    nodenames = None
     coronodes = utils.list_corosync_nodes()
+    nodenames = utils.list_corosync_node_names()
     try:
         nodes = utils.list_cluster_nodes()
     except Exception:
         nodes = []
-    ipaddr = get_ip(name)
-    if name in coronodes or (ipaddr and ipaddr in coronodes):
+    ipaddr = get_ip(addr)
+    if addr in nodenames + coronodes or (ipaddr and ipaddr in coronodes):
+        err_buf.warning("%s already in corosync.conf" % (addr))
+        return
+    if name and name in nodenames + coronodes:
         err_buf.warning("%s already in corosync.conf" % (name))
         return
-    if name in nodes:
+    if addr in nodes:
+        err_buf.warning("%s already in configuration" % (addr))
+        return
+    if name and name in nodes:
         err_buf.warning("%s already in configuration" % (name))
         return
 
     f = open(conf()).read()
     p = Parser(f)
 
-    node_addr = name
-    node_id = next_nodeid(p)
+    node_addr = addr
+    node_id = get_free_nodeid(p)
+    node_name = name
+    node_value = (make_value('nodelist.node.ring0_addr', node_addr) +
+                  make_value('nodelist.node.nodeid', str(node_id)))
+    if node_name:
+        node_value += make_value('nodelist.node.name', node_name)
 
-    p.add('nodelist',
-          make_section('nodelist.node',
-                       make_value('nodelist.node.ring0_addr', node_addr) +
-                       make_value('nodelist.node.nodeid', str(node_id))))
+    p.add('nodelist', make_section('nodelist.node', node_value))
 
     num_nodes = p.count('nodelist.node')
     if num_nodes > 2:
@@ -414,6 +429,10 @@ def add_node(name):
         utils.ext_cmd(["corosync-cmapctl",
                        "-s", "nodelist.node.%s.ring0_addr" % (num_nodes - 1),
                        "str", node_addr], shell=False)
+        if node_name:
+            utils.ext_cmd(["corosync-cmapctl",
+                           "-s", "nodelist.node.%s.name" % (num_nodes - 1),
+                           "str", node_name], shell=False)
 
 
 def del_node(addr):
@@ -443,3 +462,5 @@ def del_node(addr):
                       shell=False)
         utils.ext_cmd(["corosync-cmapctl", "-D", "nodelist.node.%s.ring0_addr" % (nth)],
                       shell=False)
+        utils.ext_cmd(["corosync-cmapctl", "-D", "nodelist.node.%s.name" % (nth)],
+                      shell=False)
diff --git a/modules/crm_gv.py b/modules/crm_gv.py
index af00847..bd7d048 100644
--- a/modules/crm_gv.py
+++ b/modules/crm_gv.py
@@ -28,11 +28,11 @@ class Gv(object):
     '''
     EDGEOP = ''  # actually defined in subclasses
 
-    def __init__(self, id=None):
-        if id:
-            self.id = self.gv_id(id)
+    def __init__(self, ident=None):
+        if ident:
+            self.ident = self.gv_id(ident)
         else:
-            self.id = ""
+            self.ident = ""
         self.nodes = {}
         self.edges = []
         self.subgraphs = []
@@ -50,22 +50,22 @@ class Gv(object):
         self.graph_attrs[attr] = v
 
     def new_attr(self, n, attr_n, attr_v):
-        id = self.gv_id(n)
-        if id not in self.attrs:
-            self.attrs[id] = odict()
-        self.attrs[id][attr_n] = attr_v
+        ident = self.gv_id(n)
+        if ident not in self.attrs:
+            self.attrs[ident] = odict()
+        self.attrs[ident][attr_n] = attr_v
 
     def new_node(self, n, top_node=False, norank=False):
         '''
         Register every node.
         '''
-        id = self.gv_id(n)
+        ident = self.gv_id(n)
         if top_node:
-            self.top_nodes.append(id)
-        elif id not in self.nodes:
-            self.nodes[id] = 0
+            self.top_nodes.append(ident)
+        elif ident not in self.nodes:
+            self.nodes[ident] = 0
         if norank:
-            self.norank_nodes.append(id)
+            self.norank_nodes.append(ident)
 
     def my_edge(self, e):
         return [self.gv_id(x) for x in e if x is not None]
@@ -162,31 +162,31 @@ class GvDot(Gv):
     '''
     EDGEOP = ' -> '
 
-    def __init__(self, id=None):
-        Gv.__init__(self, id)
+    def __init__(self, ident=None):
+        Gv.__init__(self, ident)
 
     def header(self):
-        name = self.id and self.id or "G"
+        name = self.ident and self.ident or "G"
         return 'digraph %s {\n' % (name)
 
     def footer(self):
         return '}'
 
-    def group(self, members, id=None):
+    def group(self, members, ident=None):
         '''
         Groups are subgraphs.
         '''
-        sg_obj = SubgraphDot(id)
+        sg_obj = SubgraphDot(ident)
         sg_obj.new_edge(members)
         self.subgraphs.append(sg_obj)
         self.new_node(members[0])
         return sg_obj
 
-    def optional_set(self, members, id=None):
+    def optional_set(self, members, ident=None):
         '''
         Optional resource sets.
         '''
-        sg_obj = SubgraphDot(id)
+        sg_obj = SubgraphDot(ident)
         e_id = sg_obj.new_edge(members)
         sg_obj.new_edge_attr(e_id, 'style', 'invis')
         sg_obj.new_edge_attr(e_id, 'constraint', 'false')
@@ -219,12 +219,12 @@ class SubgraphDot(GvDot):
     '''
     graphviz subgraph.
     '''
-    def __init__(self, id=None):
-        Gv.__init__(self, id)
+    def __init__(self, ident=None):
+        GvDot.__init__(self, ident)
 
     def header(self):
-        if self.id:
-            return 'subgraph %s {' % self.id
+        if self.ident:
+            return 'subgraph %s {' % self.ident
         else:
             return '{'
 
diff --git a/modules/crm_pssh.py b/modules/crm_pssh.py
index 64c65e9..d3fd27c 100644
--- a/modules/crm_pssh.py
+++ b/modules/crm_pssh.py
@@ -13,13 +13,13 @@ corresponding remote node's hostname or IP address.
 
 import os
 import glob
-import re
 
 from parallax.manager import Manager, FatalError
 from parallax.task import Task
 from parallax import Options
 
 from .msg import common_err, common_debug, common_warn
+from . import config
 
 
 _DEFAULT_TIMEOUT = 60
@@ -39,7 +39,7 @@ def parse_args(outdir, errdir, t=_DEFAULT_TIMEOUT):
     return opts
 
 
-def get_output(dir, host):
+def get_output(odir, host):
     '''
     Looks for the output returned by the given host.
     This is somewhat problematic, since it is possible that
@@ -47,21 +47,21 @@ def get_output(dir, host):
     hosts "host.1" and "host.2" will confuse this code.
     '''
     l = []
-    for fname in ["%s/%s" % (dir, host)] + glob.glob("%s/%s.[0-9]*" % (dir, host)):
+    for fname in ["%s/%s" % (odir, host)] + glob.glob("%s/%s.[0-9]*" % (odir, host)):
         try:
             if os.path.isfile(fname):
                 l += open(fname).readlines()
-        except:
+        except IOError:
             continue
     return l
 
 
-def show_output(dir, hosts, desc):
+def show_output(odir, hosts, desc):
     '''
     Display output from hosts. See get_output for caveats.
     '''
     for host in hosts:
-        out_l = get_output(dir, host)
+        out_l = get_output(odir, host)
         if out_l:
             print "%s %s:" % (host, desc)
             print ''.join(out_l)
@@ -141,7 +141,7 @@ def examine_outcome(l, opts, statuses):
     return True
 
 
-def next_loglines(a, outdir, errdir):
+def next_loglines(a, outdir, errdir, from_time):
     '''
     pssh to nodes to collect new logs.
     '''
@@ -149,8 +149,11 @@ def next_loglines(a, outdir, errdir):
     for node, rptlog, logfile, nextpos in a:
         common_debug("updating %s from %s (pos %d)" %
                      (logfile, node, nextpos))
-        cmdline = "perl -e 'exit(%d) if (stat(\"%s\"))[7]<%d' && tail -c +%d %s" % (
-            _EC_LOGROT, logfile, nextpos-1, nextpos, logfile)
+        if logfile.startswith("/tmp") and logfile.endswith("/journal.log"):
+            cmdline = "/usr/bin/journalctl -o short-iso --since '%s' --no-pager" % (from_time)
+        else:
+            cmdline = "perl -e 'exit(%d) if (stat(\"%s\"))[7]<%d' && tail -c +%d %s" % (
+                _EC_LOGROT, logfile, nextpos-1, nextpos, logfile)
         opts = parse_args(outdir, errdir)
         l.append([node, cmdline])
     statuses = do_pssh(l, opts)
@@ -164,16 +167,13 @@ def next_peinputs(node_pe_l, outdir, errdir):
     '''
     pssh to nodes to collect new logs.
     '''
+    pe_dir = config.path.pe_state_dir
+    vardir = os.path.dirname(pe_dir)
     l = []
     for node, pe_l in node_pe_l:
-        r = re.search("(.*)/pengine/", pe_l[0])
-        if not r:
-            common_err("strange, %s doesn't contain string pengine" % pe_l[0])
-            continue
-        dir = "/%s" % r.group(1)
-        red_pe_l = [x.replace("%s/" % r.group(1), "") for x in pe_l]
+        red_pe_l = [os.path.join("pengine", os.path.basename(x)) for x in pe_l]
+        cmdline = "tar -C %s -chf - %s" % (vardir, ' '.join(red_pe_l))
         common_debug("getting new PE inputs %s from %s" % (red_pe_l, node))
-        cmdline = "tar -C %s -chf - %s" % (dir, ' '.join(red_pe_l))
         opts = parse_args(outdir, errdir)
         l.append([node, cmdline])
     if not l:
diff --git a/modules/handles.py b/modules/handles.py
index c1edde7..71020b9 100644
--- a/modules/handles.py
+++ b/modules/handles.py
@@ -4,7 +4,7 @@
 import re
 
 
-_head_re = re.compile(r'\{\{(\#|\^)?([A-Za-z0-9\#\$:_-]+)\}\}')
+headmatcher = re.compile(r'\{\{(\#|\^)?([A-Za-z0-9\#\$:_-]+)\}\}')
 
 
 class value(object):
@@ -75,7 +75,7 @@ def _textify(obj):
 def _parse(template, context, strict):
     ret = ""
     while template:
-        head = _head_re.search(template)
+        head = headmatcher.search(template)
         if head is None:
             ret += template
             break
diff --git a/modules/help.py b/modules/help.py
index 6ec8b84..a35f107 100644
--- a/modules/help.py
+++ b/modules/help.py
@@ -365,7 +365,7 @@ def _load_help():
             _COMMANDS[lvlname][alias] = HelpEntry(info.short, info.long, (alias, command))
 
         def add_aliases_for_level(lvl):
-            for name, info in lvl._children.iteritems():
+            for name, info in lvl.children().iteritems():
                 for alias in info.aliases:
                     add_help_for_alias(lvl.name, info.name, alias)
                 if info.level:
diff --git a/modules/history.py b/modules/history.py
index f7cd0a0..fff8b00 100644
--- a/modules/history.py
+++ b/modules/history.py
@@ -1,9 +1,9 @@
 # Copyright (C) 2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
+# Copyright (C) 2013-2016 Kristoffer Gronlund <kgronlund@suse.com>
 # See COPYING for license information.
 
 import os
 import time
-import datetime
 import re
 import glob
 import ConfigParser
@@ -11,22 +11,14 @@ import ConfigParser
 from . import config
 from . import constants
 from . import userdir
-from .msg import common_debug, common_warn, common_err, common_error, common_info, warn_once
-from .xmlutil import file2cib_elem, get_rsc_children_ids, get_prim_children_ids, compressed_file_to_cib
-from .utils import file2str, shortdate, acquire_lock, append_file, ext_cmd, shorttime
-from .utils import page_string, release_lock, rmdir_r, parse_time, get_cib_attributes
-from .utils import is_pcmk_118, pipe_cmd_nosudo, file_find_by_name, get_stdout, quote
-from .utils import make_datetime_naive, datetime_to_timestamp
+from . import logtime
+from . import logparser
+from . import utils
 
-_HAS_PARALLAX = False
-try:
-    from .crm_pssh import next_loglines, next_peinputs
-    _HAS_PARALLAX = True
-except:
-    pass
+from .msg import common_debug, common_warn, common_err, common_error, common_info, warn_once
 
 
-YEAR = None
+_LOG_FILES = ("ha-log.txt", "ha-log", "cluster-log.txt", "messages", "journal.log", "pacemaker.log")
 
 
 #
@@ -38,590 +30,82 @@ YEAR = None
 #
 
 
-def mk_re_list(patt_l, repl):
-    'Build a list of regular expressions, replace "%%" with repl'
-    l = []
-    for re_l in patt_l:
-        l += [x.replace("%%", repl) for x in re_l]
-    if not repl:
-        l = [x.replace(".*.*", ".*") for x in l]
-    return l
-
-
-def set_year(ts=None):
-    '''
-    ts: optional time in seconds
-    '''
-    global YEAR
-    year = time.strftime("%Y", time.localtime(ts))
-    if YEAR is not None:
-        t = (" (ts: %s)" % (ts)) if ts is not None else ""
-        common_debug("history: setting year to %s%s" % (year, t))
-    YEAR = year
-
-
-def make_time(t):
-    '''
-    t: time in seconds / datetime / other
-    returns: time in floating point
-    '''
-    if t is None:
-        return None
-    elif isinstance(t, datetime.datetime):
-        return datetime_to_timestamp(t)
-    return t
-
-
-_syslog2node_formats = (re.compile(r'^[a-zA-Z]{2,4} \d{1,2} \d{2}:\d{2}:\d{2}\s+(?:\[\d+\])?\s*([\S]+)'),
-                        re.compile(r'^\d{4}-\d{2}-\d{2}T\S+\s+(?:\[\d+\])?\s*([\S]+)'))
-
-
-def syslog_ts(s):
-    """
-    Finds the timestamp in the given line
-    Returns as floating point, seconds
-    """
-    fmt1, fmt2 = _syslog2node_formats
-    m = fmt1.match(s)
-    if m:
-        if YEAR is None:
-            set_year()
-        tstr = ' '.join([YEAR] + s.split()[0:3])
-        return datetime_to_timestamp(parse_time(tstr))
-
-    m = fmt2.match(s)
-    if m:
-        tstr = s.split()[0]
-        return datetime_to_timestamp(parse_time(tstr))
-
-    common_debug("malformed line: %s" % s)
-    return None
-
-
-def syslog2node(s):
-    '''
-    Get the node from a syslog line.
-
-    old format:
-    Aug 14 11:07:04 <node> ...
-    new format:
-    Aug 14 11:07:04 [<PID>] <node> ...
-    RFC5424:
-    <TS> <node> ...
-    RFC5424 (2):
-    <TS> [<PID>] <node> ...
-    '''
-
-    fmt1, fmt2 = _syslog2node_formats
-    m = fmt1.search(s)
-    if m:
-        return m.group(1)
-
-    m = fmt2.search(s)
-    if m:
-        return m.group(1)
-
-    try:
-        # strptime defaults year to 1900 (sigh)
-        time.strptime(' '.join(s.split()[0:3]),
-                      "%b %d %H:%M:%S")
-        return s.split()[3]
-    except:  # try the rfc5424
-        try:
-            parse_time(s.split()[0])
-            return s.split()[1]
-        except Exception:
-            return None
-
-
-def seek_to_edge(f, ts, to_end):
-    '''
-    f contains lines with exactly the timestamp ts.
-    Read forward (or backward) till we find the edge.
-    Linear search, but should be short.
-    '''
-    if not to_end:
-        beg = 0
-        while ts == get_timestamp(f):
-            if f.tell() < 1000:
-                f.seek(0)    # otherwise, the seek below throws an exception
-                if beg > 0:  # avoid infinite loop
-                    return   # goes all the way to the top
-                beg += 1
-            else:
-                f.seek(-1000, 1)  # go back 10 or so lines
-    while True:
-        pos = f.tell()
-        s = f.readline()
-        if not s:
-            break
-        curr_ts = syslog_ts(s)
-        if (to_end and curr_ts > ts) or \
-                (not to_end and curr_ts >= ts):
-            break
-    f.seek(pos)
-
-
-def log_seek(f, ts, to_end=False):
-    '''
-    f is an open log. Do binary search for the timestamp.
-    Return the position of the (more or less) first line with an
-    earlier (or later) time.
-    '''
-    first = 0
-    f.seek(0, 2)
-    last = f.tell()
-    if not ts:
-        return to_end and last or first
-    badline = 0
-    maxbadline = 10
-    common_debug("seek %s:%s in %s" %
-                 (time.ctime(ts),
-                  to_end and "end" or "start",
-                  f.name))
-    while first <= last:
-        # we can skip some iterations if it's about few lines
-        if abs(first-last) < 120:
-            break
-        mid = (first+last)/2
-        f.seek(mid)
-        log_ts = get_timestamp(f)
-        if not log_ts:
-            badline += 1
-            if badline > maxbadline:
-                common_warn("giving up on log %s" % f.name)
-                return -1
-            first += 120  # move forward a bit
-            continue
-        if log_ts > ts:
-            last = mid-1
-        elif log_ts < ts:
-            first = mid+1
-        else:
-            seek_to_edge(f, log_ts, to_end)
-            break
-    fpos = f.tell()
-    common_debug("sought to %s (%d)" % (f.readline(), fpos))
-    f.seek(fpos)
-    return fpos
-
-
-def get_timestamp(f):
-    '''
-    Get the whole line from f. The current file position is
-    usually in the middle of the line.
-    Then get timestamp and return it.
-    '''
-    step = 30  # no line should be longer than 30
-    cnt = 1
-    current_pos = f.tell()
-    s = f.readline()
-    if not s:  # EOF?
-        f.seek(-step, 1)  # backup a bit
-        current_pos = f.tell()
-        s = f.readline()
-    while s and current_pos < f.tell():
-        if cnt*step >= f.tell():  # at 0?
-            f.seek(0)
-            break
-        f.seek(-cnt*step, 1)
-        s = f.readline()
-        cnt += 1
-    pos = f.tell()     # save the position ...
-    s = f.readline()   # get the line
-    f.seek(pos)        # ... and move the cursor back there
-    if not s:          # definitely EOF (probably cannot happen)
-        return None
-    return syslog_ts(s)
-
-
 def is_our_log(s, node_l):
-    return syslog2node(s) in node_l
+    return logtime.syslog2node(s) in node_l
 
 
 def log2node(log):
     return os.path.basename(os.path.dirname(log))
 
 
-def filter_log(sl, log_l):
-    '''
-    Filter list of messages to get only those from the given log
-    files list.
-    '''
-    node_l = [log2node(x) for x in log_l if x]
-    ret = [x for x in sl if is_our_log(x, node_l)]
-    common_debug("filter_log: %s in, %s out" % (len(sl), len(ret)))
-    return ret
-
-
-def first_log_lines(log_l):
-    '''
-    Return a list of all first lines of the logs.
-    '''
-    f_list = [open(x) for x in log_l if x]
-    l = [x.readline().rstrip() for x in f_list if x]
-    for x in f_list:
-        if x:
-            x.close()
-    return l
-
-
-def last_log_lines(log_l):
-    '''
-    Return a list of all last lines of the logs.
-    '''
-    f_list = [open(x) for x in log_l if x]
-    l = [x.readlines()[-1].rstrip() for x in f_list if x]
-    for x in f_list:
-        if x:
-            x.close()
-    return l
-
-
-class LogSyslog(object):
-    '''
-    Slice log, search log.
-    '''
-
-    def __init__(self, log_l, from_dt, to_dt):
-        self.log_l = log_l
-        self.f = {}
-        self.startpos = {}
-        self.endpos = {}
-        self.cache = {}
-        self.open_logs()
-        self.set_log_timeframe(from_dt, to_dt)
-
-    def open_log(self, log):
-        import bz2
-        import gzip
-        try:
-            if log.endswith(".bz2"):
-                self.f[log] = bz2.BZ2File(log)
-            elif log.endswith(".gz"):
-                self.f[log] = gzip.open(log)
-            else:
-                self.f[log] = open(log)
-        except IOError, msg:
-            common_err("open %s: %s" % (log, msg))
-
-    def open_logs(self):
-        for log in self.log_l:
-            common_debug("opening log %s" % log)
-            self.open_log(log)
-
-    def set_log_timeframe(self, from_dt, to_dt):
-        '''
-        Convert datetime to timestamps (i.e. seconds), then
-        find out start/end file positions. Logs need to be
-        already open.
-        '''
-        self.from_ts = make_time(from_dt)
-        self.to_ts = make_time(to_dt)
-        bad_logs = []
-        for log in self.f:
-            f = self.f[log]
-            start = log_seek(f, self.from_ts)
-            end = log_seek(f, self.to_ts, to_end=True)
-            if start == -1 or end == -1:
-                common_debug("%s is a bad log" % (log))
-                bad_logs.append(log)
-            else:
-                common_debug("%s start=%s, end=%s" % (log, start, end))
-                self.startpos[f] = start
-                self.endpos[f] = end
-        for log in bad_logs:
-            del self.f[log]
-            self.log_l.remove(log)
-
-    def get_match_line(self, f, relist):
-        '''
-        Get first line from f that matches one of
-        the REs in relist, but is not behind endpos[f].
-        if relist is empty, return all lines
-        '''
-        while f.tell() < self.endpos[f]:
-            fpos = f.tell()
-            s = f.readline().rstrip()
-            if not s:
-                continue
-            if not relist or any(r.search(s) for r in relist):
-                return s, fpos
-        return '', -1
-
-    def single_log_list(self, f, patt):
-        l = []
-        while True:
-            s = self.get_match_line(f, patt)[0]
-            if not s:
-                return l
-            l.append(s)
-        return l
-
-    def search_logs(self, log_l, relist):
-        '''
-        Search logs for any of the regexps in relist.
-        '''
-        fl = [self.f[f] for f in self.f if self.f[f].name in log_l]
-        for f in fl:
-            f.seek(self.startpos[f])
-        # get head lines of all nodes
-        top_line = [self.get_match_line(x, relist)[0] for x in fl]
-        top_line_ts = []
-        rm_idx_l = []
-        # calculate time stamps for head lines
-        for i in range(len(top_line)):
-            if not top_line[i]:
-                rm_idx_l.append(i)
-            else:
-                top_line_ts.append(syslog_ts(top_line[i]))
-        # remove files with no matches found
-        rm_idx_l.reverse()
-        for i in rm_idx_l:
-            del fl[i], top_line[i]
-        common_debug("search in %s" % ", ".join(f.name for f in fl))
-        if len(fl) == 0:  # nothing matched ?
-            return []
-        if len(fl) == 1:
-            # no need to merge if there's only one log
-            return [top_line[0]] + self.single_log_list(fl[0], relist)
-        # search through multiple logs, merge sorted by time
-        l = []
-        first = 0
-        while True:
-            for i in range(len(fl)):
-                try:
-                    if i == first:
-                        continue
-                    if top_line_ts[i] and top_line_ts[i] < top_line_ts[first]:
-                        first = i
-                except:
-                    pass
-            if not top_line[first]:
-                break
-            l.append(top_line[first])
-            top_line[first] = self.get_match_line(fl[first], relist)[0]
-            if not top_line[first]:
-                top_line_ts[first] = time.time()
-            else:
-                top_line_ts[first] = syslog_ts(top_line[first])
-        return l
-
-    def get_matches(self, re_l, log_l=None):
-        '''
-        Return a list of log messages which
-        match one of the regexes in re_l.
-        if re_l is an empty list, return all lines.
-        '''
-        log_l = log_l or self.log_l
-        return filter_log(self.search_logs(log_l, re_l), log_l)
-
-
-def human_date(dt):
-    'Some human date representation. Date defaults to now.'
-    if not dt:
-        dt = make_datetime_naive(datetime.datetime.now())
-    # here, dt is in UTC. Convert to localtime:
-    localdt = datetime.datetime.fromtimestamp(datetime_to_timestamp(dt))
-    # drop microseconds
-    return re.sub("[.].*", "", "%s %s" % (localdt.date(), localdt.time()))
-
-
 def is_log(p):
     return os.path.isfile(p) and os.path.getsize(p) > 0
 
+_PE_NUM_RE = re.compile("pe-[^-]+-([0-9]+)[.]")
+
+
+def get_pe_num(pe_file):
+    m = _PE_NUM_RE.search(pe_file)
+    if m:
+        return m.group(1)
+    return "-1"
 
 def pe_file_in_range(pe_f, a):
-    pe_num = get_pe_num(pe_f)
-    if not a or (a[0] <= int(pe_num) <= a[1]):
+    if not a:
+        return pe_f
+    if a[0] <= int(get_pe_num(pe_f)) <= a[1]:
         return pe_f
     return None
 
 
 def read_log_info(log):
     'Read <log>.info and return logfile and next pos'
-    s = file2str("%s.info" % log)
-    try:
-        logf, pos = s.split()
+    s = utils.file2str(log + ".info")
+    m = re.match(r"^(.+)\s+(\d+)$", s or '')
+    if m:
+        logf, pos = m.groups()
         return logf, int(pos)
-    except:
-        warn_once("crm report too old, you need to update cluster-glue")
-        return '', -1
-
+    return '', -1
 
-def update_loginfo(rptlog, logfile, oldpos, appended_file):
-    'Update <log>.info with new next pos'
-    newpos = oldpos + os.stat(appended_file).st_size
-    try:
-        f = open("%s.info" % rptlog, "w")
-        f.write("%s %d\n" % (logfile, newpos))
-        f.close()
-    except IOError, msg:
-        common_err("couldn't the update %s.info: %s" % (rptlog, msg))
-
-
-def get_pe_num(pe_file):
-    try:
-        return re.search("pe-[^-]+-([0-9]+)[.]", pe_file).group(1)
-    except:
-        return "-1"
 
-
-def run_graph_msg_actions(msg):
+def append_newlogs(outdir, to_update):
     '''
-    crmd: [13667]: info: run_graph: Transition 399 (Complete=5,
-    Pending=1, Fired=1, Skipped=0, Incomplete=3,
-    Source=...
-    Returns dict: d[Pending]=np, d[Fired]=nf, ...
+    Append new logs fetched from nodes.
+    Update <log>.info with new next pos
     '''
-    d = {}
-    s = msg
-    while True:
-        r = re.search("([A-Z][a-z]+)=([0-9]+)", s)
-        if not r:
-            return d
-        d[r.group(1)] = int(r.group(2))
-        s = s[r.end():]
-
-
-def get_pe_file_num_from_msg(msg):
-    """
-    Get PE file name and number from log message
-    Returns: (file, num)
-    """
-    msg_a = msg.split()
-    if len(msg_a) < 5:
-        # this looks too short
-        common_warn("log message <%s> unexpected format, please report a bug" % msg)
-        return ("", "-1")
-    return (msg_a[-1], get_pe_num(msg_a[-1]))
-
-
-def transition_start_re(number_re):
-    """
-    Return regular expression matching transition start.
-    number_re can be a specific transition or a regexp matching
-    any transition number.
-    The resulting RE has groups
-    1: transition number
-    2: full path of pe file
-    3: pe file number
-    """
-    m1 = "crmd.*Processing graph ([0-9]+).*derived from (.*/pe-[^-]+-(%s)[.]bz2)" % (number_re)
-    m2 = "pengine.*Transition ([0-9]+):.*([^ ]*/pe-[^-]+-(%s)[.]bz2)" % (number_re)
-    try:
-        return re.compile("(?:%s)|(?:%s)" % (m1, m2))
-    except re.error, e:
-        common_debug("RE compilation failed: %s" % (e))
-        raise ValueError("Error in search expression")
-
-
-def transition_end_re(number_re):
-    """
-    Return RE matching transition end.
-    See transition_start_re for more details.
-    """
-    try:
-        return re.compile("crmd.*Transition ([0-9]+).*Source=(.*/pe-[^-]+-(%s)[.]bz2).:.*(Stopped|Complete|Terminated)" % (number_re))
-    except re.error, e:
-        common_debug("RE compilation failed: %s" % (e))
-        raise ValueError("Error in search expression")
-
-
-def find_transition_end(trnum, messages):
-    """
-    Find the end of the given transition in the list of messages
-    """
-    matcher = transition_end_re(trnum)
-    for msg in messages:
-        if matcher.search(msg):
-            return msg
-    matcher = transition_start_re(str(int(trnum) + 1))
-    for msg in messages:
-        if matcher.search(msg):
-            return msg
-    return None
-
-
-def find_transition_end_msg(transition_start_msg, trans_msg_l):
-    """
-    Given the start of a transition log message, find
-    and return the end of the transition log messages.
-    """
-    pe_file, pe_num = get_pe_file_num_from_msg(transition_start_msg)
-    if pe_num == "-1":
-        common_warn("%s: strange, transition number not found" % pe_file)
-        return ""
-    return find_transition_end(pe_num, trans_msg_l) or ""
-
+    if not os.path.isdir(outdir):
+        return
+    for node, rptlog, logfile, nextpos in to_update:
+        fl = glob.glob("%s/*%s*" % (outdir, node))
+        if not fl:
+            continue
+        utils.append_file(rptlog, fl[0])
 
-def trans_str(node, pe_file):
-    '''Convert node,pe_file to transition string.'''
-    return "%s:%s" % (node, os.path.basename(pe_file).replace(".bz2", ""))
+        newpos = nextpos + os.stat(fl[0]).st_size
+        try:
+            f = open(rptlog + ".info", "w")
+            f.write("%s %d\n" % (logfile, newpos))
+            f.close()
+        except IOError, msg:
+            common_err("couldn't update %s.info: %s" % (rptlog, msg))
 
 
 def rpt_pe2t_str(rpt_pe_file):
-    '''Convert report's pe_file path to transition sting.'''
+    '''Convert report's pe_file path to transition string.'''
     node = os.path.basename(os.path.dirname(os.path.dirname(rpt_pe_file)))
-    return trans_str(node, rpt_pe_file)
-
-
-class Transition(object):
-    '''
-    Capture transition related information.
-    '''
-
-    def __init__(self, start_msg, end_msg):
-        self.start_msg = start_msg
-        self.end_msg = end_msg
-        self.tags = set()
-        self.pe_file, self.pe_num = get_pe_file_num_from_msg(start_msg)
-        self.dc = syslog2node(start_msg)
-        self.start_ts = syslog_ts(start_msg)
-        if end_msg:
-            self.end_ts = syslog_ts(end_msg)
-        else:
-            common_warn("end of transition %s not found in logs (transition not complete yet?)" % self)
-            self.end_ts = datetime_to_timestamp(datetime.datetime(2525, 1, 1))
-
-    def __str__(self):
-        return self.get_node_file()
-
-    def get_node_file(self):
-        return trans_str(self.dc, self.pe_file)
-
-    def actions_count(self):
-        if self.end_msg:
-            act_d = run_graph_msg_actions(self.end_msg)
-            return sum(act_d.values())
-        else:
-            return -1
-
-    def shortname(self):
-        return os.path.basename(self.pe_file).replace(".bz2", "")
-
-    def transition_info(self):
-        print "Transition %s (%s -" % (self, shorttime(self.start_ts)),
-        if self.end_msg:
-            print "%s):" % shorttime(self.end_ts)
-            act_d = run_graph_msg_actions(self.end_msg)
-            total = sum(act_d.values())
-            s = ", ".join(["%d %s" % (act_d[x], x) for x in act_d if act_d[x]])
-            print "\ttotal %d actions: %s" % (total, s)
-        else:
-            print "[unfinished])"
+    return logparser.trans_str(node, rpt_pe_file)
 
 
-def mkarchive(dir):
+def mkarchive(idir):
     "Create an archive from a directory"
     home = userdir.gethomedir()
     if not home:
         common_err("no home directory, nowhere to pack report")
         return False
-    archive = '%s.tar.bz2' % os.path.join(home, os.path.basename(dir))
+    archive = '%s.tar.bz2' % os.path.join(home, os.path.basename(idir))
     cmd = "tar -C '%s/..' -cj -f '%s' %s" % \
-        (dir, archive, os.path.basename(dir))
-    if pipe_cmd_nosudo(cmd) != 0:
+        (idir, archive, os.path.basename(idir))
+    if utils.pipe_cmd_nosudo(cmd) != 0:
         common_err('could not pack report, command "%s" failed' % cmd)
         return False
     else:
@@ -663,14 +147,9 @@ class Report(object):
         self.loc = None
         self.ready = False
         self.nodecolor = {}
-        self.logobj = None
+        self.logparser = None
         self.desc = None
-        self._transitions = []
-        self.cibgrp_d = {}
-        self.cibcln_d = {}
-        self.cibrsc_l = []
-        self.cibnotcloned_l = []
-        self.cibcloned_l = []
+        self.cib = None
         self.node_l = []
         self.last_live_update = 0
         self.detail = 0
@@ -679,7 +158,7 @@ class Report(object):
         # change_origin may be 0, CH_SRC, CH_TIME, CH_UPD
         # depending on the change_origin, we update our attributes
         self.change_origin = CH_SRC
-        set_year()
+        logtime.set_year()
 
     def error(self, s):
         common_err("%s: %s" % (self.source, s))
@@ -688,21 +167,22 @@ class Report(object):
         common_warn("%s: %s" % (self.source, s))
 
     def rsc_list(self):
-        return self.cibgrp_d.keys() + self.cibcln_d.keys() + self.cibrsc_l
+        return self.cib.resources()
 
     def node_list(self):
         return self.node_l
 
     def peinputs_list(self):
-        return [x.pe_num for x in self._transitions]
+        if self.logparser:
+            return [x.pe_num for x in self.logparser.get_transitions()]
+        return []
 
     def session_subcmd_list(self):
         return ["save", "load", "pack", "delete", "list", "update"]
 
     def session_list(self):
-        l = os.listdir(self.get_session_dir(None))
-        l.sort()
-        return l
+        d = self.get_session_dir(None)
+        return sorted(os.listdir(d)) if os.path.isdir(d) else []
 
     def unpack_report(self, tarball):
         '''
@@ -728,7 +208,7 @@ class Report(object):
         if os.path.isdir(loc):
             if (os.stat(tarball).st_mtime - os.stat(loc).st_mtime) < 60:
                 return loc
-            rmdir_r(loc)
+            utils.rmdir_r(loc)
         cwd = os.getcwd()
         if parentdir:
             try:
@@ -737,7 +217,7 @@ class Report(object):
                 self.error(msg)
                 return None
         try:
-            rc, tf_loc = get_stdout("tar -t%s < %s 2> /dev/null | head -1" % (tar_unpack_option, quote(bfname)))
+            rc, tf_loc = utils.get_stdout("tar -t%s < %s 2> /dev/null | head -1" % (tar_unpack_option, utils.quote(bfname)))
             if os.path.abspath(tf_loc) != os.path.abspath(loc):
                 common_debug("top directory in tarball: %s, doesn't match the tarball name: %s" %
                              (tf_loc, loc))
@@ -746,7 +226,7 @@ class Report(object):
             common_err("%s: %s" % (tarball, msg))
             return None
         common_debug("tar -x%s < %s" % (tar_unpack_option, bfname))
-        rc = pipe_cmd_nosudo("tar -x%s < %s" % (tar_unpack_option, bfname))
+        rc = utils.pipe_cmd_nosudo("tar -x%s < %s" % (tar_unpack_option, bfname))
         if self.source == "live":
             os.remove(bfname)
         os.chdir(cwd)
@@ -754,17 +234,18 @@ class Report(object):
             return None
         return loc
 
-    def pe_report_path(self, t_obj):
-        pe_base = os.path.basename(t_obj.pe_file)
-        return os.path.join(self.loc, t_obj.dc, "pengine", pe_base)
-
     def short_pe_path(self, pe_file):
         return pe_file.replace("%s/" % self.loc, "")
 
     def get_nodes(self):
+        def check_node(p):
+            pp = os.path.join(self.loc, p)
+            if os.path.isfile(os.path.join(pp, 'cib.xml')):
+                return p
+            return os.path.isdir(pp) and self.find_node_log(p)
         return sorted([os.path.basename(p)
                        for p in os.listdir(self.loc)
-                       if self.find_node_log(p) is not None])
+                       if check_node(p)])
 
     def check_nodes(self):
         'Verify if the nodes in cib match the nodes in the report.'
@@ -821,7 +302,7 @@ class Report(object):
 
     def find_node_log(self, node):
         p = os.path.join(self.loc, node)
-        for lf in ("ha-log.txt", "messages", "journal.log", "pacemaker.log"):
+        for lf in _LOG_FILES:
             if is_log(os.path.join(p, lf)):
                 return os.path.join(p, lf)
         return None
@@ -837,19 +318,6 @@ class Report(object):
                 self.warn("no log found for node %s" % node)
         return l
 
-    def append_newlogs(self, a):
-        '''
-        Append new logs fetched from nodes.
-        '''
-        if not os.path.isdir(self.outdir):
-            return
-        for node, rptlog, logfile, nextpos in a:
-            fl = glob.glob("%s/*%s*" % (self.outdir, node))
-            if not fl:
-                continue
-            append_file(rptlog, fl[0])
-            update_loginfo(rptlog, logfile, nextpos, fl[0])
-
     def unpack_new_peinputs(self, node, pe_l):
         '''
         Untar PE inputs fetched from nodes.
@@ -860,7 +328,7 @@ class Report(object):
         if not fl:
             return -1
         u_dir = os.path.join(self.loc, node)
-        return pipe_cmd_nosudo("tar -C %s -x < %s" % (u_dir, fl[0]))
+        return utils.pipe_cmd_nosudo("tar -C %s -x < %s" % (u_dir, fl[0]))
 
     def read_new_log(self, node):
         '''
@@ -879,57 +347,57 @@ class Report(object):
             return []
         return f.readlines()
 
-    def update_live_report(self):
+    def update_live_report(self, next_loglines, next_peinputs):
         '''
         Update the existing live report, if it's older than
         self.short_live_recent:
         - append newer logs
         - get new PE inputs
+        TODO: FIXME: broken now
         '''
-        a = []
-        common_info("fetching new logs, please wait ...")
+        common_info("Fetching updated logs from cluster nodes. Please wait...")
+        common_debug("Candidate logs: %s" % (self.log_l))
+        to_update = []
         for rptlog in self.log_l:
             node = log2node(rptlog)
             logf, pos = read_log_info(rptlog)
             if logf:
-                a.append([node, rptlog, logf, pos])
-        if not a:
-            common_info("no elligible logs found")
+                common_debug("Updating %s : %s : %s : %s" % (node, rptlog, logf, pos))
+                to_update.append([node, rptlog, logf, pos])
+        if not to_update:
+            common_info("No updatable logs found (missing .info for logs)")
             return False
-        rmdir_r(self.outdir)
-        rmdir_r(self.errdir)
+
+        utils.rmdir_r(self.outdir)
+        utils.rmdir_r(self.errdir)
         self.last_live_update = time.time()
-        rc1 = next_loglines(a, self.outdir, self.errdir)
-        self.append_newlogs(a)
-        node_pe_l = []
-        for node in [x[0] for x in a]:
-            log_l = self.read_new_log(node)
-            if not log_l:
-                continue
-            pe_l = []
-            for new_t_obj in self.list_transitions(log_l, future_pe=True):
-                self._new_transition(new_t_obj)
-                pe_l.append(new_t_obj.pe_file)
-            if pe_l:
-                node_pe_l.append([node, pe_l])
-        rmdir_r(self.outdir)
-        rmdir_r(self.errdir)
-        if not node_pe_l:
-            return rc1
+
+        end_time = self._str_dt(self.get_rpt_dt(self.to_dt, "bottom"))
+        rc1 = next_loglines(to_update, self.outdir, self.errdir, end_time)
+        append_newlogs(self.outdir, to_update)
+
+        # read new logs
+        # find any missing pefiles
+        # return list of missing pefiles
+        # fetch missing pefiles from nodes
+        # unpack missing pefiles
+        # node_pe_l: [(node, [pefile ...]) ...]
+        node_pe_l = self.logparser.scan(mode='refresh')
         rc2 = next_peinputs(node_pe_l, self.outdir, self.errdir)
         unpack_rc = 0
         for node, pe_l in node_pe_l:
             unpack_rc |= self.unpack_new_peinputs(node, pe_l)
         rc2 |= (unpack_rc == 0)
-        rmdir_r(self.outdir)
-        rmdir_r(self.errdir)
+        utils.rmdir_r(self.outdir)
+        utils.rmdir_r(self.errdir)
+
         return rc1 and rc2
 
     def get_live_report(self):
-        if not acquire_lock(self.report_cache_dir):
+        if not utils.acquire_lock(self.report_cache_dir):
             return None
         loc = self.new_live_report()
-        release_lock(self.report_cache_dir)
+        utils.release_lock(self.report_cache_dir)
         return loc
 
     def manage_live_report(self, force=False, no_live_update=False):
@@ -951,11 +419,17 @@ class Report(object):
             # try just to refresh the live report
             if self.to_dt or self.is_live_very_recent() or no_live_update:
                 return self._live_loc()
+            _HAS_PARALLAX = False
+            try:
+                from .crm_pssh import next_loglines, next_peinputs
+                _HAS_PARALLAX = True
+            except:
+                pass
             if _HAS_PARALLAX:
-                if not acquire_lock(self.report_cache_dir):
+                if not utils.acquire_lock(self.report_cache_dir):
                     return None
-                rc = self.update_live_report()
-                release_lock(self.report_cache_dir)
+                rc = self.update_live_report(next_loglines, next_peinputs)
+                utils.release_lock(self.report_cache_dir)
                 if rc:
                     self.set_change_origin(CH_UPD)
                     return self._live_loc()
@@ -977,24 +451,24 @@ class Report(object):
             return None
 
         d = self._live_loc()
-        rmdir_r(d)
+        utils.rmdir_r(d)
         tarball = "%s.tar.bz2" % d
         to_option = ""
         if self.to_dt:
-            to_option = "-t '%s'" % human_date(self.to_dt)
+            to_option = "-t '%s'" % logtime.human_date(self.to_dt)
         nodes_option = ""
         if self.setnodes:
             nodes_option = "'-n %s'" % ' '.join(self.setnodes)
-        if pipe_cmd_nosudo("mkdir -p %s" % os.path.dirname(d)) != 0:
+        if utils.pipe_cmd_nosudo("mkdir -p %s" % os.path.dirname(d)) != 0:
             return None
         common_info("Retrieving information from cluster nodes, please wait...")
-        rc = pipe_cmd_nosudo("%s -Z -Q -f '%s' %s %s %s %s" %
-                             (extcmd,
-                              self.from_dt.ctime(),
-                              to_option,
-                              nodes_option,
-                              str(config.core.report_tool_options),
-                              d))
+        rc = utils.pipe_cmd_nosudo("%s -Z -Q -f '%s' %s %s %s %s" %
+                                   (extcmd,
+                                    self.from_dt.ctime(),
+                                    to_option,
+                                    nodes_option,
+                                    str(config.core.report_tool_options),
+                                    d))
         if rc != 0:
             if os.path.isfile(tarball):
                 self.warn("report thinks it failed, proceeding anyway")
@@ -1039,6 +513,8 @@ class Report(object):
         Set the detail level.
         '''
         self.detail = int(detail_lvl)
+        if self.logparser:
+            self.logparser.detail = self.detail
 
     def set_nodes(self, *args):
         '''
@@ -1057,39 +533,7 @@ class Report(object):
         Get some information from the report's CIB (node list,
         resource list, groups). If "live" then use cibadmin.
         '''
-        cib_elem = None
-        cib_f = self.get_cib_loc()
-        if cib_f:
-            cib_elem = file2cib_elem(cib_f)
-        if cib_elem is None:
-            return  # no cib?
-        try:
-            conf = cib_elem.find("configuration")
-        except:  # bad cib?
-            return
-        self.cibrsc_l = [x.get("id")
-                         for x in conf.xpath("//resources//primitive")]
-        self.cibgrp_d = {}
-        for grp in conf.xpath("//resources/group"):
-            self.cibgrp_d[grp.get("id")] = get_rsc_children_ids(grp)
-        self.cibcln_d = {}
-        self.cibcloned_l = []
-        for cln in conf.xpath("//resources/clone") + \
-                conf.xpath("//resources/master"):
-            try:
-                self.cibcln_d[cln.get("id")] = get_prim_children_ids(cln)
-                self.cibcloned_l += self.cibcln_d[cln.get("id")]
-            except:
-                pass
-        self.cibnotcloned_l = [x for x in self.cibrsc_l if x not in self.cibcloned_l]
-
-    def _new_transition(self, transition):
-        t_obj = self.find_transition(transition.get_node_file())
-        if t_obj:
-            common_debug("duplicate %s, replacing older PE file" % transition)
-            self._transitions.remove(t_obj)
-        common_debug("appending new PE %s" % transition)
-        self._transitions.append(transition)
+        self.cib = logparser.CibInfo(self.loc)
 
     def set_node_colors(self):
         i = 0
@@ -1097,69 +541,6 @@ class Report(object):
             self.nodecolor[n] = self.nodecolors[i]
             i = (i+1) % len(self.nodecolors)
 
-    def get_invoke_trans_msgs(self, msg_l):
-        te_invoke_patt = transition_start_re("[0-9]+")
-        return (x for x in msg_l if te_invoke_patt.search(x))
-
-    def get_all_trans_msgs(self, msg_l=None):
-        trans_re_l = (transition_start_re("[0-9]+"), transition_end_re("[0-9]+"))
-        if msg_l is None:
-            return self.logobj.get_matches(trans_re_l)
-        else:
-            return (x for x in msg_l if trans_re_l[0].search(x) or trans_re_l[1].search(x))
-
-    def is_empty_transition(self, t0, t1):
-        if not (t0 and t1):
-            return False
-        old_pe_l_file = self.pe_report_path(t0)
-        new_pe_l_file = self.pe_report_path(t1)
-        if not (os.path.isfile(old_pe_l_file) or os.path.isfile(new_pe_l_file)):
-            return True
-        num_actions = t1.actions_count()
-        old_cib = compressed_file_to_cib(old_pe_l_file)
-        new_cib = compressed_file_to_cib(new_pe_l_file)
-        if old_cib is None or new_cib is None:
-            return num_actions == 0
-        prev_epoch = old_cib.attrib.get("epoch", "0")
-        epoch = new_cib.attrib.get("epoch", "0")
-        prev_admin_epoch = old_cib.attrib.get("admin_epoch", "0")
-        admin_epoch = new_cib.attrib.get("admin_epoch", "0")
-        return num_actions == 0 and epoch == prev_epoch and admin_epoch == prev_admin_epoch
-
-    def list_transitions(self, msg_l=None, future_pe=False):
-        '''
-        List transitions by reading logs.
-        Empty transitions are skipped.
-        Some callers need original PE file path (future_pe),
-        otherwise we produce the path within the report and check
-        if the transition files exist.
-        NB: future_pe means that the peinput has not been fetched yet.
-        If the caller doesn't provide the message list, then we
-        build it from the collected log files (self.logobj).
-        Otherwise, we get matches for transition patterns.
-
-        WARN: We rely here on the message format (syslog,
-        pacemaker).
-        '''
-        trans_msg_l = self.get_all_trans_msgs(msg_l)
-        trans_start_msg_l = self.get_invoke_trans_msgs(trans_msg_l)
-        prev_transition = None
-        for msg in trans_start_msg_l:
-            transition_end_msg = find_transition_end_msg(msg, trans_msg_l)
-            t_obj = Transition(msg, transition_end_msg)
-            if self.is_empty_transition(prev_transition, t_obj):
-                common_debug("skipping empty transition (%s)" % t_obj)
-                continue
-            self._set_transition_tags(t_obj)
-            if not future_pe:
-                pe_l_file = self.pe_report_path(t_obj)
-                if not os.path.isfile(pe_l_file):
-                    warn_once("%s in the logs, but not in the report" % t_obj)
-                    continue
-            common_debug("found PE input: %s" % t_obj)
-            prev_transition = t_obj
-            yield t_obj
-
     def _report_setup_source(self):
         constants.pcmk_version = None
         # is this an hb_report or a crm_report?
@@ -1174,7 +555,7 @@ class Report(object):
                     self._creator = "crm_report"
                 else:
                     self._creator = 'unknown'
-                set_year(yr)
+                logtime.set_year(yr)
                 break
         else:
             self.error("Invalid report: No description found")
@@ -1204,15 +585,10 @@ class Report(object):
         elif self.change_origin == CH_UPD:
             self._report_setup_update()
 
-        self.logobj = LogSyslog(self.log_l,
-                                self.from_dt,
-                                self.to_dt)
-
-        if self.change_origin != CH_UPD:
-            common_debug("getting transitions from logs")
-            self._transitions = []
-            for new_t_obj in self.list_transitions():
-                self._new_transition(new_t_obj)
+        if self.logparser is None:
+            self.logparser = logparser.LogParser(self.loc, self.cib, self.log_l, self.detail)
+            self.logparser.scan()
+        self.logparser.set_timeframe(self.from_dt, self.to_dt)
 
         self.ready = self.check_report()
         self.set_change_origin(0)
@@ -1242,46 +618,26 @@ class Report(object):
 
     def refresh_source(self, force=False):
         '''
-        Refresh report from live.
+        Refresh report from live,
+        or clear metadata cache for non-live report
         '''
-        if self.source != "live":
-            self.error("refresh not supported")
-            return False
-        self.last_live_update = 0
-        self.loc = self.manage_live_report(force=force)
-        self.report_setup()
-        return self.ready
-
-    def get_patt_l(self, type):
-        '''
-        get the list of patterns for this type, up to and
-        including current detail level
-        '''
-        cib_f = None
-        if self.source != "live":
-            cib_f = self.get_cib_loc()
-        if is_pcmk_118(cib_f=cib_f):
-            from .log_patterns_118 import log_patterns
-        else:
-            from .log_patterns import log_patterns
-        if type not in log_patterns:
-            common_error("%s not featured in log patterns" % type)
-            return None
-        return log_patterns[type][0:self.detail+1]
-
-    def build_re(self, type, args):
-        '''
-        Prepare a regex string for the type and args.
-        For instance, "resource" and rsc1, rsc2, ...
-        '''
-        patt_l = self.get_patt_l(type)
-        if not patt_l:
-            return None
-        if not args:
-            re_l = mk_re_list(patt_l, "")
+        if self.source == "live":
+            self.last_live_update = 0
+            self.loc = self.manage_live_report(force=force)
+            self.report_setup()
+            return self.ready
         else:
-            re_l = mk_re_list(patt_l, r'(?:%s)' % "|".join(args))
-        return re_l
+            print("Refreshing log data...")
+            if not self.ready:
+                self.set_change_origin(CH_TIME)
+                self.prepare_source()
+            missing_pes = self.logparser.scan(mode='force')
+            if len(missing_pes):
+                print("%d transitions, %d events and %d missing PE input files." %
+                      tuple(self.logparser.count() + (len(missing_pes),)))
+            else:
+                print("%d transitions, %d events." %
+                      self.logparser.count())
 
     def _str_nodecolor(self, node, s):
         try:
@@ -1294,13 +650,6 @@ class Report(object):
             s = s.replace("${", "$.{")
             return "${%s}%s${NORMAL}" % (clr, s)
 
-    def disp(self, s):
-        'color output'
-        node = syslog2node(s)
-        if node is None:
-            return s
-        return self._str_nodecolor(node, s)
-
     def match_filter_out(self, s):
         for regexp in self.log_filter_out_re:
             if regexp.search(s):
@@ -1308,22 +657,20 @@ class Report(object):
         return False
 
     def display_logs(self, l):
+        def color_nodes(s):
+            node = logtime.syslog2node(s)
+            return self._str_nodecolor(node, s) if node is not None else s
+
         if self.log_filter_out_re:
-            l = [x for x in l if not self.match_filter_out(x)]
-        page_string('\n'.join([self.disp(x) for x in l]))
+            utils.page_gen(color_nodes(x) for x in l if not self.match_filter_out(x))
+        else:
+            utils.page_gen(color_nodes(x) for x in l)
 
-    def show_logs(self, log_l=None, re_l=[]):
+    def show_logs(self, nodes=None):
         '''
-        Print log lines, either matched by re_l or all.
+        Print log lines, either all or matching a given node
         '''
-        def process(r):
-            return re.compile(r) if isinstance(r, basestring) else r
-        if not log_l:
-            log_l = self.log_l
-        if not log_l:
-            self.error("no logs found")
-            return
-        self.display_logs(self.logobj.get_matches([process(r) for r in re_l], log_l))
+        self.display_logs(self.logparser.get_logs(nodes=nodes))
 
     def get_source(self):
         return self.source
@@ -1346,15 +693,16 @@ class Report(object):
         output'''
         max_output = 20
         s = ""
-        if len(self._transitions) > max_output:
+        transitions = list(self.logparser.get_transitions())
+        if len(transitions) > max_output:
             s = "... "
 
         def fmt(t):
-            if 'error' in t.tags:
+            if len(t.tags):
                 return self._str_nodecolor(t.dc, t.pe_num) + "*"
             return self._str_nodecolor(t.dc, t.pe_num)
 
-        return "%s%s" % (s, ' '.join([fmt(x) for x in self._transitions[-max_output:]]))
+        return "%s%s" % (s, ' '.join([fmt(x) for x in transitions[-max_output:]]))
 
     def get_rpt_dt(self, dt, whence):
         '''
@@ -1362,23 +710,31 @@ class Report(object):
         The ts input is the time stamp set by user (it can be
         empty). whence is set either to "top" or "bottom".
         '''
+        def first_line(l):
+            l.seek(0)
+            return l.readline().rstrip()
+
+        def last_line(l):
+            '''Note: assumes that the last log line isn't > 2048 characters'''
+            l.seek(-2048, os.SEEK_END)
+            return l.readlines()[-1].rstrip()
+
         if dt:
             return dt
         try:
             if whence == "top":
-                myts = min([syslog_ts(x) for x in first_log_lines(self.log_l)])
+                myts = min(logtime.syslog_ts(x) for x in (first_line(l) for l in self.logparser.fileobjs))
             elif whence == "bottom":
-                myts = max([syslog_ts(x) for x in last_log_lines(self.log_l)])
+                myts = max(logtime.syslog_ts(x) for x in (last_line(l) for l in self.logparser.fileobjs))
             if myts:
-                import dateutil.tz
-                return make_datetime_naive(datetime.datetime.fromtimestamp(myts).replace(tzinfo=dateutil.tz.tzlocal()))
+                return utils.timestamp_to_datetime(myts)
             common_debug("No log lines with timestamps found in report")
-        except Exception, e:
+        except Exception as e:
             common_debug("Error: %s" % (e))
         return None
 
     def _str_dt(self, dt):
-        return dt and human_date(dt) or "--/--/-- --:--:--"
+        return dt and logtime.human_date(dt) or "--/--/-- --:--:--"
 
     def info(self):
         '''
@@ -1390,18 +746,19 @@ class Report(object):
         created_on = self.get_desc_line("Date") or self._creation_time
         created_by = self.get_desc_line("By") or self._creator
 
-        page_string('\n'.join(("Source: %s" % self.source,
-                               "Created on: %s" % (created_on),
-                               "By: %s" % (created_by),
-                               "Period: %s - %s" %
-                               (self._str_dt(self.get_rpt_dt(self.from_dt, "top")),
-                                self._str_dt(self.get_rpt_dt(self.to_dt, "bottom"))),
-                               "Nodes: %s" % ' '.join([self._str_nodecolor(x, x)
-                                                       for x in self.node_l]),
-                               "Groups: %s" % ' '.join(self.cibgrp_d.keys()),
-                               "Resources: %s" % ' '.join(self.cibrsc_l),
-                               "Transitions: %s" % self.short_peinputs_list()
-                               )))
+        utils.page_string(
+            '\n'.join(("Source: %s" % self.source,
+                       "Created on: %s" % (created_on),
+                       "By: %s" % (created_by),
+                       "Period: %s - %s" %
+                       (self._str_dt(self.get_rpt_dt(self.from_dt, "top")),
+                        self._str_dt(self.get_rpt_dt(self.to_dt, "bottom"))),
+                       "Nodes: %s" % ' '.join([self._str_nodecolor(x, x)
+                                               for x in self.node_l]),
+                       "Groups: %s" % ' '.join(self.cib.groups.keys()),
+                       "Clones: %s" % ' '.join(self.cib.clones.keys()),
+                       "Resources: %s" % ' '.join(self.cib.primitives),
+                       "Transitions: %s" % self.short_peinputs_list())))
 
     def events(self):
         '''
@@ -1409,19 +766,12 @@ class Report(object):
         '''
         if not self.prepare_source():
             return False
-        rsc_l = self.cibnotcloned_l
-        rsc_l += ["%s(?::[0-9]+)?" % x for x in self.cibcloned_l]
-        all_re_l = self.build_re("resource", rsc_l) + \
-            self.build_re("node", self.node_l) + \
-            self.build_re("events", [])
-        if not all_re_l:
-            self.error("no resources or nodes found")
-            return False
-        return self.show_logs(re_l=all_re_l)
+
+        self.display_logs(self.logparser.get_events())
 
     def find_transition(self, t_str):
-        for t_obj in self._transitions:
-            if t_obj.get_node_file() == t_str:
+        for t_obj in self.logparser.get_transitions():
+            if str(t_obj) == t_str:
                 return t_obj
         return None
 
@@ -1436,120 +786,66 @@ class Report(object):
             common_err("%s: transition not found" % rpt_pe_file)
             return False
         # limit the log scope temporarily
-        self.logobj.set_log_timeframe(t_obj.start_ts, t_obj.end_ts)
+        self.logparser.set_timeframe(t_obj.start_ts, t_obj.end_ts)
         if full_log:
             self.show_logs()
         else:
             t_obj.transition_info()
             self.events()
-        self.logobj.set_log_timeframe(self.from_dt, self.to_dt)
+        self.logparser.set_timeframe(self.from_dt, self.to_dt)
         return True
 
-    def show_transition_tags(self, rpt_pe_file):
+    def get_transition_tags(self, rpt_pe_file):
         '''
-        prints the tags for the transition
+        Returns the tags for the transition as a sorted list
         '''
         t_obj = self.find_transition(rpt_pe2t_str(rpt_pe_file))
         if not t_obj:
             common_err("%s: transition not found" % rpt_pe_file)
-            return False
-        for tag in t_obj.tags:
-            print tag
-        return True
-
-    def _set_transition_tags(self, transition):
-        # limit the log scope temporarily
-        self.logobj.set_log_timeframe(transition.start_ts, transition.end_ts)
-
-        # search log, match regexes to tags
-        regexes = [
-            re.compile(r"(error|unclean)", re.I),
-            re.compile(r"crmd.*notice:\s+Operation\s+([^:]+):\s+(?!ok)"),
-        ]
-
-        for l in self.logobj.get_matches(regexes):
-            for rx in regexes:
-                m = rx.search(l)
-                if m:
-                    transition.tags.add(m.group(1).lower())
-
-        self.logobj.set_log_timeframe(self.from_dt, self.to_dt)
+            return None
+        return sorted(t_obj.tags)
 
     def resource(self, *args):
         '''
-        Show resource relevant logs.
+        Show resource events.
         '''
         if not self.prepare_source(no_live_update=self.prevent_live_update()):
             return False
-        # expand groups (if any)
-        expanded_l = []
-        for a in args:
-            # add group members, groups aren't logged
-            if a in self.cibgrp_d:
-                expanded_l += self.cibgrp_d[a]
-            # add group members, groups aren't logged
-            elif a in self.cibcln_d:
-                expanded_l += self.cibcln_d[a]
-            else:
-                expanded_l.append(a)
-        exp_cloned_l = []
-        for rsc in expanded_l:
-            if rsc in self.cibcloned_l:
-                exp_cloned_l.append("%s(?::[0-9]+)?" % rsc)
-            else:
-                exp_cloned_l.append(rsc)
-        rsc_re_l = self.build_re("resource", exp_cloned_l)
-        if not rsc_re_l:
-            return False
-        self.show_logs(re_l=rsc_re_l)
+        self.display_logs(self.logparser.get_events(event="resource", resources=args))
 
     def node(self, *args):
         '''
-        Show node relevant logs.
+        Show node events.
         '''
         if not self.prepare_source(no_live_update=self.prevent_live_update()):
             return False
-        node_re_l = self.build_re("node", args)
-        if not node_re_l:
-            return False
-        self.show_logs(re_l=node_re_l)
+        self.display_logs(self.logparser.get_events(event="node", nodes=args))
 
-    def log(self, *args):
+    def show_log(self, *nodes):
         '''
         Show logs for a node or all nodes.
         '''
         if not self.prepare_source():
             return False
-        if not args:
-            self.show_logs()
-        else:
-            l = []
-            for n in args:
-                if n not in self.node_l:
-                    self.warn("%s: no such node" % n)
-                    continue
-                l.append(self.find_node_log(n))
-            if not l:
-                return False
-            self.show_logs(log_l=l)
-
-    pe_details_header = "Date       Start    End       Filename      Client     User       Origin"
-    pe_details_separator = "====       =====    ===       ========      ======     ====       ======"
+        self.show_logs(nodes=nodes)
 
     def pe_detail_format(self, t_obj):
         l = [
-            shortdate(t_obj.start_ts),
-            shorttime(t_obj.start_ts),
-            t_obj.end_ts and shorttime(t_obj.end_ts) or "--:--:--",
+            utils.shortdate(t_obj.start_ts),
+            utils.shorttime(t_obj.start_ts),
+            t_obj.end_ts and utils.shorttime(t_obj.end_ts) or "--:--:--",
             # the format string occurs also below
             self._str_nodecolor(t_obj.dc, '%-13s' % t_obj.shortname())
         ]
-        l += get_cib_attributes(self.pe_report_path(t_obj), "cib",
-                                ("update-client", "update-user", "update-origin"),
-                                ("no-client", "no-user", "no-origin"))
-        return '%s %s %s  %-13s %-10s %-10s %s' % tuple(l)
-
-    def pelist(self, a=None, long=False):
+        l += utils.get_cib_attributes(t_obj.path(), "cib",
+                                      ("update-client", "update-user", "update-origin"),
+                                      ("no-client", "no-user", "no-origin"))
+        l += [" ".join(sorted(t_obj.tags))]
+        return '%s %s %s  %-13s %-10s %-10s %s   %s' % tuple(l)
+
+    def pelist(self, a=None, verbose=False):
+        pe_details_hdr = "Date       Start    End       Filename      Client     User       Origin      Tags"
+        pe_details_sep = "====       =====    ===       ========      ======     ====       ======      ===="
         if not self.prepare_source(no_live_update=self.prevent_live_update()):
             return []
         if isinstance(a, (tuple, list)):
@@ -1557,12 +853,17 @@ class Report(object):
                 a.append(a[0])
         elif a is not None:
             a = [a, a]
-        l = [long and self.pe_detail_format(t_obj) or self.pe_report_path(t_obj)
-             for t_obj in self._transitions if pe_file_in_range(t_obj.pe_file, a)]
-        if long:
-            l = [self.pe_details_header, self.pe_details_separator] + l
+        l = [verbose and self.pe_detail_format(t_obj) or t_obj.path()
+             for t_obj in self.logparser.get_transitions() if pe_file_in_range(t_obj.pe_file, a)]
+        if verbose:
+            l = [pe_details_hdr, pe_details_sep] + l
         return l
 
+    def show_transitions(self):
+        if not self.prepare_source(no_live_update=self.prevent_live_update()):
+            return []
+        return ["%-30s  %-15s %-15s Tags" % ("Time", "Name", "Node")] + [t.description() for t in self.logparser.get_transitions()]
+
     def dotlist(self, a=None):
         l = [x.replace("bz2", "dot") for x in self.pelist(a)]
         return [x for x in l if os.path.isfile(x)]
@@ -1570,7 +871,7 @@ class Report(object):
     def find_pe_files(self, path):
         'Find a PE or dot file matching part of the path.'
         pe_l = path.endswith(".dot") and self.dotlist() or self.pelist()
-        return [x for x in pe_l if x.endswith(path)]
+        return [x for x in pe_l if x.find(path) >= 0]
 
     def pe2dot(self, f):
         f = f.replace("bz2", "dot")
@@ -1579,7 +880,7 @@ class Report(object):
         return None
 
     def find_file(self, f):
-        return file_find_by_name(self.loc, f)
+        return utils.file_find_by_name(self.loc, f)
 
     def get_session_dir(self, name):
         try:
@@ -1589,7 +890,7 @@ class Report(object):
     state_file = 'history_state.cfg'
     rpt_section = 'report'
 
-    def save_state(self, dir):
+    def save_state(self, sdir):
         '''
         Save the current history state. It should include:
         - directory
@@ -1600,14 +901,14 @@ class Report(object):
         p = ConfigParser.SafeConfigParser()
         p.add_section(self.rpt_section)
         p.set(self.rpt_section, 'dir',
-              self.source == "live" and dir or self.source)
+              self.source == "live" and sdir or self.source)
         p.set(self.rpt_section, 'from_time',
-              self.from_dt and human_date(self.from_dt) or '')
+              self.from_dt and logtime.human_date(self.from_dt) or '')
         p.set(self.rpt_section, 'to_time',
-              self.to_dt and human_date(self.to_dt) or '')
+              self.to_dt and logtime.human_date(self.to_dt) or '')
         p.set(self.rpt_section, 'detail', str(self.detail))
         self.manage_excludes("save", p)
-        fname = os.path.join(dir, self.state_file)
+        fname = os.path.join(sdir, self.state_file)
         try:
             f = open(fname, "wb")
         except IOError, msg:
@@ -1617,12 +918,12 @@ class Report(object):
         f.close()
         return True
 
-    def load_state(self, dir):
+    def load_state(self, sdir):
         '''
         Load the history state from a file.
         '''
         p = ConfigParser.SafeConfigParser()
-        fname = os.path.join(dir, self.state_file)
+        fname = os.path.join(sdir, self.state_file)
         try:
             p.read(fname)
         except Exception, msg:
@@ -1632,17 +933,17 @@ class Report(object):
         try:
             for n, v in p.items(self.rpt_section):
                 if n == 'dir':
-                    self.source = self.loc = v
-                    if not os.path.exists(self.loc):
+                    self.set_source(v)
+                    if not os.path.exists(v):
                         common_err("session state file %s points to a non-existing directory: %s" %
-                                   (fname, self.loc))
+                                   (fname, v))
                         rc = False
                 elif n == 'from_time':
-                    self.from_dt = v and parse_time(v) or None
+                    self.from_dt = v and utils.parse_time(v) or None
                 elif n == 'to_time':
-                    self.to_dt = v and parse_time(v) or None
+                    self.to_dt = v and utils.parse_time(v) or None
                 elif n == 'detail':
-                    self.detail = int(v)
+                    self.set_detail(v)
                 else:
                     common_warn("unknown item %s in the session state file %s" %
                                 (n, fname))
@@ -1667,32 +968,33 @@ class Report(object):
             self.change_origin = org
 
     def manage_session(self, subcmd, name):
-        dir = self.get_session_dir(name)
-        if subcmd == "save" and os.path.exists(dir):
+        session_dir = self.get_session_dir(name)
+        if subcmd == "save" and os.path.exists(session_dir):
             common_err("history session %s exists" % name)
             return False
-        elif subcmd in ("load", "pack", "update", "delete") and not os.path.exists(dir):
+        elif subcmd in ("load", "pack", "update", "delete") and not os.path.exists(session_dir):
             common_err("history session %s does not exist" % name)
             return False
         if subcmd == "save":
-            if pipe_cmd_nosudo("mkdir -p %s" % dir) != 0:
+            if utils.pipe_cmd_nosudo("mkdir -p %s" % session_dir) != 0:
                 return False
             if self.source == "live":
-                rc = pipe_cmd_nosudo("tar -C '%s' -c . | tar -C '%s' -x" %
-                                     (self._live_loc(), dir))
+                rc = utils.pipe_cmd_nosudo("tar -C '%s' -c . | tar -C '%s' -x" %
+                                           (self._live_loc(), session_dir))
                 if rc != 0:
                     return False
-            return self.save_state(dir)
+            return self.save_state(session_dir)
         elif subcmd == "update":
-            return self.save_state(dir)
+            return self.save_state(session_dir)
         elif subcmd == "load":
-            return self.load_state(dir)
+            return self.load_state(session_dir)
         elif subcmd == "delete":
-            rmdir_r(dir)
+            utils.rmdir_r(session_dir)
         elif subcmd == "list":
-            ext_cmd("ls %s" % self.get_session_dir(None))
+            for l in self.session_list():
+                print(l)
         elif subcmd == "pack":
-            return mkarchive(dir)
+            return mkarchive(session_dir)
         return True
     log_section = 'log'
 
diff --git a/modules/idmgmt.py b/modules/idmgmt.py
index 9bc348c..7804379 100644
--- a/modules/idmgmt.py
+++ b/modules/idmgmt.py
@@ -1,15 +1,13 @@
 # Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic at suse.de>
 # See COPYING for license information.
+#
+# Make sure that ids are unique.
 
 from . import constants
 import copy
 from .msg import common_error, id_used_err
 from . import xmlutil
 
-
-'''
-Make sure that ids are unique.
-'''
 _id_store = {}
 _state = []
 ok = True  # error var
@@ -165,7 +163,7 @@ def clear():
     _state = []
 
 
-def set(node, oldnode, id_hint, id_required=True):
+def set_id(node, oldnode, id_hint, id_required=True):
     '''
     Set the id attribute for the node.
     - if the node already contains "id", keep it
diff --git a/modules/log_patterns.py b/modules/log_patterns.py
index 9513dca..ab6bfc6 100644
--- a/modules/log_patterns.py
+++ b/modules/log_patterns.py
@@ -20,7 +20,11 @@
 #
 # [Note that resources may contain clone numbers!]
 
-log_patterns = {
+from . import utils
+
+__all__ = ('patterns',)
+
+_patterns_old = {
     "resource": (
         (  # detail 0
             "lrmd.*%% (?:start|stop|promote|demote|migrate)",
@@ -76,3 +80,70 @@ log_patterns = {
         ),
     ),
 }
+
+_patterns_118 = {
+    "resource": (
+        (  # detail 0
+            "crmd.*Initiating.*%%_(?:start|stop|promote|demote|migrate)_",
+            "lrmd.*operation_finished: %%_",
+            "lrmd.*executing - rsc:%% action:(?:start|stop|promote|demote|migrate)",
+            "lrmd.*finished - rsc:%% action:(?:start|stop|promote|demote|migrate)",
+
+            "crmd.*LRM operation %%_(?:start|stop|promote|demote|migrate)_.*confirmed=true",
+            "crmd.*LRM operation %%_.*Timed Out",
+            "[(]%%[)][[]",
+        ),
+        (  # detail 1
+            "crmd.*Initiating.*%%_(?:monitor_0|notify)",
+            "lrmd.*executing - rsc:%% action:(?:monitor_0|notify)",
+            "lrmd.*finished - rsc:%% action:(?:monitor_0|notify)",
+        ),
+    ),
+    "node": (
+        (  # detail 0
+            " %% .*Corosync.Cluster.Engine",
+            " %% .*Executive.Service.RELEASE",
+            " %% .*crm_shutdown:.Requesting.shutdown",
+            " %% .*pcmk_shutdown:.Shutdown.complete",
+            " %% .*Configuration.validated..Starting.heartbeat",
+            "pengine.*Scheduling Node %% for STONITH",
+            "pengine.*Node %% will be fenced",
+            "crmd.*for %% failed",
+            "stonith-ng.*host '%%'",
+            "Exec.*on %% ",
+            "Node %% will be fenced",
+            "stonith-ng.*on %% for.*timed out",
+            "stonith-ng.*can not fence %%:",
+            "stonithd.*Succeeded.*node %%:",
+            "(?:lost|memb): %% ",
+            "crmd.*(?:NEW|LOST|new|lost):.* %% ",
+            "Node return implies stonith of %% ",
+        ),
+        (  # detail 1
+        ),
+    ),
+    "quorum": (
+        (  # detail 0
+            "crmd.*Updating.(quorum).status",
+            "crmd.*quorum.(?:lost|ac?quir[^\s]*)",
+        ),
+        (  # detail 1
+        ),
+    ),
+    "events": (
+        (  # detail 0
+            "(CRIT|crit|ERROR|error|UNCLEAN|unclean):",
+        ),
+        (  # detail 1
+            "(WARN|warning):",
+        ),
+    ),
+}
+
+
+def patterns(cib_f=None):
+    is118 = utils.is_pcmk_118(cib_f=cib_f)
+    if is118:
+        return _patterns_118
+    else:
+        return _patterns_old
diff --git a/modules/log_patterns_118.py b/modules/log_patterns_118.py
deleted file mode 100644
index 25dad77..0000000
--- a/modules/log_patterns_118.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright (C) 2012 Dejan Muhamedagic <dmuhamedagic at suse.de>
-# See COPYING for license information.
-#
-# log pattern specification (for pacemaker v1.1.8)
-#
-# patterns are grouped one of several classes:
-#  - resource: pertaining to a resource
-#  - node: pertaining to a node
-#  - quorum: quorum changes
-#  - events: other interesting events (core dumps, etc)
-#
-# paterns are grouped based on a detail level
-# detail level 0 is the lowest, i.e. should match the least
-# number of relevant messages
-
-# NB:
-# %% stands for whatever user input we get, for instance a
-# resource name or node name or just some regular expression
-# in optimal case, it should be surrounded by literals
-#
-# [Note that resources may contain clone numbers!]
-
-log_patterns = {
-    "resource": (
-        (  # detail 0
-            "crmd.*Initiating.*%%_(?:start|stop|promote|demote|migrate)_",
-            "lrmd.*operation_finished: %%_",
-            "crmd.*LRM operation %%_(?:start|stop|promote|demote|migrate)_.*confirmed=true",
-            "crmd.*LRM operation %%_.*Timed Out",
-            "[(]%%[)][[]",
-        ),
-        (  # detail 1
-            "crmd.*Initiating%%_(?:monitor_0|notify)",
-        ),
-    ),
-    "node": (
-        (  # detail 0
-            " %% .*Corosync.Cluster.Engine",
-            " %% .*Executive.Service.RELEASE",
-            " %% .*crm_shutdown:.Requesting.shutdown",
-            " %% .*pcmk_shutdown:.Shutdown.complete",
-            " %% .*Configuration.validated..Starting.heartbeat",
-            "pengine.*Scheduling Node %% for STONITH",
-            "pengine.*Node %% will be fenced",
-            "crmd.*for %% failed",
-            "stonith-ng.*host '%%'",
-            "Exec.*on %% ",
-            "Node %% will be fenced",
-            "stonith-ng.*on %% for.*timed out",
-            "stonith-ng.*can not fence %%:",
-            "stonithd.*Succeeded.*node %%:",
-            "(?:lost|memb): %% ",
-            "crmd.*(?:NEW|LOST):.* %% ",
-            "Node return implies stonith of %% ",
-        ),
-        (  # detail 1
-        ),
-    ),
-    "quorum": (
-        (  # detail 0
-            "crmd.*Updating.quorum.status",
-            "crmd.*quorum.(?:lost|ac?quir)",
-        ),
-        (  # detail 1
-        ),
-    ),
-    "events": (
-        (  # detail 0
-            "(?:CRIT|crit):",
-            "(?:ERROR|error):",
-        ),
-        (  # detail 1
-            "(?:WARN|warning):",
-        ),
-    ),
-}
diff --git a/modules/logparser.py b/modules/logparser.py
new file mode 100644
index 0000000..9615fec
--- /dev/null
+++ b/modules/logparser.py
@@ -0,0 +1,627 @@
+# Copyright (C) 2016 Kristoffer Gronlund <kgronlund at suse.com>
+# See COPYING for license information.
+
+import bz2
+import gzip
+import re
+import os
+import sys
+import collections
+import json
+import time
+
+from . import msg as crmlog
+from . import xmlutil
+from . import logtime
+from . import utils
+from . import log_patterns
+
+
+_METADATA_FILENAME = "__meta.json"
+_METADATA_CACHE_AGE = (60.0 * 60.0)
+# Update this when changing the metadata format
+_METADATA_VERSION = 1
+
+
+def _open_logfile(logfile):
+    """
+    Open a file which may be gz|bz2 compressed.
+    Uncompress based on extension.
+    """
+    try:
+        if logfile.endswith(".bz2"):
+            return bz2.BZ2File(logfile)
+        if logfile.endswith(".gz"):
+            return gzip.open(logfile)
+        return open(logfile)
+    except IOError, msg:
+        crmlog.common_error("open %s: %s" % (logfile, msg))
+        return None
+
+
+def _transition_start_re():
+    """
+    Return regular expression matching transition start.
+    number_re can be a specific transition or a regexp matching
+    any transition number.
+    The resulting RE has groups
+    1: transition number
+    2: full path of pe file
+    3: pe file number
+    """
+    m1 = "crmd.*Processing graph ([0-9]+).*derived from (.*/pe-[^-]+-([0-9]+)[.]bz2)"
+    m2 = "pengine.*Transition ([0-9]+):.*([^ ]*/pe-[^-]+-([0-9]+)[.]bz2)"
+    try:
+        return re.compile("(?:%s)|(?:%s)" % (m1, m2))
+    except re.error, e:
+        crmlog.common_debug("RE compilation failed: %s" % (e))
+        raise ValueError("Error in search expression")
+
+
+def pefile_shortname(pe_file):
+    return os.path.basename(pe_file).replace(".bz2", "")
+
+
+def trans_str(node, pe_file):
+    '''Convert node,pe_file to transition string.'''
+    return "%s:%s" % (node, pefile_shortname(pe_file))
+
+
+def _transition_end_re():
+    """
+    Return RE matching transition end.
+    See transition_start_re for more details.
+
+    1: trans_num
+    2: pe_file
+    3: pe_num
+    4: state
+    """
+    try:
+        return re.compile("crmd.*Transition ([0-9]+).*Source=(.*/pe-[^-]+-([0-9]+)[.]bz2).:.*(Stopped|Complete|Terminated)")
+    except re.error, e:
+        crmlog.common_debug("RE compilation failed: %s" % (e))
+        raise ValueError("Error in search expression")
+
+
+_GRAPH_ACTIONS_RE = re.compile("([A-Z][a-z]+)=([0-9]+)")
+
+
+def _run_graph_msg_actions(msg):
+    '''
+    crmd: [13667]: info: run_graph: Transition 399 (Complete=5,
+    Pending=1, Fired=1, Skipped=0, Incomplete=3,
+    Source=...
+    Returns dict: d[Pending]=np, d[Fired]=nf, ...
+    Only stores non-zero values.
+    '''
+    d = {}
+    s = msg
+    r = _GRAPH_ACTIONS_RE.search(s)
+    while r:
+        val = int(r.group(2))
+        if val != 0:
+            d[r.group(1)] = val
+        s = s[r.end():]
+        r = _GRAPH_ACTIONS_RE.search(s)
+    return d
+
+
+def mk_re_list(patt_l, repl):
+    'Build a list of regular expressions, replace "%%" with repl'
+    l = []
+    for re_l in patt_l:
+        l += [x.replace("%%", repl) for x in re_l]
+    if not repl:
+        l = [x.replace(".*.*", ".*") for x in l]
+    return l
+
+
+class Transition(object):
+    __slots__ = ('loc', 'dc', 'start_ts', 'trans_num', 'pe_file', 'pe_num', 'end_ts', 'end_state', 'end_actions', 'tags')
+
+    def __init__(self, loc, dc, start_ts, trans_num, pe_file, pe_num):
+        self.loc = loc
+        self.dc = dc
+        self.start_ts = start_ts
+        self.trans_num = trans_num
+        self.pe_file = pe_file
+        self.pe_num = pe_num
+        self.end_ts = None
+        self.end_state = None
+        self.end_actions = None
+        self.tags = set()
+
+    def __str__(self):
+        return trans_str(self.dc, self.pe_file)
+
+    def shortname(self):
+        return pefile_shortname(self.pe_file)
+
+    def actions(self):
+        return self.end_actions
+
+    def actions_count(self):
+        if self.end_actions is not None:
+            return sum(self.end_actions.values())
+        return -1
+
+    def path(self):
+        return os.path.join(self.loc, self.dc, "pengine", self.pe_file)
+
+    def description(self):
+        s = "%s %s - %s: %-15s %-15s %s" % (
+            utils.shortdate(self.start_ts),
+            utils.shorttime(self.start_ts),
+            self.end_ts and utils.shorttime(self.end_ts) or "--:--:--",
+            self.shortname(),
+            self.dc,
+            " ".join(sorted(self.tags))
+        )
+        return s
+
+    def empty(self, prev):
+        """
+        True if this transition resulted in no actions and no CIB changes
+        prev: previous transition
+        """
+        old_pe_l_file = prev.path()
+        new_pe_l_file = self.path()
+        no_actions = self.actions_count() == 0
+        if not os.path.isfile(old_pe_l_file) or not os.path.isfile(new_pe_l_file):
+            return no_actions
+        old_cib = xmlutil.compressed_file_to_cib(old_pe_l_file)
+        new_cib = xmlutil.compressed_file_to_cib(new_pe_l_file)
+        if old_cib is None or new_cib is None:
+            return no_actions
+        prev_epoch = old_cib.attrib.get("epoch", "0")
+        epoch = new_cib.attrib.get("epoch", "0")
+        prev_admin_epoch = old_cib.attrib.get("admin_epoch", "0")
+        admin_epoch = new_cib.attrib.get("admin_epoch", "0")
+        return no_actions and epoch == prev_epoch and admin_epoch == prev_admin_epoch
+
+    def transition_info(self):
+        print "Transition %s (%s -" % (self, utils.shorttime(self.start_ts)),
+        if self.end_ts:
+            print "%s):" % utils.shorttime(self.end_ts)
+            act_d = self.actions()
+            total = self.actions_count()
+            s = ", ".join(["%d %s" % (act_d[x], x) for x in act_d if act_d[x]])
+            print "\ttotal %d actions: %s" % (total, s)
+        else:
+            print "[unfinished])"
+
+    def to_dict(self):
+        """
+        Serialize to dict (for cache)
+        """
+        o = {"tags": list(self.tags)}
+        for k in self.__slots__:
+            if k in ("loc", "tags"):
+                continue
+            o[k] = getattr(self, k)
+        return o
+
+    @classmethod
+    def from_dict(cls, loc, obj):
+        t = Transition(loc, None, None, None, None, None)
+        for k, v in obj.iteritems():
+            setattr(t, k, set(v) if k == "tags" else v)
+        return t
+
+
+class CibInfo(object):
+    def __init__(self, report_path):
+        self.filename = utils.file_find_by_name(report_path, "cib.xml")
+        self.nodes = []
+        self.primitives = []
+        self.groups = {}
+        self.clones = {}
+        self.cloned_resources = set()
+        self.not_cloned_resources = set()
+
+        if self.filename:
+            cib_elem = xmlutil.file2cib_elem(self.filename)
+
+        if cib_elem is None:
+            return
+
+        self.nodes = [x.get("uname") or x.get("id") for x in cib_elem.xpath("/cib/configuration/nodes/node")]
+
+        self.primitives = [x.get("id") for x in cib_elem.xpath("/cib/configuration/resources//primitive")]
+
+        for grp in cib_elem.xpath("/cib/configuration/resources/group"):
+            self.groups[grp.get("id")] = xmlutil.get_rsc_children_ids(grp)
+
+        for cln in cib_elem.xpath("/cib/configuration/resources/*[self::clone or self::master]"):
+            self.clones[cln.get("id")] = xmlutil.get_prim_children_ids(cln)
+            self.cloned_resources.union(self.clones[cln.get("id")])
+
+        self.not_cloned_resources = set(x for x in self.primitives if x not in self.cloned_resources)
+
+    def resources(self):
+        return self.primitives.keys() + self.groups.keys() + self.clones.keys()
+
+    def match_resources(self):
+        """
+        list of regex expressions to match resources
+        """
+        rsc_l = list(self.not_cloned_resources)
+        rsc_l += ["%s(?::[0-9]+)?" % x for x in self.cloned_resources]
+        return rsc_l
+
+
+class LogParser(object):
+    """
+    Used by the history explorer.
+    Given a report directory, generates log metadata.
+
+    TODO:
+
+    This information is then written to a file called %(_METADATA_FILENAME),
+    and the next time the history explorer is started, we skip the
+    analysis and load the metadata directly.
+
+    The analysis is done over the complete log: Timeframe narrowing happens elsewhere.
+    """
+
+    def __init__(self, loc, cib, logfiles, detail):
+        """
+        report_root: Base directory of the report
+        """
+        self.loc = loc
+        self.cib = cib
+        self.filenames = logfiles
+        self.fileobjs = [_open_logfile(f) for f in logfiles]
+        self.detail = detail
+
+        self.events = {}
+        self.transitions = []
+
+        self.from_ts = None
+        self.to_ts = None
+
+    def __del__(self):
+        for f in self.fileobjs:
+            f.close()
+
+    def scan(self, mode=None):
+        """
+        mode = 'refresh':
+        Re-read logs that may have new data appended.
+        Right now this re-scans all the log data.
+        TODO: Only scan new data by tracking the previous
+        end of each file and scanning from there. Retain
+        previous data and just add new transitions / events.
+
+        Returns list of pefiles missing from report. [(node, [pefile ...]) ...]
+
+        mode = 'force':
+        Completely re-parse (ignore cache)
+        """
+        with utils.nogc():
+            return self._scan(mode=mode)
+
+    def _scan(self, mode):
+        """
+        Scan logs and generate metadata for transitions,
+        tags and events. (used when retreiving log lines later)
+
+        Returns list of pefiles missing from report. [(node, [pefile ...]) ...]
+
+        mode: None, 'refresh' or 'force'
+
+        TODO: Load/save metadata when already generated.
+        TODO: scan each logfile in a separate thread?
+        """
+
+        if mode not in ('refresh', 'force') and self._load_cache():
+            return []
+
+        missing_pefiles = []
+
+        # {etype -> [(sortkey, msg)]}
+        # TODO: store (sortkey, fileid, spos) instead?
+        self.events = collections.defaultdict(list)
+
+        self.transitions = []
+
+        # trans_num:pe_num -> Transition()
+        transitions_map = {}
+
+        startre = _transition_start_re()
+        endre = _transition_end_re()
+
+        eventre = {}
+        eventre["node"] = self._build_re("node", self.cib.nodes)
+        eventre["resource"] = self._build_re("resource", self.cib.match_resources())
+        eventre["quorum"] = self._build_re("quorum", [])
+        eventre["events"] = self._build_re("events", [])
+
+        DEFAULT, IN_TRANSITION = 0, 1
+        state = DEFAULT
+        transition = None
+
+        for logidx, (filename, log) in enumerate(zip(self.filenames, self.fileobjs)):
+            log.seek(0)
+            crmlog.common_debug("parsing %s" % (filename))
+            line = "a"
+            while line != '':
+                spos = log.tell()
+                line = log.readline()
+                m = startre.search(line)
+                if m:
+                    # m.groups() is (transnum1, pefile1, penum1, transnum2, pefile2, penum2) where
+                    # it matched either 1 or 2
+                    t1, p1, n1, t2, p2, n2 = m.groups()
+                    if t1 is not None:
+                        trans_num, pe_file, pe_num = t1, p1, n1
+                    else:
+                        trans_num, pe_file, pe_num = t2, p2, n2
+                    pe_orig = pe_file
+                    pe_file = os.path.basename(pe_orig)
+                    ts, dc = logtime.syslog_ts_node(line)
+                    if ts is None or dc is None:
+                        continue
+                    id_ = trans_str(dc, pe_file)
+                    transition = transitions_map.get(id_)
+                    if transition is None:
+                        transition = Transition(self.loc, dc, ts, trans_num, pe_file, pe_num)
+                        self.transitions.append(transition)
+                        transitions_map[id_] = transition
+                        crmlog.common_debug("{Transition: %s" % (transition))
+
+                        if not os.path.isfile(transition.path()):
+                            missing_pefiles.append((dc, pe_orig))
+                    else:
+                        crmlog.common_debug("~Transition: %s old(%s, %s) new(%s, %s)" %
+                                            (transition, transition.trans_num, transition.pe_file, trans_num, pe_file))
+                    state = IN_TRANSITION
+                    continue
+                if state == IN_TRANSITION:
+                    m = endre.search(line)
+                    if m:
+                        trans_num, pe_file, pe_num, state = m.groups()
+                        pe_file = os.path.basename(pe_file)
+                        ts, dc = logtime.syslog_ts_node(line)
+                        if ts is None or dc is None:
+                            continue
+                        transition = transitions_map.get(trans_str(dc, pe_file))
+                        if transition is None:
+                            # transition end without previous begin...
+                            crmlog.common_debug("Found transition end without start: %s: %s - %s:%s" % (ts, filename, trans_num, pe_file))
+                        else:
+                            transition.end_state = state
+                            transition.end_ts = ts
+                            transition.end_actions = _run_graph_msg_actions(line)
+                            crmlog.common_debug("}Transition: %s %s" % (transition, state))
+                        state = DEFAULT
+
+                # events
+                for etype, erx in eventre.iteritems():
+                    for rx in erx:
+                        m = rx.search(line)
+                        if m:
+                            ts = logtime.syslog_ts(line)
+                            if ts is None:
+                                continue
+                            crmlog.common_debug("+Event %s: %s" % (etype, ", ".join(m.groups())))
+                            sk = (long(ts) << 32) + long(spos)
+                            self.events[etype].append((sk, logidx, spos))
+                            if transition is not None:
+                                for t in m.groups():
+                                    if t:
+                                        transition.tags.add(t.lower())
+
+                if state == DEFAULT:
+                    transition = None
+
+        self.transitions.sort(key=lambda t: t.start_ts)
+        for etype, logs in self.events.iteritems():
+            logs.sort(key=lambda e: e[0])
+        empties = []
+        for i, t in enumerate(self.transitions):
+            if i == 0:
+                continue
+            if t.empty(self.transitions[i - 1]):
+                empties.append(t)
+        self.transitions = [t for t in self.transitions if t not in empties]
+        self._save_cache()
+        if missing_pefiles:
+            rdict = collections.defaultdict(list)
+            for node, pe in missing_pefiles:
+                rdict[node].append(pe)
+            missing_pefiles = list(rdict.items())
+        return missing_pefiles
+
+    def set_timeframe(self, from_t, to_t):
+        """
+        from_t, to_t: timestamps or datetime objects
+        """
+        self.from_ts = logtime.make_time(from_t)
+        self.to_ts = logtime.make_time(to_t)
+
+    def get_logs(self, nodes=None):
+        """
+        Generator which yields a list of log messages limited by the
+        list of nodes, or from all nodes.
+
+        The log lines are printed in order, by reading from
+        all files at once and always printing the line with
+        the lowest timestamp
+        """
+
+        def include_log(logfile):
+            return not nodes or os.path.basename(os.path.dirname(logfile)) in nodes
+
+        for f in self.fileobjs:
+            f.seek(0)
+
+        lines = [[None, f.readline(), f] for f in self.fileobjs]
+        for i, line in enumerate(lines):
+            if not line[1]:
+                line[0], line[2] = sys.float_info.max, None
+            else:
+                line[0] = logtime.syslog_ts(line[1])
+
+        while any(f is not None for _, _, f in lines):
+            x = min(lines, key=lambda v: v[0])
+            if x[2] is None:
+                break
+            if self.to_ts and x[0] > self.to_ts:
+                break
+            if not (self.from_ts and x[0] < self.from_ts):
+                yield x[1]
+            x[1] = x[2].readline()
+            if not x[1]:
+                x[0], x[2] = sys.float_info.max, None
+            else:
+                x[0] = logtime.syslog_ts(x[1])
+
+    def get_events(self, event=None, nodes=None, resources=None):
+        """
+        Generator which outputs matching event lines
+        event: optional node, resource, quorum
+        nodes: optional list of nodes
+        resources: optional list of resources
+
+        TODO: ordering, time limits
+        """
+        if event is not None:
+            eventlogs = [event]
+        else:
+            eventlogs = self.events.keys()
+
+        if nodes:
+            rxes = self._build_re(event, nodes)
+        elif resources:
+            expanded_l = []
+            for r in resources:
+                if r in self.cib.groups:
+                    expanded_l += self.cib.groups[r]
+                elif r in self.cib.clones:
+                    expanded_l += self.cib.clones[r]
+                else:
+                    expanded_l.append(r)
+
+            def clonify(r):
+                return r + "(?::[0-9]+)?" if r in self.cib.cloned_resources else r
+            expanded_l = [clonify(r) for r in expanded_l]
+            rxes = self._build_re(event, expanded_l)
+        else:
+            rxes = None
+
+        if event == "resource" and resources is not None and rxes is not None:
+            crmlog.common_debug("resource %s rxes: %s" % (", ".join(resources), ", ".join(r.pattern for r in rxes)))
+
+        if rxes is not None:
+            for log in eventlogs:
+                for _, f, pos in self.events.get(log, []):
+                    self.fileobjs[f].seek(pos)
+                    msg = self.fileobjs[f].readline()
+                    if any(rx.search(msg) for rx in rxes):
+                        ts = logtime.syslog_ts(msg)
+                        if not (self.from_ts and ts < self.from_ts) and not (self.to_ts and ts > self.to_ts):
+                            yield msg
+        else:
+            for log in eventlogs:
+                for _, f, pos in self.events.get(log, []):
+                    self.fileobjs[f].seek(pos)
+                    msg = self.fileobjs[f].readline()
+                    ts = logtime.syslog_ts(msg)
+                    if not (self.from_ts and ts < self.from_ts) and not (self.to_ts and ts > self.to_ts):
+                        yield msg
+
+    def get_transitions(self):
+        """
+        Yields transitions within the current timeframe
+        """
+        for t in self.transitions:
+            if not (self.from_ts and t.end_ts and t.end_ts < self.from_ts) and not (self.to_ts and t.start_ts and t.start_ts > self.to_ts):
+                yield t
+
+    def _get_patt_l(self, etype):
+        '''
+        get the list of patterns for this type, up to and
+        including current detail level
+        '''
+        patterns = log_patterns.patterns(cib_f=self.cib.filename)
+        if etype not in patterns:
+            crmlog.common_error("%s not featured in log patterns" % etype)
+            return None
+        return patterns[etype][0:self.detail+1]
+
+    def _build_re(self, etype, args):
+        '''
+        Prepare a regex string for the type and args.
+        For instance, "resource" and rsc1, rsc2, ...
+        '''
+        patt_l = self._get_patt_l(etype)
+        if not patt_l:
+            return None
+        if not args:
+            re_l = mk_re_list(patt_l, "")
+        else:
+            re_l = mk_re_list(patt_l, r'(%s)' % "|".join(args))
+        return [re.compile(r) for r in re_l]
+
+    def to_dict(self):
+        """
+        Serialize self to dict (including transition objects)
+        """
+        o = {
+            "version": _METADATA_VERSION,
+            "events": self.events,
+            "transitions": [t.to_dict() for t in self.transitions],
+            "cib": {
+                "nodes": self.cib.nodes,
+                "primitives": self.cib.primitives,
+                "groups": self.cib.groups,
+                "clones": self.cib.clones
+            }
+        }
+        return o
+
+    def from_dict(self, obj):
+        """
+        Load from dict
+        """
+        if "version" not in obj or obj["version"] != _METADATA_VERSION:
+            return False
+        self.events = obj["events"]
+        self.transitions = [Transition.from_dict(self.loc, t) for t in obj["transitions"]]
+        return True
+
+    def _metafile(self):
+        return os.path.join(self.loc, _METADATA_FILENAME)
+
+    def count(self):
+        """
+        Returns (num transitions, num events)
+        """
+        return len(self.transitions), sum(len(e) for e in self.events.values())
+
+    def _save_cache(self):
+        """
+        Save state to cache file
+        """
+        fn = self._metafile()
+        with open(fn, 'wb') as f:
+            json.dump(self.to_dict(), f, indent=2)
+            crmlog.common_debug("Transition metadata saved to %s" % (fn))
+
+    def _load_cache(self):
+        """
+        Load state from cache file
+        """
+        fn = self._metafile()
+        if os.path.isfile(fn) and time.time() - os.stat(fn).st_mtime < _METADATA_CACHE_AGE:
+            with open(fn, 'rb') as f:
+                try:
+                    if not self.from_dict(json.load(f)):
+                        return False
+                    crmlog.common_debug("Transition metadata loaded from %s" % (fn))
+                    return True
+                except ValueError as e:
+                    crmlog.common_debug("Failed to load metadata: %s" % (e))
+        return False
diff --git a/modules/logtime.py b/modules/logtime.py
new file mode 100644
index 0000000..a3a4343
--- /dev/null
+++ b/modules/logtime.py
@@ -0,0 +1,218 @@
+# Copyright (C) 2013-2016 Kristoffer Gronlund <kgronlund at suse.com>
+# See COPYING for license information.
+
+"""
+Helpers for handling log timestamps.
+"""
+
+import re
+import time
+import datetime
+from . import utils
+from . import msg as crmlog
+
+
+YEAR = None
+
+
+def set_year(ts=None):
+    '''
+    ts: optional time in seconds
+    '''
+    global YEAR
+    year = time.strftime("%Y", time.localtime(ts))
+    if YEAR is not None:
+        t = (" (ts: %s)" % (ts)) if ts is not None else ""
+        crmlog.common_debug("history: setting year to %s%s" % (year, t))
+    YEAR = year
+
+
+def human_date(dt=None):
+    '''
+    Convert datetime argument into a presentational string.
+
+    dt: Datetime (default: now)
+    '''
+    if dt is None:
+        dt = utils.make_datetime_naive(datetime.datetime.now())
+    # here, dt is in UTC. Convert to localtime:
+    localdt = datetime.datetime.fromtimestamp(utils.datetime_to_timestamp(dt))
+    # drop microseconds
+    return re.sub("[.].*", "", "%s %s" % (localdt.date(), localdt.time()))
+
+
+def make_time(t):
+    '''
+    t: time in seconds / datetime / other
+    returns: time in floating point
+    '''
+    if t is None:
+        return None
+    elif isinstance(t, datetime.datetime):
+        return utils.datetime_to_timestamp(t)
+    return t
+
+
+# fmt1: group 11 is node
+# fmt2: group 2 is node
+# fmt3: group 2 is node
+# fmt4: node not available?
+_syslog2node_formats = (re.compile(r'^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(?:.(\d+))?([+-])(\d{2}):?(\d{2})\s+(?:\[\d+\])?\s*([\S]+)'),
+                        re.compile(r'^(\d{4}-\d{2}-\d{2}T\S+)\s+(?:\[\d+\])?\s*([\S]+)'),
+                        re.compile(r'^([a-zA-Z]{2,4}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2})\s+(?:\[\d+\])?\s*([\S]+)'),
+                        re.compile(r'^(\d{4}\/\d{2}\/\d{2}_\d{2}:\d{2}:\d{2})'))
+
+_syslog_ts_prev = None
+
+
+def syslog_ts(s):
+    """
+    Finds the timestamp in the given line
+    Returns as floating point, seconds
+    """
+    global _syslog_ts_prev
+    fmt1, fmt2, fmt3, fmt4 = _syslog2node_formats
+
+    # RFC3339
+    m = fmt1.match(s)
+    if m:
+        year, month, day, hour, minute, second, ms, tzsgn, tzh, tzm, _ = m.groups()
+        ts = time.mktime((int(year), int(month), int(day), int(hour), int(minute), int(second), 0, 0, -1))
+        if tzsgn == '+':
+            ts += (3600.0 * float(tzh) + 60.0 * float(tzm))
+        else:
+            ts -= (3600.0 * float(tzh) + 60.0 * float(tzm))
+        if ms:
+            ts += float("0.%s" % ms)
+        _syslog_ts_prev = ts
+        return _syslog_ts_prev
+
+    m = fmt2.match(s)
+    if m:
+        _syslog_ts_prev = utils.parse_to_timestamp(m.group(1))
+        return _syslog_ts_prev
+
+    m = fmt3.match(s)
+    if m:
+        if YEAR is None:
+            set_year()
+        tstr = YEAR + ' ' + m.group(1)
+
+        dt = datetime.datetime.strptime(tstr, '%Y %b %d %H:%M:%S')
+        from dateutil import tz
+        ts = utils.total_seconds(dt - tz.tzlocal().utcoffset(dt) - datetime.datetime(1970, 1, 1))
+        _syslog_ts_prev = ts
+        return _syslog_ts_prev
+
+    m = fmt4.match(s)
+    if m:
+        tstr = m.group(1).replace('_', ' ')
+        _syslog_ts_prev = utils.parse_to_timestamp(tstr)
+        return _syslog_ts_prev
+
+    crmlog.common_debug("malformed line: %s" % s)
+    return _syslog_ts_prev
+
+
+_syslog_node_prev = None
+
+
+def syslog2node(s):
+    '''
+    Get the node from a syslog line.
+
+    old format:
+    Aug 14 11:07:04 <node> ...
+    new format:
+    Aug 14 11:07:04 [<PID>] <node> ...
+    RFC5424:
+    <TS> <node> ...
+    RFC5424 (2):
+    <TS> [<PID>] <node> ...
+    '''
+    global _syslog_node_prev
+
+    fmt1, fmt2, fmt3, _ = _syslog2node_formats
+    m = fmt1.match(s)
+    if m:
+        _syslog_node_prev = m.group(11)
+        return _syslog_node_prev
+
+    m = fmt2.match(s)
+    if m:
+        _syslog_node_prev = m.group(2)
+        return _syslog_node_prev
+
+    m = fmt3.match(s)
+    if m:
+        _syslog_node_prev = m.group(2)
+        return _syslog_node_prev
+
+    try:
+        # strptime defaults year to 1900 (sigh)
+        time.strptime(' '.join(s.split()[0:3]),
+                      "%b %d %H:%M:%S")
+        _syslog_node_prev = s.split()[3]
+        return _syslog_node_prev
+    except ValueError:  # try the rfc5424
+        ls = s.split()
+        if not ls:
+            return _syslog_node_prev
+        rfc5424 = s.split()[0]
+        if 'T' in rfc5424:
+            try:
+                utils.parse_to_timestamp(rfc5424)
+                _syslog_node_prev = s.split()[1]
+                return _syslog_node_prev
+            except Exception:
+                return _syslog_node_prev
+        else:
+            return _syslog_node_prev
+
+
+def syslog_ts_node(s):
+    """
+    Returns (timestamp, node) from a syslog log line
+    """
+    global _syslog_ts_prev
+    global _syslog_node_prev
+    fmt1, fmt2, fmt3, fmt4 = _syslog2node_formats
+
+    # RFC3339
+    m = fmt1.match(s)
+    if m:
+        year, month, day, hour, minute, second, ms, tzsgn, tzh, tzm, node = m.groups()
+        ts = time.mktime((int(year), int(month), int(day), int(hour), int(minute), int(second), 0, 0, -1))
+        if tzsgn == '+':
+            ts += (3600.0 * float(tzh) + 60.0 * float(tzm))
+        else:
+            ts -= (3600.0 * float(tzh) + 60.0 * float(tzm))
+        _syslog_ts_prev = ts
+        _syslog_node_prev = node
+        return _syslog_ts_prev, node
+
+    m = fmt2.match(s)
+    if m:
+        _syslog_ts_prev, _syslog_node_prev = utils.parse_to_timestamp(m.group(1)), m.group(2)
+        return _syslog_ts_prev, _syslog_node_prev
+
+    m = fmt3.match(s)
+    if m:
+        if YEAR is None:
+            set_year()
+        tstr = YEAR + ' ' + m.group(1)
+
+        dt = datetime.datetime.strptime(tstr, '%Y %b %d %H:%M:%S')
+        from dateutil import tz
+        ts = utils.total_seconds(dt - tz.tzlocal().utcoffset(dt) - datetime.datetime(1970, 1, 1))
+        _syslog_ts_prev, _syslog_node_prev = ts, m.group(2)
+        return _syslog_ts_prev, _syslog_node_prev
+
+    m = fmt4.match(s)
+    if m:
+        tstr = m.group(1).replace('_', ' ')
+        _syslog_ts_prev = utils.parse_to_timestamp(tstr)
+        return _syslog_ts_prev, _syslog_node_prev
+
+    crmlog.common_debug("malformed line: %s" % s)
+    return _syslog_ts_prev, _syslog_node_prev
diff --git a/modules/main.py b/modules/main.py
index 5672c3f..9113267 100644
--- a/modules/main.py
+++ b/modules/main.py
@@ -127,6 +127,10 @@ See the crm(8) man page or call %prog help for more details.""",
                       "of directories separated by semi-colons (e.g. /dir1;/dir2;etc.).")
     parser.add_option("-X", dest="profile", metavar="PROFILE",
                       help="Collect profiling data and save in PROFILE.")
+    parser.add_option("-o", "--opt", action="append", type="string", metavar="OPTION=VALUE",
+                      help="Set crmsh option temporarily. If the options are saved using" +
+                      "+options save+ then the value passed here will also be saved." +
+                      "Multiple options can be set by using +-o+ multiple times.")
     return parser
 
 
@@ -307,6 +311,13 @@ def parse_options():
     options.shadow = opts.cib or options.shadow
     options.scriptdir = opts.scriptdir or options.scriptdir
     options.ask_no = opts.ask_no
+    for opt in opts.opt or []:
+        try:
+            k, v = opt.split('=')
+            s, n = k.split('.')
+            config.set_option(s, n, v)
+        except ValueError as e:
+            raise ValueError("Expected -o <section>.<name>=<value>: %s" % (e))
     return args
 
 
@@ -344,7 +355,7 @@ def run():
             err_buf.reset_lineno()
             options.batch = True
         user_args = parse_options()
-        term._init()
+        term.init()
         if options.profile:
             return profile_run(context, user_args)
         else:
diff --git a/modules/ordereddict.py b/modules/ordereddict.py
index 198fa30..0d39aef 100644
--- a/modules/ordereddict.py
+++ b/modules/ordereddict.py
@@ -1,130 +1,130 @@
-# Copyright (c) 2009 Raymond Hettinger
-#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation files
-# (the "Software"), to deal in the Software without restriction,
-# including without limitation the rights to use, copy, modify, merge,
-# publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so,
-# subject to the following conditions:
-#
-#     The above copyright notice and this permission notice shall be
-#     included in all copies or substantial portions of the Software.
-#
-#     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-#     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-#     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-#     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-#     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-#     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-#     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-#     OTHER DEALINGS IN THE SOFTWARE.
-
-from UserDict import DictMixin
-
-
-class OrderedDict(dict, DictMixin):
-
-    def __init__(self, *args, **kwds):
-        if len(args) > 1:
-            raise TypeError('expected at most 1 arguments, got %d' % len(args))
-        try:
-            self.__end
-        except AttributeError:
-            self.clear()
-        self.update(*args, **kwds)
-
-    def clear(self):
-        self.__end = end = []
-        end += [None, end, end]         # sentinel node for doubly linked list
-        self.__map = {}                 # key --> [key, prev, next]
-        dict.clear(self)
-
-    def __setitem__(self, key, value):
-        if key not in self:
-            end = self.__end
-            curr = end[1]
-            curr[2] = end[1] = self.__map[key] = [key, curr, end]
-        dict.__setitem__(self, key, value)
-
-    def __delitem__(self, key):
-        dict.__delitem__(self, key)
-        key, prev, next = self.__map.pop(key)
-        prev[2] = next
-        next[1] = prev
-
-    def __iter__(self):
-        end = self.__end
-        curr = end[2]
-        while curr is not end:
-            yield curr[0]
-            curr = curr[2]
-
-    def __reversed__(self):
-        end = self.__end
-        curr = end[1]
-        while curr is not end:
-            yield curr[0]
-            curr = curr[1]
-
-    def popitem(self, last=True):
-        if not self:
-            raise KeyError('dictionary is empty')
-        if last:
-            key = reversed(self).next()
-        else:
-            key = iter(self).next()
-        value = self.pop(key)
-        return key, value
-
-    def __reduce__(self):
-        items = [[k, self[k]] for k in self]
-        tmp = self.__map, self.__end
-        del self.__map, self.__end
-        inst_dict = vars(self).copy()
-        self.__map, self.__end = tmp
-        if inst_dict:
-            return (self.__class__, (items,), inst_dict)
-        return self.__class__, (items,)
-
-    def keys(self):
-        return list(self)
-
-    setdefault = DictMixin.setdefault
-    update = DictMixin.update
-    pop = DictMixin.pop
-    values = DictMixin.values
-    items = DictMixin.items
-    iterkeys = DictMixin.iterkeys
-    itervalues = DictMixin.itervalues
-    iteritems = DictMixin.iteritems
-
-    def __repr__(self):
-        if not self:
-            return '%s()' % (self.__class__.__name__,)
-        return '%s(%r)' % (self.__class__.__name__, self.items())
-
-    def copy(self):
-        return self.__class__(self)
-
-    @classmethod
-    def fromkeys(cls, iterable, value=None):
-        d = cls()
-        for key in iterable:
-            d[key] = value
-        return d
-
-    def __eq__(self, other):
-        if isinstance(other, OrderedDict):
-            if len(self) != len(other):
-                return False
-            for p, q in zip(self.items(), other.items()):
-                if p != q:
-                    return False
-            return True
-        return dict.__eq__(self, other)
-
-    def __ne__(self, other):
-        return not self == other
-
-odict = OrderedDict
+# Copyright (c) 2009 Raymond Hettinger
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+#     The above copyright notice and this permission notice shall be
+#     included in all copies or substantial portions of the Software.
+#
+#     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+#     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+#     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+#     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+#     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+#     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+#     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+#     OTHER DEALINGS IN THE SOFTWARE.
+
+from UserDict import DictMixin
+
+
+class OrderedDict(dict, DictMixin):
+
+    def __init__(self, *args, **kwds):
+        if len(args) > 1:
+            raise TypeError('expected at most 1 arguments, got %d' % len(args))
+        try:
+            self.__end
+        except AttributeError:
+            self.clear()
+        self.update(*args, **kwds)
+
+    def clear(self):
+        self.__end = end = []
+        end += [None, end, end]         # sentinel node for doubly linked list
+        self.__map = {}                 # key --> [key, prev, next]
+        dict.clear(self)
+
+    def __setitem__(self, key, value):
+        if key not in self:
+            end = self.__end
+            curr = end[1]
+            curr[2] = end[1] = self.__map[key] = [key, curr, end]
+        dict.__setitem__(self, key, value)
+
+    def __delitem__(self, key):
+        dict.__delitem__(self, key)
+        key, prev, next_ = self.__map.pop(key)
+        prev[2] = next_
+        next_[1] = prev
+
+    def __iter__(self):
+        end = self.__end
+        curr = end[2]
+        while curr is not end:
+            yield curr[0]
+            curr = curr[2]
+
+    def __reversed__(self):
+        end = self.__end
+        curr = end[1]
+        while curr is not end:
+            yield curr[0]
+            curr = curr[1]
+
+    def popitem(self, last=True):
+        if not self:
+            raise KeyError('dictionary is empty')
+        if last:
+            key = reversed(self).next()
+        else:
+            key = iter(self).next()
+        value = self.pop(key)
+        return key, value
+
+    def __reduce__(self):
+        items = [[k, self[k]] for k in self]
+        tmp = self.__map, self.__end
+        del self.__map, self.__end
+        inst_dict = vars(self).copy()
+        self.__map, self.__end = tmp
+        if inst_dict:
+            return (self.__class__, (items,), inst_dict)
+        return self.__class__, (items,)
+
+    def keys(self):
+        return list(self)
+
+    setdefault = DictMixin.setdefault
+    update = DictMixin.update
+    pop = DictMixin.pop
+    values = DictMixin.values
+    items = DictMixin.items
+    iterkeys = DictMixin.iterkeys
+    itervalues = DictMixin.itervalues
+    iteritems = DictMixin.iteritems
+
+    def __repr__(self):
+        if not self:
+            return '%s()' % (self.__class__.__name__,)
+        return '%s(%r)' % (self.__class__.__name__, self.items())
+
+    def copy(self):
+        return self.__class__(self)
+
+    @classmethod
+    def fromkeys(cls, iterable, value=None):
+        d = cls()
+        for key in iterable:
+            d[key] = value
+        return d
+
+    def __eq__(self, other):
+        if isinstance(other, OrderedDict):
+            if len(self) != len(other):
+                return False
+            for p, q in zip(self.items(), other.items()):
+                if p != q:
+                    return False
+            return True
+        return dict.__eq__(self, other)
+
+    def __ne__(self, other):
+        return not self == other
+
+odict = OrderedDict
diff --git a/modules/orderedset.py b/modules/orderedset.py
index 0464ca0..46ede2b 100644
--- a/modules/orderedset.py
+++ b/modules/orderedset.py
@@ -49,9 +49,9 @@ class OrderedSet(collections.MutableSet):
 
     def discard(self, key):
         if key in self.map:
-            key, prev, next = self.map.pop(key)
-            prev[NEXT] = next
-            next[PREV] = prev
+            key, prev, next_ = self.map.pop(key)
+            prev[NEXT] = next_
+            next_[PREV] = prev
 
     def __iter__(self):
         end = self.end
@@ -67,8 +67,8 @@ class OrderedSet(collections.MutableSet):
             yield curr[KEY]
             curr = curr[PREV]
 
-    def pop(self, last=True):
-        # changed default to last=False - by default, treat as queue.
+    def pop(self):
+        last = True
         if not self:
             raise KeyError('set is empty')
         key = next(reversed(self)) if last else next(iter(self))
diff --git a/modules/pacemaker.py b/modules/pacemaker.py
index 891f386..bb26458 100644
--- a/modules/pacemaker.py
+++ b/modules/pacemaker.py
@@ -201,23 +201,23 @@ class RngSchema(Schema):
         self.update_rng_docs(self.validate_name, self.schema_filename)
         return True
 
-    def update_rng_docs(self, validate_name="", file=""):
-        self.rng_docs[file] = self.find_start_rng_node(validate_name, file)
-        if self.rng_docs[file] is None:
+    def update_rng_docs(self, validate_name, filename):
+        self.rng_docs[filename] = self.find_start_rng_node(validate_name, filename)
+        if self.rng_docs[filename] is None:
             return
-        for extern_ref in self.rng_docs[file][0].xpath(self.expr, name="externalRef"):
+        for extern_ref in self.rng_docs[filename][0].xpath(self.expr, name="externalRef"):
             href_value = extern_ref.get("href")
             if self.rng_docs.get(href_value) is None:
                 self.update_rng_docs(validate_name, href_value)
 
-    def find_start_rng_node(self, validate_name="", file=""):
-        schema_info = validate_name + " " + file
+    def find_start_rng_node(self, validate_name, filename):
+        schema_info = validate_name + " " + filename
         crm_schema = self.get_schema_fn(validate_name,
-                                        os.path.join(self.local_dir, file))
+                                        os.path.join(self.local_dir, filename))
         if not crm_schema:
             raise PacemakerError("Cannot get the Relax-NG schema: " + schema_info)
 
-        self.schema_str_docs[file] = crm_schema
+        self.schema_str_docs[filename] = crm_schema
 
         try:
             grammar = etree.fromstring(crm_schema)
diff --git a/modules/parse.py b/modules/parse.py
index 4809c30..535db2e 100644
--- a/modules/parse.py
+++ b/modules/parse.py
@@ -48,12 +48,13 @@ class BaseParser(object):
     def init(self, validation):
         self.validation = validation
 
-    def err(self, errmsg):
+    def err(self, msg, context=None, token=None):
         "Report a parse error and abort."
-        token = None
-        if self.has_tokens():
+        if token is None and self.has_tokens():
             token = self._cmd[self._currtok]
-        syntax_err(self._cmd, context=self._cmd[0], token=token, msg=errmsg)
+        if context is None:
+            context = self._cmd[0]
+        syntax_err(self._cmd, context=context, token=token, msg=msg)
         raise ParseError
 
     def begin(self, cmd, min_args=-1):
@@ -190,7 +191,7 @@ class BaseParser(object):
         """
         ret = []
         if terminator is None:
-            terminator = RuleParser._TERMINATORS
+            terminator = RuleParser.TERMINATORS
         while True:
             tok = self.current_token()
             if tok is not None and tok.lower() in terminator:
@@ -314,7 +315,7 @@ class RuleParser(BaseParser):
     _UNARYOP_RE = re.compile(r'(%s)$' % ('|'.join(constants.unary_ops)), re.IGNORECASE)
     _BINOP_RE = None
 
-    _TERMINATORS = ('params', 'meta', 'utilization', 'operations', 'op', 'rule', 'attributes')
+    TERMINATORS = ('params', 'meta', 'utilization', 'operations', 'op', 'rule', 'attributes')
 
     def match_attr_list(self, name, tag, allow_empty=True):
         """
@@ -581,7 +582,6 @@ class NodeParser(RuleParser):
 class ResourceParser(RuleParser):
     _TEMPLATE_RE = re.compile(r'@(.+)$')
     _RA_TYPE_RE = re.compile(r'[a-z0-9_:-]+$', re.IGNORECASE)
-    _OPTYPE_RE = re.compile(r'(%s)$' % ('|'.join(constants.op_cli_names)), re.IGNORECASE)
 
     def can_parse(self):
         return ('primitive', 'group', 'clone', 'ms', 'master', 'rsc_template')
@@ -610,7 +610,7 @@ class ResourceParser(RuleParser):
           </op>
         """
         self.match('op')
-        op_type = self.match(self._OPTYPE_RE, errmsg="Expected operation type")
+        op_type = self.match_identifier()
         all_attrs = self.match_nvpairs(minpairs=0)
         node = xmlbuilder.new('op', name=op_type)
         if not any(nvp.get('name') == 'interval' for nvp in all_attrs):
@@ -879,11 +879,11 @@ class ConstraintParser(RuleParser):
         return [[name, info[0]]]
 
     def _split_setref(self, typename, classifier):
-            rsc, typ = self.match_split()
-            typ, t = classifier(typ)
-            if typ and not t:
-                self.err("Invalid %s '%s' for '%s'" % (typename, typ, rsc))
-            return rsc, typ, t
+        rsc, typ = self.match_split()
+        typ, t = classifier(typ)
+        if typ and not t:
+            self.err("Invalid %s '%s' for '%s'" % (typename, typ, rsc))
+        return rsc, typ, t
 
     def match_simple_role_set(self, count):
         ret = self._fmt(self._split_setref('role', self.validation.classify_role), 'rsc')
@@ -976,7 +976,7 @@ class FencingOrderParser(BaseParser):
     """
 
     _TARGET_RE = re.compile(r'([\w=-]+):$')
-    _TARGET_ATTR_RE = re.compile(r'attr:([\w-]+)=([\w-]+)$')
+    _TARGET_ATTR_RE = re.compile(r'attr:([\w-]+)=([\w-]+)$', re.IGNORECASE)
 
     def can_parse(self):
         return ('fencing-topology', 'fencing_topology')
@@ -1319,9 +1319,9 @@ class ResourceSet(object):
         '(': ')',
     }
 
-    def __init__(self, type, s, parent):
+    def __init__(self, q_attr, s, parent):
         self.parent = parent
-        self.q_attr = type
+        self.q_attr = q_attr
         self.tokens = s
         self.cli_list = []
         self.reset_set()
@@ -1398,11 +1398,7 @@ class ResourceSet(object):
         return l
 
     def err(self, errmsg, token=''):
-        syntax_err(self.parent._cmd,
-                   context=self.q_attr,
-                   token=token,
-                   msg=errmsg)
-        raise ParseError
+        self.parent.err(msg=errmsg, context=self.q_attr, token=token)
 
     def update_attrs(self, bracket, tokpos):
         if bracket in ('(', '['):
diff --git a/modules/ra.py b/modules/ra.py
index fa7867c..93acaba 100644
--- a/modules/ra.py
+++ b/modules/ra.py
@@ -225,25 +225,25 @@ def ra_classes():
 
 def ra_providers(ra_type, ra_class="ocf"):
     'List of providers for a class:type.'
-    id = "ra_providers-%s-%s" % (ra_class, ra_type)
-    if cache.is_cached(id):
-        return cache.retrieve(id)
+    ident = "ra_providers-%s-%s" % (ra_class, ra_type)
+    if cache.is_cached(ident):
+        return cache.retrieve(ident)
     l = ra_if().providers(ra_type, ra_class)
     l.sort()
-    return cache.store(id, l)
+    return cache.store(ident, l)
 
 
 def ra_providers_all(ra_class="ocf"):
     '''
     List of providers for a class.
     '''
-    id = "ra_providers_all-%s" % ra_class
-    if cache.is_cached(id):
-        return cache.retrieve(id)
+    ident = "ra_providers_all-%s" % ra_class
+    if cache.is_cached(ident):
+        return cache.retrieve(ident)
     ocf = os.path.join(os.environ["OCF_ROOT"], "resource.d")
     if os.path.isdir(ocf):
-        return cache.store(id, sorted([s for s in os.listdir(ocf)
-                                       if os.path.isdir(os.path.join(ocf, s))]))
+        return cache.store(ident, sorted(s for s in os.listdir(ocf)
+                                         if os.path.isdir(os.path.join(ocf, s))))
     return []
 
 
@@ -253,17 +253,17 @@ def ra_types(ra_class="ocf", ra_provider=""):
     '''
     if not ra_class:
         ra_class = "ocf"
-    id = "ra_types-%s-%s" % (ra_class, ra_provider)
-    if cache.is_cached(id):
-        return cache.retrieve(id)
-    list = []
-    for ra in ra_if().types(ra_class):
-        if (not ra_provider or
-                ra_provider in ra_providers(ra, ra_class)) \
-                and ra not in list:
-            list.append(ra)
-    list.sort()
-    return cache.store(id, list)
+    ident = "ra_types-%s-%s" % (ra_class, ra_provider)
+    if cache.is_cached(ident):
+        return cache.retrieve(ident)
+
+    if not ra_provider:
+        def include(ra):
+            return True
+    else:
+        def include(ra):
+            return ra_provider in ra_providers(ra, ra_class)
+    return cache.store(ident, sorted(list(set(ra for ra in ra_if().types(ra_class) if include(ra)))))
 
 
 @utils.memoize
@@ -431,9 +431,7 @@ class RAInfo(object):
     def param_type_default(self, n):
         try:
             content = n.find("content")
-            type = content.get("type")
-            default = content.get("default")
-            return type, default
+            return content.get("type"), content.get("default")
         except:
             return None, None
 
@@ -442,9 +440,9 @@ class RAInfo(object):
         Construct a dict of dicts: parameters are keys and
         dictionary of attributes/values are values. Cached too.
         '''
-        id = "ra_params-%s" % self.ra_string()
-        if cache.is_cached(id):
-            return cache.retrieve(id)
+        ident = "ra_params-%s" % self.ra_string()
+        if cache.is_cached(ident):
+            return cache.retrieve(ident)
         if self.mk_ra_node() is None:
             return None
         d = {}
@@ -454,14 +452,14 @@ class RAInfo(object):
                 continue
             required = c.get("required")
             unique = c.get("unique")
-            type, default = self.param_type_default(c)
+            typ, default = self.param_type_default(c)
             d[name] = {
                 "required": required,
                 "unique": unique,
-                "type": type,
+                "type": typ,
                 "default": default,
             }
-        return cache.store(id, d)
+        return cache.store(ident, d)
 
     def completion_params(self):
         '''
@@ -479,9 +477,9 @@ class RAInfo(object):
         Construct a dict of dicts: actions are keys and
         dictionary of attributes/values are values. Cached too.
         '''
-        id = "ra_actions-%s" % self.ra_string()
-        if cache.is_cached(id):
-            return cache.retrieve(id)
+        ident = "ra_actions-%s" % self.ra_string()
+        if cache.is_cached(ident):
+            return cache.retrieve(ident)
         if self.mk_ra_node() is None:
             return None
         d = {}
@@ -507,7 +505,7 @@ class RAInfo(object):
                 if norole_op not in d:
                     d2[norole_op] = d[op]
         d.update(d2)
-        return cache.store(id, d)
+        return cache.store(ident, d)
 
     def reqd_params_list(self):
         '''
@@ -543,7 +541,7 @@ class RAInfo(object):
                 return True
         return False
 
-    def sanity_check_params(self, id, nvpairs, existence_only=False):
+    def sanity_check_params(self, ident, nvpairs, existence_only=False):
         '''
         nvpairs is a list of <nvpair> tags.
         - are all required parameters defined
@@ -559,14 +557,14 @@ class RAInfo(object):
                 if self.unreq_param(p):
                     continue
                 if p not in d:
-                    common_err("%s: required parameter %s not defined" % (id, p))
+                    common_err("%s: required parameter %s not defined" % (ident, p))
                     rc |= utils.get_check_rc()
         for p in d:
             if p.startswith("$"):
                 # these are special, non-RA parameters
                 continue
             if p not in self.params():
-                common_err("%s: parameter %s does not exist" % (id, p))
+                common_err("%s: parameter %s does not exist" % (ident, p))
                 rc |= utils.get_check_rc()
         return rc
 
@@ -580,7 +578,7 @@ class RAInfo(object):
         except:
             return None
 
-    def sanity_check_ops(self, id, ops, default_timeout):
+    def sanity_check_ops(self, ident, ops, default_timeout):
         '''
         ops is a list of operations
         - do all operations exist
@@ -604,19 +602,19 @@ class RAInfo(object):
             if self.ra_class == "stonith" and op in ("start", "stop"):
                 continue
             if op not in self.actions():
-                common_warn("%s: action '%s' not found in Resource Agent meta-data" % (id, op))
+                common_warn("%s: action '%s' not found in Resource Agent meta-data" % (ident, op))
                 rc |= 1
             if "interval" in n_ops[op]:
                 v = n_ops[op]["interval"]
                 v_msec = crm_msec(v)
                 if op in ("start", "stop") and v_msec != 0:
-                    common_warn("%s: Specified interval for %s is %s, it must be 0" % (id, op, v))
+                    common_warn("%s: Specified interval for %s is %s, it must be 0" % (ident, op, v))
                     rc |= 1
                 if op.startswith("monitor") and v_msec != 0:
                     if v_msec not in intervals:
                         intervals[v_msec] = 1
                     else:
-                        common_warn("%s: interval in %s must be unique" % (id, op))
+                        common_warn("%s: interval in %s must be unique" % (ident, op))
                         rc |= 1
             try:
                 adv_timeout = self.actions()[op]["timeout"]
@@ -632,7 +630,7 @@ class RAInfo(object):
                 continue
             if crm_time_cmp(adv_timeout, v) > 0:
                 common_warn("%s: %s %s for %s is smaller than the advised %s" %
-                            (id, timeout_string, v, op, adv_timeout))
+                            (ident, timeout_string, v, op, adv_timeout))
                 rc |= 1
         return rc
 
@@ -771,11 +769,11 @@ def get_ra(r):
     or a CLI style class:provider:type string.
     """
     if isinstance(r, basestring):
-        cls, provider, type = disambiguate_ra_type(r)
+        cls, provider, typ = disambiguate_ra_type(r)
     else:
-        cls, provider, type = r.get('class'), r.get('provider'), r.get('type')
+        cls, provider, typ = r.get('class'), r.get('provider'), r.get('type')
     # note order of arguments!
-    return RAInfo(cls, type, provider)
+    return RAInfo(cls, typ, provider)
 
 
 #
diff --git a/modules/rsctest.py b/modules/rsctest.py
index 587897d..1fbf158 100644
--- a/modules/rsctest.py
+++ b/modules/rsctest.py
@@ -31,12 +31,12 @@ class RADriver(object):
             self.ra_class = rsc_node.get("class")
             self.ra_type = rsc_node.get("type")
             self.ra_provider = rsc_node.get("provider")
-            self.id = rsc_node.get("id")
+            self.ident = rsc_node.get("id")
         else:
             self.ra_class = None
             self.ra_type = None
             self.ra_provider = None
-            self.id = None
+            self.ident = None
         self.nodes = nodes
         self.outdir = mkdtemp(prefix="crmsh_out.")
         self.errdir = mkdtemp(prefix="crmsh_err.")
@@ -53,7 +53,7 @@ class RADriver(object):
         rmdir_r(self.errdir)
 
     def id_str(self):
-        return self.last_op and "%s:%s" % (self.id, self.last_op) or self.id
+        return self.last_op and "%s:%s" % (self.ident, self.last_op) or self.ident
 
     def err(self, s):
         common_err("%s: %s" % (self.id_str(), s))
@@ -248,7 +248,7 @@ class RAOCF(RADriver):
     def set_rscenv(self, op):
         RADriver.set_rscenv(self, op)
         self.nvset2env(get_child_nvset_node(self.rscdef_node, "instance_attributes"))
-        self.rscenv["OCF_RESOURCE_INSTANCE"] = self.id
+        self.rscenv["OCF_RESOURCE_INSTANCE"] = self.ident
         self.rscenv["OCF_ROOT"] = os.environ["OCF_ROOT"]
 
     def exec_cmd(self, op):
@@ -415,10 +415,10 @@ def test_resources(resources, nodes, all_nodes):
         started = []
         sys.stderr.write("testing on %s:" % node)
         for r in resources:
-            id = r.get("id")
+            ident = r.get("id")
             ra_class = r.get("class")
             drv = ra_driver[ra_class](r, (node,))
-            sys.stderr.write(" %s" % id)
+            sys.stderr.write(" %s" % ident)
             if drv.test_resource(node):
                 started.append(drv)
             else:
diff --git a/modules/schema.py b/modules/schema.py
index df3f127..2cd7b8d 100644
--- a/modules/schema.py
+++ b/modules/schema.py
@@ -113,15 +113,15 @@ def validate_name():
     return _crm_schema.validate_name
 
 
-def get(t, name, set=None):
+def get(t, name, subset=None):
     if _crm_schema is None:
         return []
     if t not in _store:
         _store[t] = {}
     if name not in _store[t]:
         _store[t][name] = _cache_funcs[t](_crm_schema, name)
-    if set:
-        return _store[t][name][set]
+    if subset:
+        return _store[t][name][subset]
     else:
         return _store[t][name]
 
diff --git a/modules/scripts.py b/modules/scripts.py
index c3bb4ca..3ae29ea 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -72,27 +72,27 @@ class Text(object):
 
     @staticmethod
     def shortdesc(script, text):
-        return Text(script, text, type=Text.SHORTDESC)
+        return Text(script, text, kind=Text.SHORTDESC)
 
     @staticmethod
     def desc(script, text):
-        return Text(script, text, type=Text.DESC)
+        return Text(script, text, kind=Text.DESC)
 
     @staticmethod
     def cib(script, text):
-        return Text(script, text, type=Text.CIB)
+        return Text(script, text, kind=Text.CIB)
 
     @staticmethod
     def isa(obj):
         return isinstance(obj, basestring) or isinstance(obj, Text)
 
-    def __init__(self, script, text, type=None):
+    def __init__(self, script, text, kind=None):
         self.script = script
         if isinstance(text, Text):
             self.text = text.text
         else:
             self.text = text
-        self.type = type
+        self._kind = kind
 
     def _parse(self):
         val = self.text
@@ -106,11 +106,11 @@ class Text(object):
         return repr(self.text)
 
     def __str__(self):
-        if self.type == self.DESC:
+        if self._kind == self.DESC:
             return format_desc(self._parse())
-        elif self.type == self.SHORTDESC:
+        elif self._kind == self.SHORTDESC:
             return self._parse()
-        elif self.type == self.CIB:
+        elif self._kind == self.CIB:
             return format_cib(self._parse())
         return self._parse()
 
@@ -153,7 +153,7 @@ class Actions(object):
     Each method in this class handles a particular action.
     """
     @staticmethod
-    def _parse(script, action):
+    def parse(script, action):
         """
         action: action data (dict)
         params: flat list of parameter values
@@ -183,7 +183,7 @@ class Actions(object):
                 action['value'] = value
 
             def arrow(v):
-                return ' -> '.join(x.items()[0])
+                return ' -> '.join(v.items()[0])
             action['text'] = '\n'.join([arrow(x) for x in value])
         elif name == 'cib' or name == 'crm':
             action['text'] = str(Text.cib(script, value))
@@ -204,21 +204,29 @@ class Actions(object):
             action['longdesc'] = ''
         else:
             action['longdesc'] = Text.desc(script, action['longdesc'])
+
+        hre = handles.headmatcher
+
         if 'when' in action:
             when = action['when']
-            if re.search(r'\{\{.*\}\}', when):
+            if hre.search(when):
                 action['when'] = Text(script, when)
             elif when:
                 action['when'] = Text(script, '{{%s}}' % (when))
             else:
                 del action['when']
+        for k, v in action.iteritems():
+            if isinstance(v, basestring) and hre.search(v):
+                v = Text(script, v)
+            if Text.isa(v):
+                action[k] = str(v).strip()
 
     @staticmethod
-    def _mergeable(action):
+    def mergeable(action):
         return action['name'] in ('cib', 'crm', 'install', 'service')
 
     @staticmethod
-    def _merge(into, new):
+    def merge(into, new):
         """
         Merge neighbour actions.
         Note: When this is called, all text values
@@ -247,9 +255,9 @@ class Actions(object):
         return True
 
     @staticmethod
-    def _needs_sudo(action):
-        if action['name'] == 'call' and action.get('sudo'):
-            return True
+    def needs_sudo(action):
+        if action['name'] == 'call':
+            return action.get('sudo') or action.get('nodes') != 'local'
         return action['name'] in ('apply', 'apply_local', 'install', 'service')
 
     def __init__(self, run, action):
@@ -463,11 +471,11 @@ def _upgrade_yaml(data):
 _hawk_template_cache = {}
 
 
-def _parse_hawk_template(workflow, name, type, step, actions):
+def _parse_hawk_template(workflow, name, kind, step, actions):
     """
     Convert a hawk template into steps + a cib action
     """
-    path = os.path.join(os.path.dirname(workflow), '../templates', type + '.xml')
+    path = os.path.join(os.path.dirname(workflow), '../templates', kind + '.xml')
     if path in _hawk_template_cache:
         xml = _hawk_template_cache[path]
     elif os.path.isfile(path):
@@ -598,7 +606,7 @@ def _parse_hawk_workflow(scriptname, scriptfile):
     return data
 
 
-def _build_script_cache():
+def build_script_cache():
     global _script_cache
     if _script_cache is not None:
         return
@@ -624,7 +632,7 @@ def list_scripts():
     List the available cluster installation scripts.
     Yields the names of the main script files.
     '''
-    _build_script_cache()
+    build_script_cache()
     return sorted(_script_cache.keys())
 
 
@@ -691,8 +699,7 @@ def _process_agent_include(script, include):
         raise ValueError("No meta-data for agent: %s" % (agent))
     name = include.get('name', meta.get('name'))
     if not name:
-        cls, provider, type = ra.disambiguate_ra_type(agent)
-        name = type
+        cls, provider, name = ra.disambiguate_ra_type(agent)
     if 'name' not in include:
         include['name'] = name
     step = _listfindpend(name, script['steps'], lambda x: x.get('name'), lambda: {
@@ -989,7 +996,7 @@ def _join_script_lines(txt):
     return s
 
 
-def _load_script_file(script, filename):
+def load_script_file(script, filename):
     if filename.endswith('.yml'):
         parsed = _parse_yaml(script, filename)
     elif filename.endswith('.xml'):
@@ -1005,7 +1012,7 @@ def _load_script_file(script, filename):
 
 
 def load_script_string(script, yml):
-    _build_script_cache()
+    build_script_cache()
     import cStringIO
     import yaml
     data = yaml.load(cStringIO.StringIO(yml))
@@ -1027,14 +1034,14 @@ def load_script_string(script, yml):
 
 
 def load_script(script):
-    _build_script_cache()
+    build_script_cache()
     if script not in _script_cache:
         common_debug("cache: %s" % (_script_cache.keys()))
         raise ValueError("Script not found: %s" % (script))
     s = _script_cache[script]
     if isinstance(s, basestring):
         try:
-            return _load_script_file(script, s)
+            return load_script_file(script, s)
         except KeyError as err:
             raise ValueError("Error when loading script %s: Expected key %s not found" % (script, err))
         except Exception as err:
@@ -1110,13 +1117,14 @@ def _generate_workdir_name():
 def _print_debug(printer, local_node, hosts, workdir, opts):
     "Print debug output (if any)"
     dbglog = os.path.join(workdir, 'crm_script.debug')
-    for host, result in _parallax_call(printer, hosts,
-                                       "if [ -f '%s' ]; then cat '%s'; fi" % (dbglog, dbglog),
-                                       opts).iteritems():
-        if isinstance(result, parallax.Error):
-            printer.error(host, result)
-        else:
-            printer.output(host, *result)
+    if hosts:
+        for host, result in _parallax_call(printer, hosts,
+                                           "if [ -f '%s' ]; then cat '%s'; fi" % (dbglog, dbglog),
+                                           opts).iteritems():
+            if isinstance(result, parallax.Error):
+                printer.error(host, result)
+            else:
+                printer.output(host, *result)
     if os.path.isfile(dbglog):
         f = open(dbglog).read()
         printer.output(local_node, 0, f, '')
@@ -1279,24 +1287,24 @@ def _valid_ip(value):
 def _verify_type(param, value, errors):
     if value is None:
         value = ''
-    type = param.get('type')
-    if not type:
+    vtype = param.get('type')
+    if not vtype:
         return value
-    elif type == 'integer':
+    elif vtype == 'integer':
         ok, _ = _valid_integer(value)
         if not ok:
             errors.append("%s=%s is not an integer" % (param.get('name'), value))
-    elif type == 'string':
+    elif vtype == 'string':
         return value
-    elif type == 'boolean':
+    elif vtype == 'boolean':
         return "true" if _make_boolean(value) else "false"
-    elif type == 'resource':
+    elif vtype == 'resource':
         try:
             if not _IDENT_RE.match(value):
                 errors.append("%s=%s invalid resource identifier" % (param.get('name'), value))
         except TypeError as e:
             errors.append("%s=%s %s" % (param.get('name'), value, str(e)))
-    elif type == 'enum':
+    elif vtype == 'enum':
         if 'values' not in param:
             errors.append("%s=%s enum without list of values" % (param.get('name'), value))
         else:
@@ -1306,12 +1314,11 @@ def _verify_type(param, value, errors):
             for v in opts:
                 if value.lower() == v.lower():
                     return v
-            else:
-                errors.append("%s=%s does not match '%s'" % (param.get('name'), value, "|".join(opts)))
-    elif type == 'ip_address':
+            errors.append("%s=%s does not match '%s'" % (param.get('name'), value, "|".join(opts)))
+    elif vtype == 'ip_address':
         if not _valid_ip(value):
             errors.append("%s=%s is not an IP address" % (param.get('name'), value))
-    elif type == 'ip_network':
+    elif vtype == 'ip_network':
         sp = value.rsplit('/', 1)
         if len(sp) == 1 and not (is_valid_ipv4_address(value) or is_valid_ipv6_address(value)):
             errors.append("%s=%s is not a valid IP network" % (param.get('name'), value))
@@ -1319,17 +1326,17 @@ def _verify_type(param, value, errors):
             errors.append("%s=%s is not a valid IP network" % (param.get('name'), value))
         else:
             errors.append("%s=%s is not a valid IP network" % (param.get('name'), value))
-    elif type == 'port':
+    elif vtype == 'port':
         ok, ival = _valid_integer(value)
         if not ok:
             errors.append("%s=%s is not a valid port" % (param.get('name'), value))
         if ival < 0 or ival > 65535:
             errors.append("%s=%s is out of port range" % (param.get('name'), value))
-    elif type == 'email':
+    elif vtype == 'email':
         if not re.match(r'[^@]+@[^@]+', value):
             errors.append("%s=%s is not a valid email address" % (param.get('name'), value))
     else:
-        errors.append("%s=%s is unknown type %s" % (param.get('name'), value, type))
+        errors.append("%s=%s is unknown type %s" % (param.get('name'), value, vtype))
     return value
 
 _NO_RESOLVE = object()
@@ -1762,7 +1769,7 @@ class RunActions(object):
             if not os.path.isfile(statefile):
                 raise ValueError("No state for action: %s" % (action_index))
             self.data = json.load(open(statefile))
-        if Actions._needs_sudo(action):
+        if Actions.needs_sudo(action):
             self._check_sudo_pass()
         result = self._run_action(action)
         json.dump(self.data, open(self.statefile, 'w'))
@@ -1774,7 +1781,7 @@ class RunActions(object):
         # run on local nodes
         # TODO: wait for remote results
         for action in self.actions:
-            if Actions._needs_sudo(action):
+            if Actions.needs_sudo(action):
                 self._check_sudo_pass()
             if not self._run_action(action):
                 return False
@@ -1870,7 +1877,7 @@ class RunActions(object):
         return False
 
     def _check_sudo_pass(self):
-        if self.sudo and not self.sudo_pass:
+        if self.sudo and not self.sudo_pass and userdir.getuser() != 'root':
             prompt = "sudo password: "
             self.sudo_pass = getpass.getpass(prompt=prompt)
 
@@ -1878,6 +1885,8 @@ class RunActions(object):
         islocal = False
         if nodes == 'all':
             pass
+        elif nodes == 'local':
+            islocal = True
         elif nodes is not None and nodes != []:
             islocal = nodes == [self.local_node_name()]
         else:
@@ -1886,16 +1895,20 @@ class RunActions(object):
         return islocal
 
     def call(self, nodes, cmdline, is_json_output=False):
-        if not self._is_local(nodes):
-            self.result = self._process_remote(cmdline, is_json_output)
+        if cmdline.startswith("#!"):
+            self.execute_shell(nodes or 'all', cmdline)
         else:
-            self.result = self._process_local(cmdline, is_json_output)
-        self.rc = self.result not in (False, None)
+            if not self._is_local(nodes):
+                self.result = self._process_remote(cmdline, is_json_output)
+            else:
+                self.result = self._process_local(cmdline, is_json_output)
+            self.rc = self.result not in (False, None)
 
     def execute_shell(self, nodes, cmdscript):
         """
         execute the shell script...
         """
+        cmdscript = str(cmdscript).rstrip() + '\n'
         if self.dry_run:
             self.printer.print_command(nodes, cmdscript)
             self.result = ''
@@ -2008,7 +2021,8 @@ class RunActions(object):
             return None
         self.printer.debug("Result(local): %s" % repr(out))
         if is_json_output:
-            out = json.loads(out)
+            if out != '':
+                out = json.loads(out)
         return out
 
     def local_node_name(self):
@@ -2069,8 +2083,10 @@ def run(script, params, printer):
         if not dry_run:
             if not config.core.debug:
                 _run_cleanup(printer, has_remote_actions, local_node, hosts, workdir, opts)
-            else:
+            elif has_remote_actions:
                 _print_debug(printer, local_node, hosts, workdir, opts)
+            else:
+                _print_debug(printer, local_node, None, workdir, opts)
 
 
 def _remove_empty_lines(txt):
@@ -2114,7 +2130,7 @@ def _process_actions(script, params):
             if action['include'] in subactions:
                 toadd.extend(subactions[action['include']])
         else:
-            Actions._parse(script, action)
+            Actions.parse(script, action)
             if 'when' in action:
                 when = str(action['when']).strip()
                 if when not in (False, None, '', 'false'):
@@ -2123,8 +2139,8 @@ def _process_actions(script, params):
                 toadd.append(action)
         if ret:
             for add in toadd:
-                if Actions._mergeable(add) and ret[-1]['name'] == add['name']:
-                    if not Actions._merge(ret[-1], add):
+                if Actions.mergeable(add) and ret[-1]['name'] == add['name']:
+                    if not Actions.merge(ret[-1], add):
                         ret.append(add)
                 else:
                     ret.append(add)
diff --git a/modules/term.py b/modules/term.py
index 2627277..af99294 100644
--- a/modules/term.py
+++ b/modules/term.py
@@ -6,29 +6,27 @@ import re
 
 # from: http://code.activestate.com/recipes/475116/
 
-"""
-A module that can be used to portably generate formatted output to
-a terminal.
-Defines a set of instance variables whose
-values are initialized to the control sequence necessary to
-perform a given action.  These can be simply included in normal
-output to the terminal:
-    >>> print 'This is '+term.colors.GREEN+'green'+term.colors.NORMAL
-Alternatively, the `render()` method can used, which replaces
-'${action}' with the string required to perform 'action':
-    >>> print term.render('This is ${GREEN}green${NORMAL}')
-If the terminal doesn't support a given action, then the value of
-the corresponding instance variable will be set to ''.  As a
-result, the above code will still work on terminals that do not
-support color, except that their output will not be colored.
-Also, this means that you can test whether the terminal supports a
-given action by simply testing the truth value of the
-corresponding instance variable:
-    >>> if term.colors.CLEAR_SCREEN:
-    ...     print 'This terminal supports clearning the screen.'
-Finally, if the width and height of the terminal are known, then
-they will be stored in the `COLS` and `LINES` attributes.
-"""
+# A module that can be used to portably generate formatted output to
+# a terminal.
+# Defines a set of instance variables whose
+# values are initialized to the control sequence necessary to
+# perform a given action.  These can be simply included in normal
+# output to the terminal:
+#     >>> print 'This is '+term.colors.GREEN+'green'+term.colors.NORMAL
+# Alternatively, the `render()` method can be used, which replaces
+# '${action}' with the string required to perform 'action':
+#     >>> print term.render('This is ${GREEN}green${NORMAL}')
+# If the terminal doesn't support a given action, then the value of
+# the corresponding instance variable will be set to ''.  As a
+# result, the above code will still work on terminals that do not
+# support color, except that their output will not be colored.
+# Also, this means that you can test whether the terminal supports a
+# given action by simply testing the truth value of the
+# corresponding instance variable:
+#     >>> if term.colors.CLEAR_SCREEN:
+#     ...     print 'This terminal supports clearing the screen.'
+# Finally, if the width and height of the terminal are known, then
+# they will be stored in the `COLS` and `LINES` attributes.
 
 
 class colors(object):
@@ -74,7 +72,7 @@ _COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
 _ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
 
 
-def _init():
+def init():
     """
     Initialize attributes with appropriate values for the current terminal.
 
diff --git a/modules/tmpfiles.py b/modules/tmpfiles.py
index bf5dbe7..811f5e5 100644
--- a/modules/tmpfiles.py
+++ b/modules/tmpfiles.py
@@ -39,18 +39,18 @@ def add(filename):
     _FILES.append(filename)
 
 
-def create(dir=utils.get_tempdir(), prefix='crmsh_'):
+def create(directory=utils.get_tempdir(), prefix='crmsh_'):
     '''
     Create a temporary file and remove it at program exit.
     Returns (fd, filename)
     '''
-    fd, fname = mkstemp(dir=dir, prefix=prefix)
+    fd, fname = mkstemp(dir=directory, prefix=prefix)
     add(fname)
     return fd, fname
 
 
-def create_dir(dir=utils.get_tempdir(), prefix='crmsh_'):
-    ret = mkdtemp(dir=dir, prefix=prefix)
+def create_dir(directory=utils.get_tempdir(), prefix='crmsh_'):
+    ret = mkdtemp(dir=directory, prefix=prefix)
     if len(_FILES) + len(_DIRS) == 0:
         atexit.register(_exit_handler)
     _DIRS.append(ret)
diff --git a/modules/ui_cib.py b/modules/ui_cib.py
index c2bc209..7f5c25e 100644
--- a/modules/ui_cib.py
+++ b/modules/ui_cib.py
@@ -58,7 +58,7 @@ class CibShadow(command.UI):
             del argl[0]
             constants.tmp_cib = False
         else:
-            fd, fname = tmpfiles.create(dir=xmlutil.cib_shadow_dir(), prefix="shadow.crmsh_")
+            fd, fname = tmpfiles.create(directory=xmlutil.cib_shadow_dir(), prefix="shadow.crmsh_")
             name = os.path.basename(fname).replace("shadow.", "")
             constants.tmp_cib = True
 
diff --git a/modules/ui_configure.py b/modules/ui_configure.py
index cf98702..ce24270 100644
--- a/modules/ui_configure.py
+++ b/modules/ui_configure.py
@@ -87,7 +87,7 @@ def stonith_resource_list(args):
 
 def _load_2nd_completer(args):
     if args[1] == 'xml':
-        return ['replace', 'update']
+        return ['replace', 'update', 'push']
     return []
 
 
@@ -324,12 +324,7 @@ class CibConfig(command.UI):
             else:
                 print v
         for p in properties:
-            v = cib_factory.get_property(p)
-            if v is None:
-                try:
-                    v = ra.get_properties_meta().param_default(p)
-                except:
-                    pass
+            v = cib_factory.get_property_w_default(p)
             if v is not None:
                 print_value(v)
             elif truth:
@@ -491,9 +486,9 @@ class CibConfig(command.UI):
         return set_obj.save_to_file(filename)
 
     @command.skill_level('administrator')
-    @command.completers(compl.choice(['xml', 'replace', 'update']), _load_2nd_completer)
+    @command.completers(compl.choice(['xml', 'replace', 'update', 'push']), _load_2nd_completer)
     def do_load(self, context, *args):
-        "usage: load [xml] {replace|update} {<url>|<path>}"
+        "usage: load [xml] {replace|update|push} {<url>|<path>}"
         if len(args) < 2:
             context.fatal_error("Expected 2 arguments (0 given)")
         if args[0] == "xml":
@@ -508,7 +503,7 @@ class CibConfig(command.UI):
             url = args[1]
             method = args[0]
             xml = False
-        if method not in ("replace", "update"):
+        if method not in ("replace", "update", "push"):
             context.fatal_error("Unknown method %s" % method)
         if method == "replace":
             if options.interactive and cib_factory.has_cib_changed():
@@ -552,6 +547,7 @@ class CibConfig(command.UI):
             if not ok or not cib_factory.commit():
                 raise ValueError("Failed to stop one or more running resources: %s" %
                                  (', '.join(to_stop)))
+        return len(to_stop)
 
     @command.skill_level('administrator')
     @command.completers_repeating(_id_list)
@@ -562,8 +558,8 @@ class CibConfig(command.UI):
         arg_force = any((x in ('-f', '--force')) for x in argl)
         argl = [x for x in argl if (x not in ('-f', '--force'))]
         if arg_force or config.core.force:
-            self._stop_if_running(argl)
-            utils.wait4dc(what="Stopping %s" % (", ".join(argl)))
+            if self._stop_if_running(argl) > 0:
+                utils.wait4dc(what="Stopping %s" % (", ".join(argl)))
         return cib_factory.delete(*argl)
 
     @command.name('default-timeouts')
@@ -610,9 +606,6 @@ class CibConfig(command.UI):
         return ui_utils.ptestlike(set_obj.ptest, 'vv', context.get_command_name(), args)
 
     def _commit(self, force=False, replace=False):
-        if force:
-            syntax_err(('configure.commit', force))
-            return False
         if not cib_factory.has_cib_changed():
             common_info("apparently there is nothing to commit")
             common_info("try changing something first")
diff --git a/modules/ui_context.py b/modules/ui_context.py
index 143425d..7947705 100644
--- a/modules/ui_context.py
+++ b/modules/ui_context.py
@@ -11,8 +11,8 @@ from . import ui_utils
 from . import userdir
 
 
-#import logging
-#logging.basicConfig(level=logging.DEBUG,
+# import logging
+# logging.basicConfig(level=logging.DEBUG,
 #                    filename='/tmp/crm-completion.log',
 #                    filemode='a')
 
@@ -140,10 +140,10 @@ class Context(object):
                 # not sure this is the right thing to do
                 return self.current_level().get_completions()
             except ValueError:
-                #common_err("%s: %s" % (self.get_qualified_name(), msg))
+                # common_err("%s: %s" % (self.get_qualified_name(), msg))
                 pass
             except IOError:
-                #common_err("%s: %s" % (self.get_qualified_name(), msg))
+                # common_err("%s: %s" % (self.get_qualified_name(), msg))
                 pass
             return []
         finally:
@@ -157,10 +157,10 @@ class Context(object):
         import readline
         readline.set_history_length(100)
         for v in ('tab: complete',
-                  #'set bell-style visible',
-                  #'set menu-complete-display-prefix on',
-                  #'set show-all-if-ambiguous on',
-                  #'set show-all-if-unmodified on',
+                  # 'set bell-style visible',
+                  # 'set menu-complete-display-prefix on',
+                  # 'set show-all-if-ambiguous on',
+                  # 'set show-all-if-unmodified on',
                   'set skip-completed-text on'):
             readline.parse_and_bind(v)
         readline.set_completer(self.readline_completer)
@@ -190,15 +190,15 @@ class Context(object):
                     self._rl_words = [w for w in completions if matching(w)]
                 else:
                     self._rl_words = completions
-            except Exception, msg:
-                #logging.exception(msg)
+            except Exception:  # , msg:
+                # logging.exception(msg)
                 self.clear_readline_cache()
 
         try:
             ret = self._rl_words[state]
         except IndexError:
             ret = None
-        #logging.debug("line:%s, text:%s, ret:%s, state:%s", repr(line), repr(text), ret, state)
+        # logging.debug("line:%s, text:%s, ret:%s, state:%s", repr(line), repr(text), ret, state)
         if not text or (ret and line.split()[-1].endswith(ret)):
             return ret + ' '
         return ret
diff --git a/modules/ui_corosync.py b/modules/ui_corosync.py
index 771acdf..0dec867 100644
--- a/modules/ui_corosync.py
+++ b/modules/ui_corosync.py
@@ -120,9 +120,9 @@ class Corosync(command.UI):
     @command.name('add-node')
     @command.alias('add_node')
     @command.skill_level('administrator')
-    def do_addnode(self, context, name):
+    def do_addnode(self, context, addr, name=None):
         "Add a node to the corosync nodelist"
-        corosync.add_node(name)
+        corosync.add_node(addr, name)
 
     @command.name('del-node')
     @command.alias('del_node')
diff --git a/modules/ui_history.py b/modules/ui_history.py
index 2a632d1..8ef2f50 100644
--- a/modules/ui_history.py
+++ b/modules/ui_history.py
@@ -69,21 +69,16 @@ class History(command.UI):
                 return False
         return crm_report().set_period(from_dt, to_dt)
 
-    def _check_source(self, src):
-        'a (very) quick source check'
-        if src == "live":
-            return True
-        if os.path.isfile(src) or os.path.isdir(src):
-            return True
-        return False
-
     def _set_source(self, src, live_from_time=None):
         '''
         Have the last history source survive the History
         and Report instances
         '''
+        def _check_source():
+            return (src == 'live') or os.path.isfile(src) or os.path.isdir(src)
+
         common_debug("setting source to %s" % src)
-        if not self._check_source(src):
+        if not _check_source():
             if os.path.exists(crm_report().get_session_dir(src)):
                 common_debug("Interpreting %s as session" % src)
                 if crm_report().load_state(crm_report().get_session_dir(src)):
@@ -127,9 +122,6 @@ class History(command.UI):
     def do_refresh(self, context, force=''):
         "usage: refresh"
         self._init_source()
-        if options.history != "live":
-            common_info("nothing to refresh if source isn't live")
-            return False
         if force:
             if force != "force" and force != "--force":
                 context.fatal_error("Expected 'force' or '--force' (was '%s')" % (force))
@@ -193,7 +185,7 @@ class History(command.UI):
     def do_log(self, context, *args):
         "usage: log [<node> ...]"
         self._init_source()
-        return crm_report().log(*args)
+        return crm_report().show_log(*args)
 
     def ptest(self, nograph, scores, utilization, actions, verbosity):
         'Send a decompressed self.pe_file to ptest'
@@ -225,9 +217,9 @@ class History(command.UI):
                 if a and len(a) == 2 and not utils.check_range(a):
                     common_err("%s: invalid peinputs range" % a)
                     return False
-                l += crm_report().pelist(a, long=("v" in opt_l))
+                l += crm_report().pelist(a, verbose=("v" in opt_l))
         else:
-            l = crm_report().pelist(long=("v" in opt_l))
+            l = crm_report().pelist(verbose=("v" in opt_l))
         if not l:
             return False
         s = '\n'.join(l)
@@ -303,6 +295,12 @@ class History(command.UI):
         return xmlutil.pe2shadow(f, name)
 
     @command.skill_level('administrator')
+    def do_transitions(self, context):
+        self._init_source()
+        s = '\n'.join(crm_report().show_transitions())
+        utils.page_string(s)
+
+    @command.skill_level('administrator')
     @command.completers(compl.join(compl.call(lambda: crm_report().peinputs_list()),
                                    compl.choice(['log', 'showdot', 'save'])))
     def do_transition(self, context, *args):
@@ -337,7 +335,10 @@ class History(command.UI):
         elif subcmd == "save":
             rc = self._pe2shadow(f, argl)
         elif subcmd == "tags":
-            rc = crm_report().show_transition_tags(f)
+            tags = crm_report().get_transition_tags(f)
+            rc = tags is not None
+            if rc:
+                print(' '.join(tags) if len(tags) else "No tags.")
         else:
             rc = crm_report().show_transition_log(f, True)
         return rc
@@ -382,7 +383,7 @@ class History(command.UI):
 
     def _pe_config_plain(self, pe_f):
         '''Configuration with no formatting (but with colors).'''
-        return self._pe_config_obj(pe_f).repr(format=0)
+        return self._pe_config_obj(pe_f).repr(format_mode=0)
 
     def _pe_config(self, pe_f):
         '''Formatted configuration.'''
@@ -410,15 +411,11 @@ class History(command.UI):
                        (rc, s))
             return None
         l = s.split('\n')
-        for i, ln in enumerate(l):
-            if ln == "":
-                break
-        try:
-            while l[i] == "":
-                i += 1
-        except:
-            pass
-        return '\n'.join(l[i:])
+        while l and l[0] != "":
+            l = l[1:]
+        while l and l[0] == "":
+            l = l[1:]
+        return '\n'.join(l)
 
     def _get_diff_pe_input(self, t):
         if t != "live":
diff --git a/modules/ui_maintenance.py b/modules/ui_maintenance.py
index 4d6ce0b..8d9a232 100644
--- a/modules/ui_maintenance.py
+++ b/modules/ui_maintenance.py
@@ -3,6 +3,7 @@
 
 from . import command
 from . import completers as compl
+from . import config
 from .cibconfig import cib_factory
 from . import utils
 from . import xmlutil
@@ -71,7 +72,7 @@ class Maintenance(command.UI):
             context.fatal_error("Resource not found: %s" % (resource))
         if not xmlutil.is_resource(obj.node):
             context.fatal_error("Not a resource: %s" % (resource))
-        if not self._in_maintenance_mode(obj):
+        if not config.core.force and not self._in_maintenance_mode(obj):
             context.fatal_error("Not in maintenance mode.")
 
         if ssh is None:
diff --git a/modules/ui_node.py b/modules/ui_node.py
index e752039..c7a3cfc 100644
--- a/modules/ui_node.py
+++ b/modules/ui_node.py
@@ -24,25 +24,25 @@ def unpack_node_xmldata(node, is_offline):
     returns the data to pass to print_node
     is_offline: true|false
     """
-    type = uname = id = ""
+    typ = uname = ident = ""
     inst_attr = []
     other = {}
     for attr in node.keys():
         v = node.get(attr)
         if attr == "type":
-            type = v
+            typ = v
         elif attr == "uname":
             uname = v
         elif attr == "id":
-            id = v
+            ident = v
         else:
             other[attr] = v
     inst_attr = [cli_nvpairs(nvpairs2list(elem))
                  for elem in node.xpath('./instance_attributes')]
-    return uname, id, type, other, inst_attr, is_offline
+    return uname, ident, typ, other, inst_attr, is_offline
 
 
-def print_node(uname, id, node_type, other, inst_attr, offline):
+def print_node(uname, ident, node_type, other, inst_attr, offline):
     """
     Try to pretty print a node from the cib. Sth like:
     uname(id): node_type
@@ -52,10 +52,10 @@ def print_node(uname, id, node_type, other, inst_attr, offline):
     s_offline = offline and "(offline)" or ""
     if not node_type:
         node_type = "normal"
-    if uname == id:
+    if uname == ident:
         print term.render("%s: %s%s" % (uname, node_type, s_offline))
     else:
-        print term.render("%s(%s): %s%s" % (uname, id, node_type, s_offline))
+        print term.render("%s(%s): %s%s" % (uname, ident, node_type, s_offline))
     for a in other:
         print term.render("\t%s: %s" % (a, other[a]))
     for s in inst_attr:
@@ -213,9 +213,12 @@ class NodeMgmt(command.UI):
         if not utils.is_name_sane(node):
             return False
         if not config.core.force and \
-                not utils.ask("Do you really want to shoot %s?" % node):
+                not utils.ask("Fencing %s will shut down the node and migrate any resources that are running on it! Do you want to fence %s?" % (node, node)):
             return False
-        return utils.ext_cmd(self.node_fence % (node)) == 0
+        if xmlutil.is_remote_node(node):
+            return utils.ext_cmd("stonith_admin -F '%s'" % (node)) == 0
+        else:
+            return utils.ext_cmd(self.node_fence % (node)) == 0
 
     @command.wait
     @command.completers(compl.nodes)
diff --git a/modules/ui_options.py b/modules/ui_options.py
index 43744a5..72b743b 100644
--- a/modules/ui_options.py
+++ b/modules/ui_options.py
@@ -99,7 +99,7 @@ class CliOptions(command.UI):
         "usage: output <type>"
         _legacy_set_pref("output", output_type)
         from . import term
-        term._init()
+        term.init()
 
     def do_colorscheme(self, context, colors):
         "usage: colorscheme <colors>"
diff --git a/modules/ui_script.py b/modules/ui_script.py
index b0cd449..61ce3f3 100644
--- a/modules/ui_script.py
+++ b/modules/ui_script.py
@@ -109,8 +109,8 @@ class JsonPrinter(object):
         pass
 
 
-def describe_param(p, name, all):
-    if not all and p.get('advanced'):
+def describe_param(p, name, getall):
+    if not getall and p.get('advanced'):
         return ""
     opt = ' (required) ' if p['required'] else ''
     opt += ' (unique) ' if p['unique'] else ''
@@ -127,7 +127,7 @@ def _scoped_name(context, name):
     return name
 
 
-def describe_step(icontext, context, s, all):
+def describe_step(icontext, context, s, getall):
     ret = "%s. %s" % ('.'.join([str(i + 1) for i in icontext]), scripts.format_desc(s['shortdesc']) or 'Parameters')
     if not s['required']:
         ret += ' (optional)'
@@ -135,9 +135,9 @@ def describe_step(icontext, context, s, all):
     if s.get('name'):
         context = context + [s['name']]
     for p in s.get('parameters', []):
-        ret += describe_param(p, _scoped_name(context, p['name']), all)
+        ret += describe_param(p, _scoped_name(context, p['name']), getall)
     for i, step in enumerate(s.get('steps', [])):
-        ret += describe_step(icontext + [i], context, step, all)
+        ret += describe_step(icontext + [i], context, step, getall)
     return ret
 
 
@@ -159,12 +159,17 @@ def _nvpairs2parameters(args):
         _set(ret, key.split(':'), val)
     return ret
 
+_fixups = {
+    'wizard': 'Legacy Wizards',
+    'sap': 'SAP',
+    'nfs': 'NFS'
+}
+
 
 def _category_pretty(c):
-    if str(c).lower() == 'wizard':
-        return "Wizard (Legacy)"
-    elif str(c).lower() == 'sap':
-        return "SAP"
+    v = _fixups.get(str(c).lower())
+    if v is not None:
+        return v
     return str(c).capitalize()
 
 
@@ -189,7 +194,7 @@ class Script(command.UI):
         for arg in args:
             if arg.lower() not in ("all", "names"):
                 context.fatal_error("Unexpected argument '%s': expected  [all|names]" % (arg))
-        all = any([x for x in args if x.lower() == 'all'])
+        show_all = any([x for x in args if x.lower() == 'all'])
         names = any([x for x in args if x.lower() == 'names'])
         if not names:
             categories = {}
@@ -199,7 +204,7 @@ class Script(command.UI):
                     if script is None:
                         continue
                     cat = script['category'].lower()
-                    if not all and cat == 'script':
+                    if not show_all and cat == 'script':
                         continue
                     cat = _category_pretty(cat)
                     if cat not in categories:
@@ -214,7 +219,7 @@ class Script(command.UI):
                 for s in sorted(lst):
                     print(s)
                 print('')
-        elif all:
+        elif show_all:
             for name in scripts.list_scripts():
                 print(name)
         else:
@@ -230,7 +235,7 @@ class Script(command.UI):
 
     @command.completers_repeating(compl.call(scripts.list_scripts))
     @command.alias('info', 'describe')
-    def do_show(self, context, name, all=None):
+    def do_show(self, context, name, show_all=None):
         '''
         Describe the given script.
         '''
@@ -238,14 +243,14 @@ class Script(command.UI):
         if script is None:
             return False
 
-        all = all == 'all'
+        show_all = show_all == 'all'
 
         vals = {
             'name': script['name'],
             'category': _category_pretty(script['category']),
             'shortdesc': str(script['shortdesc']),
             'longdesc': scripts.format_desc(script['longdesc']),
-            'steps': "\n".join((describe_step([i], [], s, all) for i, s in enumerate(script['steps'])))}
+            'steps': "\n".join((describe_step([i], [], s, show_all) for i, s in enumerate(script['steps'])))}
         output = """%(name)s (%(category)s)
 %(shortdesc)s
 
@@ -253,7 +258,7 @@ class Script(command.UI):
 
 %(steps)s
 """ % vals
-        if all:
+        if show_all:
             output += "Common Parameters\n\n"
             for name, defval, desc in scripts.common_params():
                 output += "  %s\n" % (name)
@@ -370,9 +375,9 @@ class Script(command.UI):
         fromscript = os.path.abspath(workflow)
         tgtdir = outdir
 
-        scripts._build_script_cache()
+        scripts.build_script_cache()
         name = os.path.splitext(os.path.basename(fromscript))[0]
-        script = scripts._load_script_file(name, fromscript)
+        script = scripts.load_script_file(name, fromscript)
         script = flatten(script)
         script["category"] = category
         del script["name"]
@@ -445,11 +450,14 @@ class Script(command.UI):
             return False
         else:
             for action in actions:
-                print(json.dumps({'name': str(action.get('name', '')),
-                                  'shortdesc': str(action.get('shortdesc', '')),
-                                  'longdesc': str(action.get('longdesc', '')),
-                                  'text': str(action.get('text', '')),
-                                  'nodes': str(action.get('nodes', ''))}))
+                obj = {'name': str(action.get('name', '')),
+                       'shortdesc': str(action.get('shortdesc', '')),
+                       'longdesc': str(action.get('longdesc', '')),
+                       'text': str(action.get('text', '')),
+                       'nodes': str(action.get('nodes', ''))}
+                if 'sudo' in action:
+                    obj['sudo'] = action['sudo']
+                print(json.dumps(obj))
         return True
 
     def _json_run(self, context, cmd):
diff --git a/modules/ui_template.py b/modules/ui_template.py
index 936f984..05af423 100644
--- a/modules/ui_template.py
+++ b/modules/ui_template.py
@@ -300,7 +300,7 @@ class Template(command.UI):
             err_buf.incr_lineno()
             if inp.startswith('#'):
                 continue
-            if type(inp) == type(u''):
+            if isinstance(inp, unicode):
                 inp = inp.encode('ascii')
             inp = inp.strip()
             try:
diff --git a/modules/ui_utils.py b/modules/ui_utils.py
index 20cf296..70c04d0 100644
--- a/modules/ui_utils.py
+++ b/modules/ui_utils.py
@@ -9,7 +9,6 @@ from . import utils
 
 
 def _get_attr_cmd(attr_ext_commands, subcmd):
-    attr_ext_commands
     try:
         attr_cmd = attr_ext_commands[subcmd]
         if attr_cmd:
diff --git a/modules/utils.py b/modules/utils.py
index a72aa19..3d40186 100644
--- a/modules/utils.py
+++ b/modules/utils.py
@@ -11,6 +11,9 @@ import time
 import datetime
 import shutil
 import bz2
+import fnmatch
+import gc
+from contextlib import contextmanager
 from . import config
 from . import userdir
 from . import constants
@@ -19,18 +22,25 @@ from . import term
 from .msg import common_warn, common_info, common_debug, common_err, err_buf
 
 
-class memoize:
+def memoize(function):
     "Decorator to invoke a function once only for any argument"
-    def __init__(self, function):
-        self.function = function
-        self.memoized = {}
-
-    def __call__(self, *args):
-        try:
-            return self.memoized[args]
-        except KeyError:
-            self.memoized[args] = self.function(*args)
-            return self.memoized[args]
+    memoized = {}
+    def inner(*args):
+        if args in memoized:
+            return memoized[args]
+        r = function(*args)
+        memoized[args] = r
+        return r
+    return inner
+
+
+ at contextmanager
+def nogc():
+    gc.disable()
+    try:
+        yield
+    finally:
+        gc.enable()
 
 
 getuser = userdir.getuser
@@ -158,10 +168,18 @@ def verify_boolean(opt):
 
 
 def is_boolean_true(opt):
+    if opt in (None, False):
+        return False
+    if opt is True:
+        return True
     return opt.lower() in ("yes", "true", "on", "1")
 
 
 def is_boolean_false(opt):
+    if opt in (None, False):
+        return True
+    if opt is True:
+        return False
     return opt.lower() in ("no", "false", "off", "0")
 
 
@@ -238,7 +256,7 @@ def pipe_string(cmd, s):
         p.communicate(s)
         p.wait()
         rc = p.returncode
-    except IOError, msg:
+    except IOError as msg:
         if "Broken pipe" not in msg:
             common_err(msg)
     return rc
@@ -443,31 +461,31 @@ _LOCKDIR = ".lockdir"
 _PIDF = "pid"
 
 
-def check_locker(dir):
-    if not os.path.isdir(os.path.join(dir, _LOCKDIR)):
+def check_locker(lockdir):
+    if not os.path.isdir(os.path.join(lockdir, _LOCKDIR)):
         return
-    s = file2str(os.path.join(dir, _LOCKDIR, _PIDF))
+    s = file2str(os.path.join(lockdir, _LOCKDIR, _PIDF))
     pid = convert2ints(s)
     if not isinstance(pid, int):
         common_warn("history: removing malformed lock")
-        rmdir_r(os.path.join(dir, _LOCKDIR))
+        rmdir_r(os.path.join(lockdir, _LOCKDIR))
         return
     try:
         os.kill(pid, 0)
     except OSError, (errno, strerror):
         if errno == os.errno.ESRCH:
             common_info("history: removing stale lock")
-            rmdir_r(os.path.join(dir, _LOCKDIR))
+            rmdir_r(os.path.join(lockdir, _LOCKDIR))
         else:
             common_err("%s: %s" % (_LOCKDIR, strerror))
 
 
-def acquire_lock(dir):
-    check_locker(dir)
+def acquire_lock(lockdir):
+    check_locker(lockdir)
     while True:
         try:
-            os.makedirs(os.path.join(dir, _LOCKDIR))
-            str2file("%d" % os.getpid(), os.path.join(dir, _LOCKDIR, _PIDF))
+            os.makedirs(os.path.join(lockdir, _LOCKDIR))
+            str2file("%d" % os.getpid(), os.path.join(lockdir, _LOCKDIR, _PIDF))
             return True
         except OSError, (errno, strerror):
             if errno != os.errno.EEXIST:
@@ -479,8 +497,8 @@ def acquire_lock(dir):
             return False
 
 
-def release_lock(dir):
-    rmdir_r(os.path.join(dir, _LOCKDIR))
+def release_lock(lockdir):
+    rmdir_r(os.path.join(lockdir, _LOCKDIR))
 
 
 def pipe_cmd_nosudo(cmd):
@@ -682,15 +700,15 @@ def run_ptest(graph_s, nograph, scores, utilization, actions, verbosity):
     return True
 
 
-def is_id_valid(id):
+def is_id_valid(ident):
     """
     Verify that the id follows the definition:
     http://www.w3.org/TR/1999/REC-xml-names-19990114/#ns-qualnames
     """
-    if not id:
+    if not ident:
         return False
     id_re = r"^[A-Za-z_][\w._-]*$"
-    return re.match(id_re, id)
+    return re.match(id_re, ident)
 
 
 def check_range(a):
@@ -764,29 +782,13 @@ def sort_by_mtime(l):
     return [x[1] for x in l2]
 
 
-def dirwalk(dir):
-    "walk a directory tree, using a generator"
-    # http://code.activestate.com/recipes/105873/
-    for f in os.listdir(dir):
-        fullpath = os.path.join(dir, f)
-        if os.path.isdir(fullpath) and not os.path.islink(fullpath):
-            for x in dirwalk(fullpath):  # recurse into subdir
-                yield x
-        else:
-            yield fullpath
-
-
-def file_find_by_name(dir, fname):
-    'Find a file within a tree matching fname.'
-    if not dir:
-        common_err("cannot dirwalk nothing!")
-        return None
-    if not fname:
-        common_err("file to find not provided")
-        return None
-    for f in dirwalk(dir):
-        if os.path.basename(f) == fname:
-            return f
+def file_find_by_name(root, filename):
+    'Find a file within a tree matching fname'
+    assert(root)
+    assert(filename)
+    for root, dirnames, filenames in os.walk(root):
+        for filename in fnmatch.filter(filenames, filename):
+            return os.path.join(root, filename)
     return None
 
 
@@ -924,6 +926,15 @@ def page_string(s):
         pipe_string(get_pager_cmd(), term_render(s))
 
 
+def page_gen(g):
+    'Page lines generated by generator g'
+    w, h = get_winsize()
+    if not config.core.pager or not can_ask() or options.batch:
+        for line in g:
+            sys.stdout.write(term_render(line))
+    else:
+        pipe_string(get_pager_cmd(), term_render("".join(g)))
+
 def page_file(filename):
     'Open file in pager'
     if not os.path.isfile(filename):
@@ -1059,6 +1070,15 @@ def datetime_to_timestamp(dt):
         return None
 
 
+def timestamp_to_datetime(ts):
+    """
+    Convert a timestamp into a naive datetime object
+    """
+    import dateutil
+    import dateutil.tz
+    return make_datetime_naive(datetime.datetime.fromtimestamp(ts).replace(tzinfo=dateutil.tz.tzlocal()))
+
+
 def parse_time(t):
     '''
     Try to make sense of the user provided time spec.
@@ -1067,11 +1087,12 @@ def parse_time(t):
 
     Also does time zone elimination by passing the datetime
     through a timestamp conversion if necessary
+
+    TODO: dateutil is very slow, avoid it if possible
     '''
     try:
-        import dateutil.parser
-        import dateutil.tz
-        dt = dateutil.parser.parse(t)
+        from dateutil import parser, tz
+        dt = parser.parse(t)
 
         if datetime_is_aware(dt):
             ts = datetime_to_timestamp(dt)
@@ -1080,9 +1101,9 @@ def parse_time(t):
             dt = datetime.datetime.fromtimestamp(ts)
         else:
             # convert to UTC from local time
-            dt = make_datetime_naive(dt.replace(tzinfo=dateutil.tz.tzlocal()))
+            dt = dt - tz.tzlocal().utcoffset(dt)
     except ValueError, msg:
-        common_err("%s: %s" % (t, msg))
+        common_err("parse_time %s: %s" % (t, msg))
         return None
     except ImportError, msg:
         try:
@@ -1094,6 +1115,35 @@ def parse_time(t):
     return dt
 
 
+def parse_to_timestamp(t):
+    '''
+    Read a string and convert it into a UNIX timestamp.
+    Added as an optimization of parse_time to avoid
+    extra conversion steps when result would be converted
+    into a timestamp anyway
+    '''
+    try:
+        from dateutil import parser, tz
+        dt = parser.parse(t)
+
+        if datetime_is_aware(dt):
+            return datetime_to_timestamp(dt)
+        else:
+            # convert to UTC from local time
+            return total_seconds(dt - tz.tzlocal().utcoffset(dt) - datetime.datetime(1970, 1, 1))
+    except ValueError, msg:
+        common_err("parse_time %s: %s" % (t, msg))
+        return None
+    except ImportError, msg:
+        try:
+            tm = time.strptime(t)
+            dt = datetime.datetime(*tm[0:7])
+            return datetime_to_timestamp(dt)
+        except ValueError, msg:
+            common_err("no dateutil, please provide times as printed by date(1)")
+            return None
+
+
 def save_graphviz_file(ini_f, attr_d):
     '''
     Save graphviz settings to an ini file, if it does not exist.
@@ -1337,6 +1387,25 @@ def resolve_hostnames(hostnames):
     return True, None
 
 
+def list_corosync_node_names():
+    '''
+    Returns list of nodes configured
+    in corosync.conf
+    '''
+    try:
+        cfg = os.getenv('COROSYNC_MAIN_CONFIG_FILE', '/etc/corosync/corosync.conf')
+        lines = open(cfg).read().split('\n')
+        name_re = re.compile(r'\s*name:\s+(.*)')
+        names = []
+        for line in lines:
+            name = name_re.match(line)
+            if name:
+                names.append(name.group(1))
+        return names
+    except Exception:
+        return []
+
+
 def list_corosync_nodes():
     '''
     Returns list of nodes configured
diff --git a/modules/xmlutil.py b/modules/xmlutil.py
index 0dfd31a..555aeff 100644
--- a/modules/xmlutil.py
+++ b/modules/xmlutil.py
@@ -45,7 +45,6 @@ def file2cib_elem(s):
 def compressed_file_to_cib(s):
     try:
         if s.endswith('.bz2'):
-            import bz2
             f = bz2.BZ2File(s)
         elif s.endswith('.gz'):
             import gzip
@@ -130,23 +129,23 @@ def read_cib(fun, params=None):
     return cib_elem
 
 
-def sanity_check_nvpairs(id, node, attr_list):
+def sanity_check_nvpairs(ident, node, attr_list):
     rc = 0
     for nvpair in node.iterchildren("nvpair"):
         n = nvpair.get("name")
         if n and n not in attr_list:
-            common_err("%s: attribute %s does not exist" % (id, n))
+            common_err("%s: attribute %s does not exist" % (ident, n))
             rc |= utils.get_check_rc()
     return rc
 
 
-def sanity_check_meta(id, node, attr_list):
+def sanity_check_meta(ident, node, attr_list):
     rc = 0
     if node is None or not attr_list:
         return rc
     for c in node.iterchildren():
         if c.tag == "meta_attributes":
-            rc |= sanity_check_nvpairs(id, c, attr_list)
+            rc |= sanity_check_nvpairs(ident, c, attr_list)
     return rc
 
 
@@ -197,7 +196,7 @@ class RscState(object):
         self.prop_elem = get_first_conf_elem(cib, "crm_config/cluster_property_set")
         self.rsc_dflt_elem = get_first_conf_elem(cib, "rsc_defaults/meta_attributes")
 
-    def rsc2node(self, id):
+    def rsc2node(self, ident):
         '''
         Get a resource XML element given the id.
         NB: this is called from almost all other methods.
@@ -209,27 +208,27 @@ class RscState(object):
         if self.rsc_elem is None:
             return None
         # does this need to be optimized?
-        expr = './/*[@id="%s"]' % id
+        expr = './/*[@id="%s"]' % ident
         try:
             return self.rsc_elem.xpath(expr)[0]
         except (IndexError, AttributeError):
             return None
 
-    def is_ms(self, id):
+    def is_ms(self, ident):
         '''
         Test if the resource is master-slave.
         '''
-        rsc_node = self.rsc2node(id)
+        rsc_node = self.rsc2node(ident)
         if rsc_node is None:
             return False
         return is_ms(rsc_node)
 
-    def rsc_clone(self, id):
+    def rsc_clone(self, ident):
         '''
         Return id of the clone/ms containing this resource
         or None if it's not cloned.
         '''
-        rsc_node = self.rsc2node(id)
+        rsc_node = self.rsc2node(ident)
         if rsc_node is None:
             return None
         pnode = rsc_node.getparent()
@@ -241,11 +240,11 @@ class RscState(object):
             return pnode.get("id")
         return None
 
-    def is_managed(self, id):
+    def is_managed(self, ident):
         '''
         Is this resource managed?
         '''
-        rsc_node = self.rsc2node(id)
+        rsc_node = self.rsc2node(ident)
         if rsc_node is None:
             return False
         # maintenance-mode, if true, overrides all
@@ -267,31 +266,31 @@ class RscState(object):
             return is_xs_boolean_true(attr)
         return True
 
-    def is_running(self, id):
+    def is_running(self, ident):
         '''
         Is this resource running?
         '''
         if not is_live_cib():
             return False
-        test_id = self.rsc_clone(id) or id
+        test_id = self.rsc_clone(ident) or ident
         rc, outp = get_stdout(self.rsc_status % test_id, stderr_on=False)
         return outp.find("running") > 0 and outp.find("NOT") == -1
 
-    def is_group(self, id):
+    def is_group(self, ident):
         '''
         Test if the resource is a group
         '''
-        rsc_node = self.rsc2node(id)
+        rsc_node = self.rsc2node(ident)
         if rsc_node is None:
             return False
         return is_group(rsc_node)
 
-    def can_delete(self, id):
+    def can_delete(self, ident):
         '''
         Can a resource be deleted?
         The order below is important!
         '''
-        return not (self.is_running(id) and not self.is_group(id) and self.is_managed(id))
+        return not (self.is_running(ident) and not self.is_group(ident) and self.is_managed(ident))
 
 
 def resources_xml():
@@ -357,14 +356,22 @@ def is_our_node(s):
     return False
 
 
+def is_remote_node(n):
+    cib = cibdump2elem()
+    if cib is None:
+        return False
+    remote_nodes = cib.xpath('/cib/status/node_state[@remote_node="true"]/@uname')
+    return any(n == r for r in remote_nodes if r)
+
+
 def is_live_cib():
     '''Are we working with the live cluster?'''
     return not get_cib_in_use() and not os.getenv("CIB_file")
 
 
 def is_crmuser():
-    return (config.core.user in ("root", config.path.crm_daemon_user)
-            or userdir.getuser() in ("root", config.path.crm_daemon_user))
+    crmusers = ("root", config.path.crm_daemon_user)
+    return config.core.user in crmusers or userdir.getuser() in crmusers
 
 
 def cib_shadow_dir():
@@ -379,10 +386,10 @@ def cib_shadow_dir():
 
 
 def listshadows():
-    dir = cib_shadow_dir()
-    if not os.path.isdir(dir):
+    d = cib_shadow_dir()
+    if not os.path.isdir(d):
         return []
-    rc, l = stdout2list("ls %s | fgrep shadow. | sed 's/^shadow\\.//'" % dir)
+    rc, l = stdout2list("ls %s | fgrep shadow. | sed 's/^shadow\\.//'" % d)
     return l
 
 
@@ -409,8 +416,8 @@ def pe2shadow(pe_file, name):
     return True
 
 
-def is_xs_boolean_true(bool):
-    return bool.lower() in ("true", "1")
+def is_xs_boolean_true(b):
+    return b.lower() in ("true", "1")
 
 
 def cloned_el(node):
@@ -630,9 +637,9 @@ def rmnodes(e_list):
 
 def printid(e_list):
     for e in e_list:
-        id = e.get("id")
-        if id:
-            print "element id:", id
+        ident = e.get("id")
+        if ident:
+            print "element id:", ident
 
 
 def remove_dflt_attrs(e_list):
@@ -977,7 +984,7 @@ def rset_convert(c_obj):
             del rset.attrib["sequential"]
         rsetcnt += 1
     c_obj.modified = True
-    cli = c_obj.repr_cli(format=-1)
+    cli = c_obj.repr_cli(format_mode=-1)
     cli = cli.replace("_rsc_set_ ", "")
     newnode = c_obj.cli2node(cli)
     if newnode is not None:
diff --git a/requirements.txt b/requirements.txt
index 18f3eb9..39815a6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,3 +2,4 @@ lxml
 PyYAML
 nosexcover
 python-dateutil
+parallax
diff --git a/scripts/apache/main.yml b/scripts/apache/main.yml
index 228c568..9af548d 100644
--- a/scripts/apache/main.yml
+++ b/scripts/apache/main.yml
@@ -9,12 +9,12 @@ longdesc: |
   Configure a resource group containing a virtual IP address and
   an instance of the Apache web server.
 
-  You can optionally configure a Filesystem resource which will be
+  You can optionally configure a file system resource which will be
   mounted before the web server is started.
 
   You can also optionally configure a database resource which will
   be started before the web server but after mounting the optional
-  filesystem.
+  file system.
 include:
   - agent: ocf:heartbeat:apache
     name: apache
@@ -34,7 +34,7 @@ include:
       - name: id
         value: "{{id}}-vip"
   - script: filesystem
-    shortdesc: Optional filesystem mounted before the web server is started.
+    shortdesc: Optional file system mounted before the web server is started.
     required: false
   - script: database
     shortdesc: Optional database started before the web server is started.
@@ -56,6 +56,7 @@ actions:
   - call: a2enmod status; true
     shortdesc: Enable status module
     when: install
+    sudo: true
   - include: filesystem
   - include: database
   - include: virtual-ip
diff --git a/scripts/clvm-vg/main.yml b/scripts/clvm-vg/main.yml
index e092618..8cb196f 100644
--- a/scripts/clvm-vg/main.yml
+++ b/scripts/clvm-vg/main.yml
@@ -2,7 +2,7 @@
 #
 # License: GNU General Public License (GPL)
 version: 2.2
-category: Filesystem
+category: File System
 shortdesc: Cluster-aware LVM (Volume Group)
 longdesc: |
  Configures a cLVM volume group instance. Once created,
diff --git a/scripts/clvm/main.yml b/scripts/clvm/main.yml
index 987c47b..de54b2d 100644
--- a/scripts/clvm/main.yml
+++ b/scripts/clvm/main.yml
@@ -2,7 +2,7 @@
 #
 # License: GNU General Public License (GPL)
 version: 2.2
-category: Filesystem
+category: File System
 shortdesc: Cluster-aware LVM
 longdesc: |
   Configure a cloned instance of cLVM.
diff --git a/scripts/db2/main.yml b/scripts/db2/main.yml
index 5eb2d92..95e7461 100644
--- a/scripts/db2/main.yml
+++ b/scripts/db2/main.yml
@@ -2,9 +2,9 @@ version: 2.2
 category: Database
 shortdesc: IBM DB2 Database
 longdesc: >-
-  Configure an IBM DB2 database resource, along with a Virtual IP and a Filesystem.
+  Configure an IBM DB2 database resource, along with a Virtual IP and a file system mount point.
 
-  Note that the filesystem will be stopped initially, in case you need to run mkfs.
+  Note that the file system resource will be stopped initially, in case you need to run mkfs.
 
 include:
   - agent: ocf:heartbeat:db2
@@ -25,7 +25,7 @@ include:
       - name: id
         value: db2-virtual-ip
   - script: filesystem
-    shortdesc: The filesystem configured here will be mounted before the DB2 instance.
+    shortdesc: The file system configured here will be mounted before the DB2 instance.
     parameters:
       - name: id
         value: db2-fs
diff --git a/scripts/drbd/main.yml b/scripts/drbd/main.yml
index 4e7d4a1..afb4f8b 100644
--- a/scripts/drbd/main.yml
+++ b/scripts/drbd/main.yml
@@ -1,5 +1,5 @@
 version: 2.2
-category: Filesystem
+category: File System
 shortdesc: DRBD Block Device
 longdesc: >-
   Distributed Replicated Block Device. Configure a DRBD cluster resource.
diff --git a/scripts/exportfs/main.yml b/scripts/exportfs/main.yml
index cd0dfea..6dff8f2 100644
--- a/scripts/exportfs/main.yml
+++ b/scripts/exportfs/main.yml
@@ -1,31 +1,33 @@
 version: 2.2
 shortdesc: "NFS Exported File System"
-category: Server
+category: NFS
 include:
   - agent: ocf:heartbeat:exportfs
     parameters:
       - name: id
         required: true
-        shortdesc: Unique ID for this export in the cluster.
+        shortdesc: Resource ID
+        longdesc: Cluster Resource ID
         type: resource
         value: exportfs
       - name: fsid
+        shortdesc: Unique FSID Within Cluster or Starting FSID for Multiple Exports
         required: true
         type: integer
         value: 1
       - name: directory
         required: true
         type: string
-        shortdesc: Mount point
-        longdesc: "The mount point for the filesystem, e.g.: /srv/nfs/home"
+        shortdesc: Mount Point (Directory)
+        longdesc: "The mount point for the file system, e.g.: /srv/nfs/home"
       - name: options
         required: true
-        shortdesc: Mount options
+        shortdesc: Mount Options
         longdesc: "Any additional options to be given to the mount command, for example rw,mountpoint"
         type: string
       - name: wait_for_leasetime_on_stop
         required: false
-        shortdesc: Wait for lease time on stop
+        shortdesc: Wait for Lease Time on Stop
         longdesc: If set to true, wait for lease on stop.
         type: boolean
         value: true
diff --git a/scripts/filesystem/main.yml b/scripts/filesystem/main.yml
index 23b9479..b37cf15 100644
--- a/scripts/filesystem/main.yml
+++ b/scripts/filesystem/main.yml
@@ -1,6 +1,6 @@
 version: 2.2
-category: Filesystem
-shortdesc: Filesystem (mount point)
+category: File System
+shortdesc: File System (mount point)
 include:
   - agent: ocf:heartbeat:Filesystem
     name: filesystem
diff --git a/scripts/gfs2-base/main.yml b/scripts/gfs2-base/main.yml
index 1fc515e..47afe0b 100644
--- a/scripts/gfs2-base/main.yml
+++ b/scripts/gfs2-base/main.yml
@@ -3,11 +3,11 @@
 #
 # License: GNU General Public License (GPL)
 version: 2.2
-category: Filesystem
-shortdesc: gfs2 filesystem base (cloned)
+category: Script
+shortdesc: GFS2 File System Base (Cloned)
 longdesc: |
-  This template generates a cloned instance of the gfs2 filesystem.
-  The filesystem should be on the device, unless clvm is used.
+  This template generates a cloned instance of the GFS2 file system.
+  The file system should be on the device, unless cLVM is used.
 
 parameters:
   - name: clvm-group
@@ -20,7 +20,7 @@ actions:
       primitive gfs-controld ocf:pacemaker:controld
 
       clone c-gfs gfs-controld
-        meta interleave="true" ordered="true"
+        meta interleave=true ordered=true
 
   - crm: configure modgroup {{clvm-group}} add c-gfs
     shortdesc: Add gfs controld to cLVM group
diff --git a/scripts/gfs2/main.yml b/scripts/gfs2/main.yml
index 2df004f..80f6994 100644
--- a/scripts/gfs2/main.yml
+++ b/scripts/gfs2/main.yml
@@ -3,49 +3,47 @@
 #
 # License: GNU General Public License (GPL)
 version: 2.2
-shortdesc: gfs2 filesystem (cloned)
+shortdesc: GFS2 File System (Cloned)
 longdesc: >- 
-  This template generates a cloned instance of the gfs2 filesystem.
-  The filesystem should be on the device, unless cLVM is used.
+  This template generates a cloned instance of the GFS2 file system.
+  The file system should be on the device, unless cLVM is used.
 
-category: Filesystem
+category: File System
 include:
   - script: gfs2-base
 parameters:
   - name: id
-    shortdesc: Name the gfs2 filesystem
+    shortdesc: File System Resource ID
     longdesc: "NB: The clone is going to be named c-<id> (e.g. c-bigfs)"
     example: bigfs
     required: true
     type: resource
   - name: directory
-    shortdesc: The mount point
+    shortdesc: Mount Point
     example: /mnt/bigfs
     required: true
     type: string
   - name: device
-    shortdesc: The device
+    shortdesc: Device
     required: true
     type: string
   - name: options
-    shortdesc: mount options
+    shortdesc: Mount Options
     type: string
     required: false
 actions:
   - include: gfs2-base
   - cib: |
-      primitive {{id}} ocf:heartbeat:Filesystem
-        params
+      primitive {{id}} Filesystem
         directory="{{directory}}"
-        fstype="gfs2"
         device="{{device}}"
+        fstype=gfs2
         {{#options}}options="{{options}}"{{/options}}
-
-      monitor {{id}} 20:40
+        op monitor interval=20s timeout=40s
 
       clone c-{{id}} {{id}}
-        meta interleave="true" ordered="true"
+        meta interleave=true ordered=true
 
   - crm: "configure modgroup {{gfs2-base:clvm-group}} add c-{{id}}"
-    shortdesc: Add cloned filesystem to cLVM group
+    shortdesc: Add cloned file system to cLVM group
     when: "{{gfs2-base:clvm-group}}"
diff --git a/scripts/health/collect.py b/scripts/health/collect.py
index fa5fe8c..d6fa94b 100755
--- a/scripts/health/collect.py
+++ b/scripts/health/collect.py
@@ -27,7 +27,7 @@ def sys_info():
     #processes. The last column displays the last process ID used.
     system, node, release, version, machine, processor = platform.uname()
     distname, distver, distid = platform.linux_distribution()
-    hostname = platform.node().split('.')[0]
+    hostname = os.uname()[1]
 
     uptime = open('/proc/uptime').read().split()
     loadavg = open('/proc/loadavg').read().split()
diff --git a/scripts/init/configure.py b/scripts/init/configure.py
index 1b87ecf..26ca6a1 100755
--- a/scripts/init/configure.py
+++ b/scripts/init/configure.py
@@ -11,7 +11,7 @@ def _authorize_key(keypath):
     if os.path.exists('/root/.ssh/authorized_keys'):
         pubkey = open(pubkeypath).read()
         if pubkey not in open('/root/.ssh/authorized_keys').read():
-            crm_script.sudo_call("cat %s >> /root/.ssh/authorized_keys" % (pubkeypath))
+            crm_script.sudo_call("cat %s >> /root/.ssh/authorized_keys" % (pubkeypath), shell=True)
     else:
         crm_script.sudo_call(["cp", pubkeypath, '/root/.ssh/authorized_keys'])
 
diff --git a/scripts/libvirt/main.yml b/scripts/libvirt/main.yml
index a5b58e0..1f529a0 100644
--- a/scripts/libvirt/main.yml
+++ b/scripts/libvirt/main.yml
@@ -9,12 +9,12 @@ longdesc: >
 category: Stonith
 parameters:
   - name: id
-    shortdesc: The resource id (name)
+    shortdesc: Resource ID (Name)
     example: stonith-libvirt
     required: true
     type: resource
   - name: target
-    shortdesc: Node to manage with stonith device
+    shortdesc: Node to Manage With STONITH Device
     type: resource
     required: true
   - name: hostlist
@@ -39,13 +39,13 @@ parameters:
     required: false
     example: power_cycle
     type: string
-    shortdesc: How to reset a guest.
+    shortdesc: Guest Reset Method
     longdesc: >
       A guest reset may be done by a sequence of off and on commands
       (power_cycle) or by the reboot command. Which method works
       depend on the hypervisor and guest configuration management.
   - name: install
-    shortdesc: Enable to install required packages
+    shortdesc: Enable to Install Required Packages
     type: boolean
     required: false
     value: false
diff --git a/scripts/mailto/main.yml b/scripts/mailto/main.yml
index 403ffc4..bcf188e 100644
--- a/scripts/mailto/main.yml
+++ b/scripts/mailto/main.yml
@@ -1,5 +1,7 @@
 version: 2.2
-shortdesc: MailTo
+shortdesc: E-Mail
+longdesc: |
+  Notifies recipient by e-mail in the event of a resource takeover.
 category: Basic
 include:
   - agent: ocf:heartbeat:MailTo
diff --git a/scripts/nfsserver/main.yml b/scripts/nfsserver/main.yml
index 2199414..af174f0 100644
--- a/scripts/nfsserver/main.yml
+++ b/scripts/nfsserver/main.yml
@@ -2,17 +2,17 @@
 #
 # License: GNU General Public License (GPL)
 version: 2.2
-category: Server
+category: NFS
 shortdesc: NFS Server
-longdesc: >
-  Configure an NFS server. Requires an existing filesystem resource,
-  for example a filesystem running on LVM on DRBD.
+longdesc: |
+  Configure an NFS server. Requires an existing file system resource,
+  for example a file system running on LVM on DRBD.
 
 parameters:
   - name: base-id
     required: true
-    shortdesc: Base filesystem resource ID
-    longdesc: The ID of an existing filesystem resource.
+    shortdesc: Base File System Resource ID
+    longdesc: The ID of an existing file system resource.
     type: resource
     value: base-fs
 
@@ -20,7 +20,7 @@ include:
   - name: rootfs
     script: exportfs
     required: false
-    shortdesc: NFSv4 Virtual File System root.
+    shortdesc: NFSv4 Virtual File System Root
     parameters:
       - name: id
         value: exportfs-root
@@ -33,7 +33,7 @@ include:
 
   - script: exportfs
     required: true
-    shortdesc: Exported NFS mount point.
+    shortdesc: Exported NFS Mount Point
     parameters:
       - name: id
         value: exportfs
@@ -46,13 +46,13 @@ include:
 
   - script: virtual-ip
     required: false
-    shortdesc: Configure a Virtual IP address used to access the NFS mounts.
+    shortdesc: Virtual IP Address Used to Access the NFS Mounts
 
 actions:
   - crm: "configure show {{base-id}}"
-    shortdesc: Ensure that the Filesystem resource exists
+    shortdesc: Ensure That the File System Resource Exists
   - install: nfs-client nfs-kernel-server
-    shortdesc: Install NFS packages
+    shortdesc: Install NFS Packages
   - service:
       - nfsserver: enable
       - nfsserver: start
@@ -70,4 +70,4 @@ actions:
       {{/rootfs}}
   - call: exportfs -v
     error: Failed to configure NFS exportfs
-    shortdesc: Check result of exportfs -v
+    shortdesc: Check Result of exportfs -v
diff --git a/scripts/ocfs2/main.yml b/scripts/ocfs2/main.yml
index 436bde0..c3000dd 100644
--- a/scripts/ocfs2/main.yml
+++ b/scripts/ocfs2/main.yml
@@ -3,54 +3,74 @@
 #
 # License: GNU General Public License (GPL)
 version: 2.2
-category: Filesystem
-shortdesc: OCFS2 filesystem (cloned)
-longdesc: >
-  Configure a cloned cluster resource for an OCFS2 filesystem.
+category: File System
+shortdesc: OCFS2 File System
+longdesc: |
+  Configure an OCFS2 File System resource and add
+  it to a cloned DLM base group. OCFS2 uses the
+  cluster membership services from Pacemaker which
+  run in user space. Therefore, DLM needs to be
+  configured as a clone resource that is present on
+  each node in the cluster.
 
-  Note that the OCFS2 Filesystem will be stopped initially, in case
-  you need to run mkfs to create the filesystem after DLM is running.
+  The file system resource should be added to a cloned
+  group which includes the DLM resource. This wizard
+  can optionally create both the required DLM resource
+  and the cloned group. The wizard can be reused to create
+  additional OCFS2 file system resources by setting the
+  group name to the name of an already-created cloned group.
+
+  If you are using cLVM, create the DLM resource and clone
+  group using the cLVM wizard. OCFS2 file system resources can
+  then be added to the group using this wizard.
 
 parameters:
   - name: id
-    shortdesc: Name the ocfs2 filesystem resource
+    shortdesc: OCFS2 File System Resource ID
     example: bigfs
     type: resource
     required: true
   - name: directory
-    shortdesc: The mount point
+    shortdesc: Mount Point
     example: /mnt/bigfs
     type: string
     required: true
   - name: device
-    shortdesc: The device
+    shortdesc: Device
     type: string
     required: true
   - name: options
-    shortdesc: mount options
+    shortdesc: Mount Options
     type: string
-  - name: clvm-group
-    shortdesc: cLVM Resource Group ID
-    longdesc: Optional ID of a cLVM resource group to add this filesystem to.
+  - name: dlm
+    shortdesc: Create DLM Resource and Cloned Group
+    longdesc: If set, create the DLM resource and cloned resource group.
+    type: boolean
+    default: true
+  - name: group
+    shortdesc: Cloned Group Resource ID
+    longdesc: ID of cloned group
+    required: false
     type: resource
-    required: False
+    default: g-dlm
 
 actions:
+  - when: dlm
+    cib: |
+      primitive dlm ocf:pacemaker:controld
+        op start timeout=90
+        op stop timeout=60
+      group {{group}} dlm
+      clone c-dlm {{group}} meta interleave=true
   - cib: |
       primitive {{id}} ocf:heartbeat:Filesystem
-          params
-              directory="{{directory}}"
-              fstype="ocfs2"
-              device="{{device}}"
-              {{#options}}options="{{options}}"{{/options}}
+          directory="{{directory}}"
+          fstype="ocfs2"
+          device="{{device}}"
+          {{#options}}options="{{options}}"{{/options}}
           op start timeout=60s
           op stop timeout=60s
           op monitor interval=20s timeout=40s
 
-      clone c-{{id}} {{id}}
-        meta interleave=true target-role=Stopped
-
-  - crm: configure modgroup {{clvm-group}} add c-{{id}}
-    shortdesc: Add cloned OCFS2 filesystem to cLVM group
-    when: clvm-group
-
+  - crm: configure modgroup {{group}} add {{id}}
+    shortdesc: Add the OCFS2 File System to the Cloned Group
diff --git a/scripts/raid-lvm/main.yml b/scripts/raid-lvm/main.yml
index 6a02368..405168f 100644
--- a/scripts/raid-lvm/main.yml
+++ b/scripts/raid-lvm/main.yml
@@ -1,11 +1,11 @@
 version: 2.2
-category: Filesystem
-shortdesc: RAID hosting LVM
+category: File System
+shortdesc: RAID Hosting LVM
 longdesc: "Configure a RAID 1 host based mirror together with a cluster manager LVM volume group and LVM volumes."
 parameters:
   - name: id
-    shortdesc: ID for the RAID and LVM group.
-    longdesc: Filesystems that should be mounted in the LVM can be added to this group resource.
+    shortdesc: RAID and LVM Group ID
+    longdesc: File systems that should be mounted in the LVM can be added to this group resource.
     type: resource
     value: g-raid
     required: true
diff --git a/scripts/sap-as/main.yml b/scripts/sap-as/main.yml
index 08e6084..ccb857e 100644
--- a/scripts/sap-as/main.yml
+++ b/scripts/sap-as/main.yml
@@ -6,7 +6,7 @@ longdesc: |
 
   1) Virtual IP address for the SAP ASCS instance,
 
-  2) A filesystem on shared storage (/usr/sap/SID/ASCS##),
+  2) A file system on shared storage (/usr/sap/SID/ASCS##),
 
   3) SAPInstance for ASCS.
 
@@ -41,12 +41,12 @@ include:
       - name: nic
         value: eth0
   - script: filesystem
-    shortdesc: "Filesystem resource for the /usr/sap/SID/ASCS## directory."
+    shortdesc: "File system resource for the /usr/sap/SID/ASCS## directory"
     longdesc: >-
-      If a filesystem does not already exist on the block device 
+      If a file system does not already exist on the block device 
       specified here, you will need to run mkfs to create it, prior 
-      to starting the filesystem resource.  You will also need
-      to create the mountpoint directory on all cluster nodes.
+      to starting the file system resource.  You will also need
+      to create the mount point directory on all cluster nodes.
     parameters:
       - name: id
         value: rsc_fs_NA0_sapna0as
diff --git a/scripts/sap-ci/main.yml b/scripts/sap-ci/main.yml
index 69c4e78..7c3468d 100644
--- a/scripts/sap-ci/main.yml
+++ b/scripts/sap-ci/main.yml
@@ -6,7 +6,7 @@ longdesc: |
 
   1) Virtual IP address for the SAP Central instance,
 
-  2) A filesystem on shared storage (/usr/sap/SID/DVEBMGS##),
+  2) A file system on shared storage (/usr/sap/SID/DVEBMGS##),
 
   3) SAPInstance for the Central Instance.
 
@@ -41,12 +41,12 @@ include:
       - name: nic
         value: eth0
   - script: filesystem
-    shortdesc: "Filesystem resource for the /usr/sap/SID/DVEBMGS## directory."
+    shortdesc: "File system resource for the /usr/sap/SID/DVEBMGS## directory."
     longdesc: >-
-      If a filesystem does not already exist on the block device 
+      If a file system does not already exist on the block device 
       specified here, you will need to run mkfs to create it, prior 
-      to starting the filesystem resource.  You will also need
-      to create the mountpoint directory on all cluster nodes.
+      to starting the file system resource.  You will also need
+      to create the mount point directory on all cluster nodes.
     parameters:
       - name: id
         value: rsc_fs_NA0_sapna0ci
diff --git a/scripts/sap-db/main.yml b/scripts/sap-db/main.yml
index 50ecad8..b472f3f 100644
--- a/scripts/sap-db/main.yml
+++ b/scripts/sap-db/main.yml
@@ -6,7 +6,7 @@ longdesc: |
 
   1) A virtual IP address for the SAP database instance,
 
-  2) A filesystem on shared storage (/sapdb),
+  2) A file system on shared storage (/sapdb),
 
   3) SAPinstance for the database.
 
@@ -34,12 +34,12 @@ include:
       - name: nic
         value: eth0
   - script: filesystem
-    shortdesc: "Filesystem resource for the SAP database (typically /sapdb)."
+    shortdesc: "File system resource for the SAP database (typically /sapdb)."
     longdesc: >-
-      If a filesystem does not already exist on the block device 
+      If a file system does not already exist on the block device 
       specified here, you will need to run mkfs to create it, prior 
-      to starting the filesystem resource.  You will also need
-      to create the mountpoint directory on all cluster nodes.
+      to starting the file system resource.  You will also need
+      to create the mount point directory on all cluster nodes.
     parameters:
       - name: id
         value: rsc_fs_NA0_sapna0db
diff --git a/scripts/sap-simple-stack-plus/main.yml b/scripts/sap-simple-stack-plus/main.yml
index 237f59a..3f1e996 100644
--- a/scripts/sap-simple-stack-plus/main.yml
+++ b/scripts/sap-simple-stack-plus/main.yml
@@ -10,12 +10,12 @@ longdesc: |
 
   3) A cluster manager LVM volume group and LVM volumes on the RAID 1 host based mirror,
 
-  4) Filesystems on shared storage for sapmnt, /sapbd, /usr/sap/SID/ASCS## and /usr/sap/SID/DVEBMGS##,
+  4) File systems on shared storage for sapmnt, /sapdb, /usr/sap/SID/ASCS## and /usr/sap/SID/DVEBMGS##,
 
   5) SAPinstance for - ASCS, a Database, a Central Instance.
 
   The difference between this and the SimpleStack is that the ASCS and CI have their own
-  volumes/filesystems/mountpoints rather than just one volume/filesystem/mountpoint on /usr/sap.
+  volumes / file systems / mount points rather than just one volume / file system / mount point on /usr/sap.
 
 parameters:
   - name: id
@@ -36,7 +36,7 @@ include:
 
   - script: lvm
     required: true
-    shortdesc: LVM logical volumes for the SAP filesystems.
+    shortdesc: LVM logical volumes for the SAP file systems.
     parameters:
       - name: volgrpname
         value: sapvg
@@ -44,7 +44,7 @@ include:
   - script: filesystem
     name: filesystem-sapmnt
     required: true
-    shortdesc: Filesystem resource for the sapmnt directory.
+    shortdesc: File system resource for the sapmnt directory.
     parameters:
       - name: id
         value: rsc_fs_NA0_sapmnt
@@ -59,7 +59,7 @@ include:
   - script: filesystem
     name: filesystem-usrsap
     required: true
-    shortdesc: Filesystem resource for the /usr/sap directory.
+    shortdesc: File system resource for the /usr/sap directory.
     parameters:
       - name: id
         value: rsc_fs_NA0_usrsap
@@ -89,12 +89,12 @@ include:
         value: eth0
   - script: filesystem
     name: filesystem-db
-    shortdesc: "Filesystem resource for the SAP database (typically /sapdb)."
+    shortdesc: "File system resource for the SAP database (typically /sapdb)."
     longdesc: >-
-      If a filesystem does not already exist on the block device 
+      If a file system does not already exist on the block device 
       specified here, you will need to run mkfs to create it, prior 
-      to starting the filesystem resource.  You will also need
-      to create the mountpoint directory on all cluster nodes.
+      to starting the file system resource.  You will also need
+      to create the mount point directory on all cluster nodes.
     parameters:
       - name: id
         value: rsc_fs_NA0_sapna0db
@@ -131,12 +131,12 @@ include:
         value: eth0
   - script: filesystem
     name: filesystem-as
-    shortdesc: "Filesystem resource for the /usr/sap/SID/ASCS## directory."
+    shortdesc: "File system resource for the /usr/sap/SID/ASCS## directory."
     longdesc: >-
-      If a filesystem does not already exist on the block device 
+      If a file system does not already exist on the block device 
       specified here, you will need to run mkfs to create it, prior 
-      to starting the filesystem resource.  You will also need
-      to create the mountpoint directory on all cluster nodes.
+      to starting the file system resource.  You will also need
+      to create the mount point directory on all cluster nodes.
     parameters:
       - name: id
         value: rsc_fs_NA0_sapna0as
@@ -173,12 +173,12 @@ include:
         value: eth0
   - script: filesystem
     name: filesystem-ci
-    shortdesc: "Filesystem resource for the /usr/sap/SID/DVEBMGS## directory."
+    shortdesc: "File system resource for the /usr/sap/SID/DVEBMGS## directory."
     longdesc: >-
-      If a filesystem does not already exist on the block device 
+      If a file system does not already exist on the block device 
       specified here, you will need to run mkfs to create it, prior 
-      to starting the filesystem resource.  You will also need
-      to create the mountpoint directory on all cluster nodes.
+      to starting the file system resource.  You will also need
+      to create the mount point directory on all cluster nodes.
     parameters:
       - name: id
         value: rsc_fs_NA0_sapna0ci
diff --git a/scripts/sap-simple-stack/main.yml b/scripts/sap-simple-stack/main.yml
index a6bf0e2..654dd47 100644
--- a/scripts/sap-simple-stack/main.yml
+++ b/scripts/sap-simple-stack/main.yml
@@ -1,7 +1,7 @@
 ---
 version: 2.2
 category: SAP
-shortdesc: SAP Simple Stack Instance
+shortdesc: SAP SimpleStack Instance
 longdesc: |
   Configure a SAP instance including:
 
@@ -11,7 +11,7 @@ longdesc: |
 
   3) A cluster manager LVM volume group and LVM volumes on the RAID 1 host based mirror,
 
-  4) Filesystems on shared storage for sapmnt, /sapbd and /usr/sap,
+  4) File systems on shared storage for sapmnt, /sapdb and /usr/sap,
 
   5) SAPinstance for - ASCS, a Database, a Central Instance.
 
@@ -34,7 +34,7 @@ include:
 
   - script: lvm
     required: true
-    shortdesc: LVM logical volumes for the SAP filesystems.
+    shortdesc: LVM logical volumes for the SAP file systems.
     parameters:
       - name: volgrpname
         value: sapvg
@@ -42,7 +42,7 @@ include:
   - script: filesystem
     name: filesystem-sapmnt
     required: true
-    shortdesc: Filesystem resource for the sapmnt directory.
+    shortdesc: File system resource for the sapmnt directory.
     parameters:
       - name: id
         value: rsc_fs_NA0_sapmnt
@@ -57,7 +57,7 @@ include:
   - script: filesystem
     name: filesystem-usrsap
     required: true
-    shortdesc: Filesystem resource for the /usr/sap directory.
+    shortdesc: File system resource for the /usr/sap directory.
     parameters:
       - name: id
         value: rsc_fs_NA0_usrsap
@@ -88,12 +88,12 @@ include:
 
   - script: filesystem
     name: filesystem-db
-    shortdesc: "Filesystem resource for the SAP database (typically /sapdb)."
+    shortdesc: "File system resource for the SAP database (typically /sapdb)."
     longdesc: >-
-      If a filesystem does not already exist on the block device 
+      If a file system does not already exist on the block device 
       specified here, you will need to run mkfs to create it, prior 
-      to starting the filesystem resource.  You will also need
-      to create the mountpoint directory on all cluster nodes.
+      to starting the file system resource.  You will also need
+      to create the mount point directory on all cluster nodes.
     parameters:
       - name: id
         value: rsc_fs_NA0_sapna0db
diff --git a/scripts/sbd/main.yml b/scripts/sbd/main.yml
index f24da70..bbb4f4c 100644
--- a/scripts/sbd/main.yml
+++ b/scripts/sbd/main.yml
@@ -9,36 +9,20 @@ longdesc: |
   Create a SBD STONITH resource. SBD must be configured to use
   a particular shared storage device using /etc/sysconfig/sbd.
 
-  You need to configure an SBD resource for each node to manage.
-
-  There is quite a bit more to do to make this stonith operational.
+  There is quite a bit more to do to make this SBD operational.
   See http://www.linux-ha.org/wiki/SBD_Fencing for information, or
   the sbd(8) manual page.
 
 parameters:
   - name: id
-    shortdesc: The resource id (name)
+    shortdesc: Resource ID (Name)
     example: stonith-sbd
     required: true
     type: resource
-  - name: node
-    shortdesc: The node id that this stonith resource manages.
-    required: true
-    type: resource
-  - name: sbd_device
-    shortdesc: Name of the device (shared disk)
-    longdesc: >
-      NB: Make sure that the device remains the same on reboots. It's
-      preferable to use udev generated names rather than the usual
-      /dev/sd?
-    type: string
-    required: true
 
 actions:
   - cib: |
       primitive {{id}} stonith:external/sbd
-        params sbd_device="{{sbd_device}}"
-        op monitor interval=15s timeout=60s
-        op start timeout=60s
+        op start start-delay=15s timeout=60s
 
-      location loc-{{id}}-fences-{{node}} {{id}} -inf: {{node}}
+      property stonith-enabled=true
diff --git a/setup.py b/setup.py
index 2b4772e..f4a3ef4 100644
--- a/setup.py
+++ b/setup.py
@@ -7,10 +7,10 @@ import os
 SRC_PATH = os.path.relpath(os.path.join(os.path.dirname(__file__), "modules"))
 
 setup(name='crmsh',
-      version='2.2.0-rc3',
+      version='2.2.0',
       description='Command-line interface for High-Availability cluster management',
-      author='Dejan Muhamedagic',
-      author_email='dejan at suse.de',
+      author='Kristoffer Gronlund',
+      author_email='kgronlund at suse.com',
       url='http://crmsh.github.io/',
       packages=['crmsh'],
       package_dir={'crmsh': SRC_PATH})
diff --git a/test/list-undocumented-commands.py b/test/list-undocumented-commands.py
index 60d37b6..28f79af 100755
--- a/test/list-undocumented-commands.py
+++ b/test/list-undocumented-commands.py
@@ -19,7 +19,7 @@ modules.help._load_help()
 _IGNORED_COMMANDS = ('help', 'quit', 'cd', 'up', 'ls')
 
 def check_help(ui):
-    for name, child in ui._children.iteritems():
+    for name, child in ui.children().iteritems():
         if child.type == 'command':
             try:
                 h = modules.help.help_command(ui.name, name)
diff --git a/test/profile-history.sh b/test/profile-history.sh
new file mode 100755
index 0000000..02831f8
--- /dev/null
+++ b/test/profile-history.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+case $1 in
+	cumulative)
+		python -c "import pstats; s = pstats.Stats(\"$2\"); s.sort_stats(\"cumulative\").print_stats()" | less
+		;;
+	time)
+		python -c "import pstats; s = pstats.Stats(\"$2\"); s.sort_stats(\"time\").print_stats()" | less
+		;;
+	timecum)
+		python -c "import pstats; s = pstats.Stats(\"$2\"); s.sort_stats(\"time\", \"cum\").print_stats()" | less
+		;;
+	callers)
+		python -c "import pstats; s = pstats.Stats(\"$2\"); s.print_callers(.5, \"$3\")" | less
+		;;
+	verbose)
+		PYTHONPATH=. ./crm -X "$2" -H "$3" history log
+		;;
+	*)
+		PYTHONPATH=. ./crm -X "$1" -H "$2" history log >/dev/null
+		;;
+esac
diff --git a/test/testcases/edit.exp b/test/testcases/edit.exp
index 367e61a..e6f8ddd 100644
--- a/test/testcases/edit.exp
+++ b/test/testcases/edit.exp
@@ -140,7 +140,6 @@ group g2 d1 d2
 clone c1 g1
 tag t-d45 d4 d5
 location l1 p3 100: node1
-# this is a comment
 location loc-d1 d1 \
 	rule -inf: not_defined webserver or mem number:lte 0 \
 	rule -inf: not_defined a2 \
diff --git a/test/testcases/history b/test/testcases/history
index 3fc84f3..9e681a4 100644
--- a/test/testcases/history
+++ b/test/testcases/history
@@ -2,6 +2,7 @@ session History
 history
 source history-test.tar.bz2
 info
+events
 node xen-d
 node xen-e
 node .*
@@ -14,6 +15,7 @@ log
 exclude clear
 peinputs
 peinputs v
+transitions
 refresh
 resource d1
 # reduce report span
@@ -26,6 +28,7 @@ transition nograph
 transition -1 nograph
 transition save 0 _crmsh_regtest
 transition log 49
+transition tags 49
 # reset timeframe
 timeframe
 session save _crmsh_regtest
diff --git a/test/testcases/history.exp b/test/testcases/history.exp
index de3e13f..b465992 100644
--- a/test/testcases/history.exp
+++ b/test/testcases/history.exp
@@ -10,8 +10,97 @@ By: unknown
 Period: 2012-12-14 20:06:34 - 2012-12-14 20:08:44
 Nodes: xen-d xen-e
 Groups: 
+Clones: 
 Resources: d1 s-libvirt
-Transitions: 43* 44 45 46 48* 272* 49* 50*
+Transitions: 43 44* 45 46 47 48* 272* 49* 50*
+.INP: events
+Dec 14 20:06:35 xen-d corosync[5649]:  [MAIN  ] Corosync Cluster Engine ('1.4.3'): started and ready to provide service.
+Dec 14 20:06:35 xen-e corosync[24218]:  [MAIN  ] Corosync Cluster Engine ('1.4.3'): started and ready to provide service.
+Dec 14 20:06:36 xen-d corosync[5649]:  [pcmk  ] info: pcmk_peer_update: memb: xen-d 906822154
+Dec 14 20:07:54 xen-e corosync[24218]:  [pcmk  ] info: pcmk_peer_update: memb: xen-e 923599370
+Dec 14 20:07:54 xen-e corosync[24218]:  [pcmk  ] info: pcmk_peer_update: lost: xen-d 906822154
+Dec 14 20:07:54 xen-e pengine: [24227]: WARN: pe_fence_node: Node xen-d will be fenced because it is un-expectedly down
+Dec 14 20:07:54 xen-e crmd: [24228]: notice: te_fence_node: Executing reboot fencing operation (12) on xen-d (timeout=60000)
+Dec 14 20:07:54 xen-e pengine: [24227]: WARN: stage6: Scheduling Node xen-d for STONITH
+Dec 14 20:07:56 xen-e stonith-ng: [24224]: notice: log_operation: Operation 'reboot' [24519] (call 0 from c0c111a5-d332-48f7-9375-739b91e04f0e) for host 'xen-d' with device 's-libvirt' returned: 0
+Dec 14 20:08:23 xen-d corosync[1874]:  [MAIN  ] Corosync Cluster Engine ('1.4.3'): started and ready to provide service.
+Dec 14 20:08:23 xen-d corosync[1874]:  [pcmk  ] info: pcmk_peer_update: memb: xen-d 906822154
+Dec 14 20:08:40 xen-e corosync[24218]:  [pcmk  ] info: pcmk_peer_update: memb: xen-e 923599370
+Dec 14 20:07:19 xen-d lrmd: [5657]: info: rsc:d1 start[4] (pid 5833)
+Dec 14 20:07:19 xen-d crmd: [5660]: info: process_lrm_event: LRM operation d1_start_0 (call=4, rc=0, cib-update=14, confirmed=true) ok
+Dec 14 20:07:19 xen-e lrmd: [24225]: info: rsc:s-libvirt start[4] (pid 24264)
+Dec 14 20:07:20 xen-e external/libvirt(s-libvirt)[24271]: [24288]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:07:21 xen-e crmd: [24228]: info: process_lrm_event: LRM operation s-libvirt_start_0 (call=4, rc=0, cib-update=66, confirmed=true) ok
+Dec 14 20:07:22 xen-e external/libvirt(s-libvirt)[24296]: [24313]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:07:29 xen-d lrmd: [5657]: info: rsc:d1 stop[6] (pid 5926)
+Dec 14 20:07:29 xen-d crmd: [5660]: info: process_lrm_event: LRM operation d1_stop_0 (call=6, rc=0, cib-update=17, confirmed=true) ok
+Dec 14 20:07:29 xen-d lrmd: [5657]: info: rsc:d1 start[7] (pid 5929)
+Dec 14 20:07:29 xen-d crmd: [5660]: info: process_lrm_event: LRM operation d1_start_0 (call=7, rc=0, cib-update=18, confirmed=true) ok
+Dec 14 20:07:29 xen-e external/libvirt(s-libvirt)[24321]: [24338]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:07:36 xen-e external/libvirt(s-libvirt)[24371]: [24387]: ERROR: Failed to get status for xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen
+Dec 14 20:07:36 xen-e external/libvirt(s-libvirt)[24371]: [24393]: ERROR: setlocale: No such file or directory
+Dec 14 20:07:36 xen-e external/libvirt(s-libvirt)[24371]: [24393]: error: Cannot recv data: Warning: Identity file /root/.ssh/xen not accessible: No such file or directory.
+Dec 14 20:07:36 xen-e external/libvirt(s-libvirt)[24371]: [24393]: Permission denied (publickey,keyboard-interactive). : Connection reset by peer
+Dec 14 20:07:36 xen-e external/libvirt(s-libvirt)[24371]: [24393]: error: failed to connect to the hypervisor
+Dec 14 20:07:37 xen-e lrmd: [24225]: info: rsc:s-libvirt stop[6] (pid 24417)
+Dec 14 20:07:37 xen-e crmd: [24228]: info: process_lrm_event: LRM operation s-libvirt_stop_0 (call=6, rc=0, cib-update=74, confirmed=true) ok
+Dec 14 20:07:37 xen-e lrmd: [24225]: info: rsc:s-libvirt start[7] (pid 24418)
+Dec 14 20:07:39 xen-e external/libvirt(s-libvirt)[24425]: [24442]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:07:40 xen-e crmd: [24228]: info: process_lrm_event: LRM operation s-libvirt_start_0 (call=7, rc=0, cib-update=75, confirmed=true) ok
+Dec 14 20:07:41 xen-e external/libvirt(s-libvirt)[24463]: [24480]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:07:48 xen-e external/libvirt(s-libvirt)[24488]: [24505]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:07:55 xen-e external/libvirt(s-libvirt)[24525]: [24540]: notice: Domain xen-d was rebooted
+Dec 14 20:07:56 xen-e lrmd: [24225]: info: rsc:d1 start[9] (pid 24568)
+Dec 14 20:07:56 xen-e crmd: [24228]: info: process_lrm_event: LRM operation d1_start_0 (call=9, rc=0, cib-update=96, confirmed=true) ok
+Dec 14 20:07:57 xen-e external/libvirt(s-libvirt)[24555]: [24588]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:08:07 xen-e external/libvirt(s-libvirt)[24599]: [24616]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:08:15 xen-e external/libvirt(s-libvirt)[24630]: [24647]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:08:22 xen-e external/libvirt(s-libvirt)[24658]: [24678]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:08:26 xen-d lrmd: [1945]: info: rsc:d1 start[4] (pid 2405)
+Dec 14 20:08:26 xen-d crmd: [1948]: info: process_lrm_event: LRM operation d1_start_0 (call=4, rc=0, cib-update=9, confirmed=true) ok
+Dec 14 20:08:29 xen-e external/libvirt(s-libvirt)[24689]: [24706]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:08:36 xen-e external/libvirt(s-libvirt)[24717]: [24734]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:08:43 xen-e lrmd: [24225]: info: rsc:d1 stop[11] (pid 24774)
+Dec 14 20:08:43 xen-e crmd: [24228]: info: process_lrm_event: LRM operation d1_stop_0 (call=11, rc=0, cib-update=125, confirmed=true) ok
+Dec 14 20:08:43 xen-e external/libvirt(s-libvirt)[24748]: [24786]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:06:36 xen-e crmd: [24228]: notice: ais_dispatch_message: Membership 1289820: quorum acquired
+Dec 14 20:06:36 xen-d crmd: [5660]: notice: ais_dispatch_message: Membership 1289820: quorum acquired
+Dec 14 20:07:54 xen-e crmd: [24228]: notice: ais_dispatch_message: Membership 1289824: quorum lost
+Dec 14 20:08:24 xen-d crmd: [1948]: notice: ais_dispatch_message: Membership 1289828: quorum acquired
+Dec 14 20:08:40 xen-e crmd: [24228]: notice: ais_dispatch_message: Membership 1289828: quorum acquired
+Dec 14 20:06:57 xen-e pengine: [24227]: ERROR: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
+Dec 14 20:06:57 xen-e pengine: [24227]: ERROR: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
+Dec 14 20:06:57 xen-e pengine: [24227]: ERROR: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
+Dec 14 20:07:36 xen-e external/libvirt(s-libvirt)[24371]: [24387]: ERROR: Failed to get status for xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen
+Dec 14 20:07:36 xen-e external/libvirt(s-libvirt)[24371]: [24393]: ERROR: setlocale: No such file or directory
+Dec 14 20:07:37 xen-e stonith-ng: [24224]: ERROR: log_operation: s-libvirt: Performing: stonith -t external/libvirt -S
+Dec 14 20:07:37 xen-e stonith: [24366]: ERROR: external/libvirt device not accessible.
+Dec 14 20:07:37 xen-e stonith-ng: [24224]: ERROR: log_operation: s-libvirt: failed:  1
+Dec 14 20:07:43 xen-d stonith-ng: [5656]: ERROR: ais_dispatch: Receiving message body failed: (2) Library error: Resource temporarily unavailable (11)
+Dec 14 20:07:43 xen-d cib: [5655]: ERROR: ais_dispatch: Receiving message body failed: (2) Library error: Resource temporarily unavailable (11)
+Dec 14 20:07:43 xen-d attrd: [5658]: ERROR: ais_dispatch: Receiving message body failed: (2) Library error: Resource temporarily unavailable (11)
+Dec 14 20:07:43 xen-d crmd: [5660]: ERROR: ais_dispatch: Receiving message body failed: (2) Library error: Resource temporarily unavailable (11)
+Dec 14 20:07:43 xen-d stonith-ng: [5656]: ERROR: ais_dispatch: AIS connection failed
+Dec 14 20:07:43 xen-d cib: [5655]: ERROR: ais_dispatch: AIS connection failed
+Dec 14 20:07:43 xen-d attrd: [5658]: ERROR: ais_dispatch: AIS connection failed
+Dec 14 20:07:43 xen-d crmd: [5660]: ERROR: ais_dispatch: AIS connection failed
+Dec 14 20:07:43 xen-d stonith-ng: [5656]: ERROR: stonith_peer_ais_destroy: AIS connection terminated
+Dec 14 20:07:43 xen-d cib: [5655]: ERROR: cib_ais_destroy: Corosync connection lost!  Exiting.
+Dec 14 20:07:43 xen-d attrd: [5658]: CRIT: attrd_ais_destroy: Lost connection to OpenAIS service!
+Dec 14 20:07:43 xen-d attrd: [5658]: ERROR: attrd_cib_connection_destroy: Connection to the CIB terminated...
+Dec 14 20:07:43 xen-d crmd: [5660]: CRIT: stonith_dispatch_internal: Lost connection to the STONITH service [5656/callback].
+Dec 14 20:07:43 xen-d crmd: [5660]: CRIT: stonith_dispatch_internal: Lost connection to the STONITH service [5656/command].
+Dec 14 20:07:43 xen-d crmd: [5660]: CRIT: tengine_stonith_connection_destroy: Fencing daemon connection failed
+Dec 14 20:07:44 xen-d crmd: [5660]: ERROR: te_connect_stonith: Sign-in failed: triggered a retry
+Dec 14 20:07:44 xen-d crmd: [5660]: ERROR: attrd_connection_destroy: Lost connection to attrd
+Dec 14 20:07:44 xen-d crmd: [5660]: CRIT: cib_native_dispatch: Lost connection to the CIB service [5655/callback].
+Dec 14 20:07:44 xen-d crmd: [5660]: CRIT: cib_native_dispatch: Lost connection to the CIB service [5655/command].
+Dec 14 20:07:44 xen-d crmd: [5660]: ERROR: crmd_cib_connection_destroy: Connection to the CIB terminated...
+Dec 14 20:07:44 xen-d crmd: [5660]: ERROR: do_log: FSA: Input I_ERROR from crmd_cib_connection_destroy() received in state S_NOT_DC
+Dec 14 20:07:44 xen-d crmd: [5660]: ERROR: do_recover: Action A_RECOVER (0000000001000000) not supported
+Dec 14 20:07:44 xen-d crmd: [5660]: ERROR: do_log: FSA: Input I_TERMINATE from do_recover() received in state S_RECOVERY
+Dec 14 20:07:44 xen-d crmd: [5660]: ERROR: verify_stopped: Resource d1 was active at shutdown.  You may ignore this error if it is unmanaged.
+Dec 14 20:07:44 xen-d crmd: [5660]: ERROR: do_exit: Could not recover from internal error
 .INP: node xen-d
 Dec 14 20:06:35 xen-d corosync[5649]:  [MAIN  ] Corosync Cluster Engine ('1.4.3'): started and ready to provide service.
 Dec 14 20:06:36 xen-d corosync[5649]:  [pcmk  ] info: pcmk_peer_update: memb: xen-d 906822154
@@ -27,11 +116,9 @@ Dec 14 20:06:35 xen-e corosync[24218]:  [MAIN  ] Corosync Cluster Engine ('1.4.3
 Dec 14 20:07:54 xen-e corosync[24218]:  [pcmk  ] info: pcmk_peer_update: memb: xen-e 923599370
 Dec 14 20:08:40 xen-e corosync[24218]:  [pcmk  ] info: pcmk_peer_update: memb: xen-e 923599370
 .INP: node .*
-Dec 14 20:06:35 xen-e corosync[24218]:  [MAIN  ] Corosync Cluster Engine ('1.4.3'): started and ready to provide service.
 Dec 14 20:06:35 xen-d corosync[5649]:  [MAIN  ] Corosync Cluster Engine ('1.4.3'): started and ready to provide service.
+Dec 14 20:06:35 xen-e corosync[24218]:  [MAIN  ] Corosync Cluster Engine ('1.4.3'): started and ready to provide service.
 Dec 14 20:06:36 xen-d corosync[5649]:  [pcmk  ] info: pcmk_peer_update: memb: xen-d 906822154
-Dec 14 20:06:57 xen-d crmd: [5660]: info: do_election_count_vote: Election 2 (owner: xen-e) lost: vote from xen-e (Uptime)
-Dec 14 20:07:19 xen-d crmd: [5660]: info: do_election_count_vote: Election 3 (owner: xen-e) lost: vote from xen-e (Uptime)
 Dec 14 20:07:54 xen-e corosync[24218]:  [pcmk  ] info: pcmk_peer_update: memb: xen-e 923599370
 Dec 14 20:07:54 xen-e corosync[24218]:  [pcmk  ] info: pcmk_peer_update: lost: xen-d 906822154
 Dec 14 20:07:54 xen-e pengine: [24227]: WARN: pe_fence_node: Node xen-d will be fenced because it is un-expectedly down
@@ -77,7 +164,6 @@ Dec 14 20:07:21 xen-e lrmd: [24225]: info: operation start[4] on s-libvirt for c
 Dec 14 20:07:21 xen-e lrmd: [24225]: info: rsc:s-libvirt monitor[5] (pid 24289)
 Dec 14 20:07:22 xen-e external/libvirt(s-libvirt)[24296]: [24313]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
 Dec 14 20:07:23 xen-e lrmd: [24225]: info: operation monitor[5] on s-libvirt for client 24228: pid 24289 exited with return code 0
-Dec 14 20:07:29 xen-e external/libvirt(s-libvirt)[24321]: [24338]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
 Dec 14 20:07:29 xen-d lrmd: [5657]: info: cancel_op: operation monitor[5] on d1 for client 5660, its parameters: CRM_meta_name=[monitor] crm_feature_set=[3.0.6] CRM_meta_timeout=[30000] CRM_meta_interval=[5000]  cancelled
 Dec 14 20:07:29 xen-d lrmd: [5657]: info: rsc:d1 stop[6] (pid 5926)
 Dec 14 20:07:29 xen-d lrmd: [5657]: info: operation stop[6] on d1 for client 5660: pid 5926 exited with return code 0
@@ -85,6 +171,7 @@ Dec 14 20:07:29 xen-d lrmd: [5657]: info: rsc:d1 start[7] (pid 5929)
 Dec 14 20:07:29 xen-d lrmd: [5657]: info: operation start[7] on d1 for client 5660: pid 5929 exited with return code 0
 Dec 14 20:07:29 xen-d lrmd: [5657]: info: rsc:d1 monitor[8] (pid 5938)
 Dec 14 20:07:29 xen-d lrmd: [5657]: info: operation monitor[8] on d1 for client 5660: pid 5938 exited with return code 0
+Dec 14 20:07:29 xen-e external/libvirt(s-libvirt)[24321]: [24338]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
 Dec 14 20:07:36 xen-e external/libvirt(s-libvirt)[24371]: [24387]: ERROR: Failed to get status for xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen
 Dec 14 20:07:36 xen-e external/libvirt(s-libvirt)[24371]: [24393]: ERROR: setlocale: No such file or directory
 Dec 14 20:07:36 xen-e external/libvirt(s-libvirt)[24371]: [24393]: error: Cannot recv data: Warning: Identity file /root/.ssh/xen not accessible: No such file or directory.
@@ -104,14 +191,11 @@ Dec 14 20:07:41 xen-e external/libvirt(s-libvirt)[24463]: [24480]: notice: xen+s
 Dec 14 20:07:42 xen-e lrmd: [24225]: info: operation monitor[8] on s-libvirt for client 24228: pid 24456 exited with return code 0
 Dec 14 20:07:44 xen-d lrmd: [5657]: info: cancel_op: operation monitor[8] on d1 for client 5660, its parameters: CRM_meta_name=[monitor] crm_feature_set=[3.0.6] CRM_meta_timeout=[30000] CRM_meta_interval=[5000]  cancelled
 Dec 14 20:07:48 xen-e external/libvirt(s-libvirt)[24488]: [24505]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
-Dec 14 20:07:55 xen-e external/libvirt(s-libvirt)[24525]: [24540]: notice: Domain xen-d was rebooted
 Dec 14 20:07:55 xen-d shutdown[6093]: shutting down for system reboot
 Dec 14 20:07:55 xen-d init: Switching to runlevel: 6
+Dec 14 20:07:55 xen-e external/libvirt(s-libvirt)[24525]: [24540]: notice: Domain xen-d was rebooted
 Dec 14 20:07:56 xen-e lrmd: [24225]: info: rsc:d1 start[9] (pid 24568)
 Dec 14 20:07:56 xen-e lrmd: [24225]: info: operation start[9] on d1 for client 24228: pid 24568 exited with return code 0
-Dec 14 20:07:57 xen-e lrmd: [24225]: info: rsc:d1 monitor[10] (pid 24577)
-Dec 14 20:07:57 xen-e lrmd: [24225]: info: operation monitor[10] on d1 for client 24228: pid 24577 exited with return code 0
-Dec 14 20:07:57 xen-e external/libvirt(s-libvirt)[24555]: [24588]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
 Dec 14 20:07:57 xen-d logd: [6194]: debug: Stopping ha_logd with pid 1787
 Dec 14 20:07:57 xen-d logd: [6194]: info: Waiting for pid=1787 to exit
 Dec 14 20:07:57 xen-d logd: [1787]: debug: logd_term_action: received SIGTERM
@@ -122,6 +206,9 @@ Dec 14 20:07:57 xen-d logd: [1790]: info: logd_term_write_action: received SIGTE
 Dec 14 20:07:57 xen-d logd: [1790]: debug: Writing out 0 messages then quitting
 Dec 14 20:07:57 xen-d logd: [1790]: info: Exiting write process
 Dec 14 20:07:57 xen-d haveged: haveged stopping due to signal 15
+Dec 14 20:07:57 xen-e lrmd: [24225]: info: rsc:d1 monitor[10] (pid 24577)
+Dec 14 20:07:57 xen-e lrmd: [24225]: info: operation monitor[10] on d1 for client 24228: pid 24577 exited with return code 0
+Dec 14 20:07:57 xen-e external/libvirt(s-libvirt)[24555]: [24588]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
 Dec 14 20:07:58 xen-d logd: [6194]: info: Pid 1787 exited
 Dec 14 20:07:58 xen-d rpcbind: rpcbind terminating on signal. Restart with "rpcbind -w"
 Dec 14 20:07:58 xen-d kernel: Kernel logging (proc) stopped.
@@ -171,9 +258,9 @@ Dec 14 20:08:26 xen-d kernel: [   23.572989] BIOS EDD facility v0.16 2004-Jun-25
 Dec 14 20:08:26 xen-d kernel: [   23.573005] EDD information not available.
 Dec 14 20:08:26 xen-d lrmd: [1945]: info: rsc:d1 monitor[5] (pid 2409)
 Dec 14 20:08:26 xen-d lrmd: [1945]: info: operation monitor[5] on d1 for client 1948: pid 2409 exited with return code 0
-Dec 14 20:08:29 xen-e external/libvirt(s-libvirt)[24689]: [24706]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
 Dec 14 20:08:29 xen-d kernel: [   30.841076] eth0: no IPv6 routers present
 Dec 14 20:08:29 xen-d logger: Mark:HB_REPORT:1355512108
+Dec 14 20:08:29 xen-e external/libvirt(s-libvirt)[24689]: [24706]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
 Dec 14 20:08:36 xen-e external/libvirt(s-libvirt)[24717]: [24734]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
 Dec 14 20:08:43 xen-e lrmd: [24225]: info: cancel_op: operation monitor[10] on d1 for client 24228, its parameters: CRM_meta_name=[monitor] crm_feature_set=[3.0.6] CRM_meta_timeout=[30000] CRM_meta_interval=[5000]  cancelled
 Dec 14 20:08:43 xen-e lrmd: [24225]: info: rsc:d1 stop[11] (pid 24774)
@@ -185,23 +272,37 @@ history-test/xen-e/pengine/pe-input-43.bz2
 history-test/xen-e/pengine/pe-input-44.bz2
 history-test/xen-e/pengine/pe-input-45.bz2
 history-test/xen-e/pengine/pe-input-46.bz2
+history-test/xen-e/pengine/pe-input-47.bz2
 history-test/xen-e/pengine/pe-input-48.bz2
 history-test/xen-e/pengine/pe-warn-272.bz2
 history-test/xen-e/pengine/pe-input-49.bz2
 history-test/xen-e/pengine/pe-input-50.bz2
 .INP: peinputs v
-Date       Start    End       Filename      Client     User       Origin
-====       =====    ===       ========      ======     ====       ======
+Date       Start    End       Filename      Client     User       Origin      Tags
+====       =====    ===       ========      ======     ====       ======      ====
 2012-12-14 20:06:57 20:06:57  pe-input-43   crmd       hacluster  xen-e
-2012-12-14 20:07:19 20:07:23  pe-input-44   cibadmin   root       xen-d
+2012-12-14 20:07:19 20:07:23  pe-input-44   cibadmin   root       xen-d   s-libvirt
 2012-12-14 20:07:29 20:07:29  pe-input-45   cibadmin   root       xen-d
 2012-12-14 20:07:29 20:07:29  pe-input-46   cibadmin   root       xen-d
-2012-12-14 20:07:37 20:07:42  pe-input-48   cibadmin   root       xen-d
-2012-12-14 20:07:54 20:07:56  pe-warn-272   cibadmin   root       xen-d
-2012-12-14 20:07:56 20:07:57  pe-input-49   cibadmin   root       xen-d
-2012-12-14 20:08:43 20:08:43  pe-input-50   cibadmin   root       xen-d
+2012-12-14 20:07:37 --:--:--  pe-input-47   cibadmin   root       xen-d   
+2012-12-14 20:07:37 20:07:42  pe-input-48   cibadmin   root       xen-d   s-libvirt
+2012-12-14 20:07:54 20:07:56  pe-warn-272   cibadmin   root       xen-d   d1 s-libvirt xen-d
+2012-12-14 20:07:56 20:07:57  pe-input-49   cibadmin   root       xen-d   d1
+2012-12-14 20:08:43 20:08:43  pe-input-50   cibadmin   root       xen-d   d1
+.INP: transitions
+Time                            Name            Node            Tags
+2012-12-14 20:06:57 - 20:06:57: pe-input-43     xen-e           
+2012-12-14 20:07:19 - 20:07:23: pe-input-44     xen-e           s-libvirt
+2012-12-14 20:07:29 - 20:07:29: pe-input-45     xen-e           
+2012-12-14 20:07:29 - 20:07:29: pe-input-46     xen-e           
+2012-12-14 20:07:37 - --:--:--: pe-input-47     xen-e           
+2012-12-14 20:07:37 - 20:07:42: pe-input-48     xen-e           s-libvirt
+2012-12-14 20:07:54 - 20:07:56: pe-warn-272     xen-e           d1 s-libvirt xen-d
+2012-12-14 20:07:56 - 20:07:57: pe-input-49     xen-e           d1
+2012-12-14 20:08:43 - 20:08:43: pe-input-50     xen-e           d1
 .INP: refresh
-INFO: 16: nothing to refresh if source isn't live
+Refreshing log data...
+9 transitions, 87 events.
 .INP: resource d1
 Dec 14 20:07:19 xen-d lrmd: [5657]: info: rsc:d1 start[4] (pid 5833)
 Dec 14 20:07:19 xen-d crmd: [5660]: info: process_lrm_event: LRM operation d1_start_0 (call=4, rc=0, cib-update=14, confirmed=true) ok
@@ -237,27 +338,25 @@ Dec 14 20:08:43 xen-e lrmd: [24225]: info: rsc:d1 stop[11] (pid 24774)
 Dec 14 20:08:43 xen-e lrmd: [24225]: info: operation stop[11] on d1 for client 24228: pid 24774 exited with return code 0
 Dec 14 20:08:43 xen-e external/libvirt(s-libvirt)[24748]: [24786]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
 .INP: transition nograph
-INFO: 24: running ptest with history-test/xen-e/pengine/pe-input-50.bz2
+INFO: 26: running ptest with history-test/xen-e/pengine/pe-input-50.bz2
 .EXT >/dev/null 2>&1 crm_simulate -x - -S -VV
 Transition xen-e:pe-input-50 (20:08:43 - 20:08:43):
 	total 8 actions: 8 Complete
 Dec 14 20:08:43 xen-e lrmd: [24225]: info: rsc:d1 stop[11] (pid 24774)
 Dec 14 20:08:43 xen-e external/libvirt(s-libvirt)[24748]: [24786]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
 .INP: transition -1 nograph
-INFO: 25: running ptest with history-test/xen-e/pengine/pe-input-49.bz2
+INFO: 27: running ptest with history-test/xen-e/pengine/pe-input-49.bz2
 .EXT >/dev/null 2>&1 crm_simulate -x - -S -VV
 Transition xen-e:pe-input-49 (20:07:56 - 20:07:57):
 	total 2 actions: 2 Complete
 Dec 14 20:07:56 xen-e lrmd: [24225]: info: rsc:d1 start[9] (pid 24568)
 Dec 14 20:07:57 xen-e external/libvirt(s-libvirt)[24555]: [24588]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
 .INP: transition save 0 _crmsh_regtest
-INFO: 26: transition history-test/xen-e/pengine/pe-input-50.bz2 saved to shadow _crmsh_regtest
+INFO: 28: transition history-test/xen-e/pengine/pe-input-50.bz2 saved to shadow _crmsh_regtest
 .INP: transition log 49
 Dec 14 20:07:56 xen-e lrmd: [24225]: info: rsc:d1 start[9] (pid 24568)
 Dec 14 20:07:56 xen-e lrmd: [24225]: info: operation start[9] on d1 for client 24228: pid 24568 exited with return code 0
-Dec 14 20:07:57 xen-e lrmd: [24225]: info: rsc:d1 monitor[10] (pid 24577)
-Dec 14 20:07:57 xen-e lrmd: [24225]: info: operation monitor[10] on d1 for client 24228: pid 24577 exited with return code 0
-Dec 14 20:07:57 xen-e external/libvirt(s-libvirt)[24555]: [24588]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+Dec 14 20:07:57 xen-d logd: [6194]: debug: Stopping ha_logd with pid 1787
 Dec 14 20:07:57 xen-d logd: [6194]: info: Waiting for pid=1787 to exit
 Dec 14 20:07:57 xen-d logd: [1787]: debug: logd_term_action: received SIGTERM
 Dec 14 20:07:57 xen-d logd: [1787]: debug: logd_term_action: waiting for 0 messages to be read for process lrmd
@@ -267,6 +366,11 @@ Dec 14 20:07:57 xen-d logd: [1790]: info: logd_term_write_action: received SIGTE
 Dec 14 20:07:57 xen-d logd: [1790]: debug: Writing out 0 messages then quitting
 Dec 14 20:07:57 xen-d logd: [1790]: info: Exiting write process
 Dec 14 20:07:57 xen-d haveged: haveged stopping due to signal 15
+Dec 14 20:07:57 xen-e lrmd: [24225]: info: rsc:d1 monitor[10] (pid 24577)
+Dec 14 20:07:57 xen-e lrmd: [24225]: info: operation monitor[10] on d1 for client 24228: pid 24577 exited with return code 0
+Dec 14 20:07:57 xen-e external/libvirt(s-libvirt)[24555]: [24588]: notice: xen+ssh://hex-12.suse.de/?keyfile=/root/.ssh/xen: Running hypervisor: Xen 4.1.0
+.INP: transition tags 49
+d1
 .INP: # reset timeframe
 .INP: timeframe
 .INP: session save _crmsh_regtest
@@ -280,7 +384,5 @@ Report saved in '/root/_crmsh_regtest.tar.bz2'
 .TRY History 2
 .INP: history
 .INP: session load _crmsh_regtest
-.EXT tar -tj < history-test.tar.bz2 2> /dev/null | head -1
-.EXT tar -xj < history-test.tar.bz2
 .INP: exclude
 corosync|crmd|pengine|stonith-ng|cib|attrd|mgmtd|sshd
diff --git a/test/testcases/ra.exp b/test/testcases/ra.exp
index 59318f7..3ef9673 100644
--- a/test/testcases/ra.exp
+++ b/test/testcases/ra.exp
@@ -22,7 +22,7 @@ but moderate. The minimum timeouts should never be below 10 seconds.
 
 Parameters (*: required, []: default):
 
-state (string, [/var/run//Dummy-{OCF_RESOURCE_INSTANCE}.state]): State file
+state (string, [/var/run/Dummy-Dummy.state]): State file
     Location to store the resource state in.
 
 passwd (string): Password
@@ -81,6 +81,10 @@ pcmk_delay_max (time, [0s]): Enable random delay for stonith actions and specify
     This prevents double fencing when using slow devices such as sbd.
     Use this to enable random delay for stonith actions and specify the maximum of random delay.
 
+pcmk_action_limit (integer, [1]): The maximum number of actions can be performed in parallel on this device
+    Pengine property concurrent-fencing=true needs to be configured first.
+    Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited.
+
 pcmk_reboot_action (string, [reboot]): Advanced use only: An alternate command to run instead of 'reboot'
     Some devices do not support the standard commands or may provide additional ones.
     Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
diff --git a/test/testcases/scripts.exp b/test/testcases/scripts.exp
index be4a6d3..a6317f3 100644
--- a/test/testcases/scripts.exp
+++ b/test/testcases/scripts.exp
@@ -14,7 +14,7 @@ ERROR: 2: Error when loading script haproxy: No meta-data for agent: systemd:hap
 .EXT crm_resource --show-metadata ocf:heartbeat:Raid1
 Basic:
 
-mailto           MailTo
+mailto           E-Mail
 virtual-ip       Virtual IP
 
 Database:
@@ -29,11 +29,15 @@ Filesystem:
 clvm             Cluster-aware LVM
 clvm-vg          Cluster-aware LVM (Volume Group)
 drbd             DRBD Block Device
-filesystem       Filesystem (mount point)
-gfs2             gfs2 filesystem (cloned)
-gfs2-base        gfs2 filesystem base (cloned)
-ocfs2            OCFS2 filesystem (cloned)
-raid-lvm         RAID hosting LVM
+filesystem       File System (mount point)
+gfs2             GFS2 File System (Cloned)
+ocfs2            OCFS2 File System
+raid-lvm         RAID Hosting LVM
+
+NFS:
+
+exportfs         NFS Exported File System
+nfsserver        NFS Server
 
 SAP:
 
@@ -46,8 +50,6 @@ sap-simple-stack-plus SAP SimpleStack+ Instance
 Server:
 
 apache           Apache Webserver
-exportfs         NFS Exported File System
-nfsserver        NFS Server
 
 Stonith:
 
@@ -59,7 +61,7 @@ sbd              SBD, Shared storage based fencing
 ERROR: 3: Error when loading script haproxy: No meta-data for agent: systemd:haproxy
 Basic:
 
-mailto           MailTo
+mailto           E-Mail
 virtual-ip       Virtual IP
 
 Database:
@@ -74,11 +76,15 @@ Filesystem:
 clvm             Cluster-aware LVM
 clvm-vg          Cluster-aware LVM (Volume Group)
 drbd             DRBD Block Device
-filesystem       Filesystem (mount point)
-gfs2             gfs2 filesystem (cloned)
-gfs2-base        gfs2 filesystem base (cloned)
-ocfs2            OCFS2 filesystem (cloned)
-raid-lvm         RAID hosting LVM
+filesystem       File System (mount point)
+gfs2             GFS2 File System (Cloned)
+ocfs2            OCFS2 File System
+raid-lvm         RAID Hosting LVM
+
+NFS:
+
+exportfs         NFS Exported File System
+nfsserver        NFS Server
 
 SAP:
 
@@ -92,6 +98,7 @@ Script:
 
 add              Add a new node to an already existing cluster
 check-uptime     Check uptime of nodes
+gfs2-base        GFS2 File System Base (Cloned)
 health           Check the health of the cluster
 init             Initialize a new cluster
 lvm              Controls the availability of an LVM Volume Group
@@ -103,8 +110,6 @@ sapinstance      SAP Instance
 Server:
 
 apache           Apache Webserver
-exportfs         NFS Exported File System
-nfsserver        NFS Server
 
 Stonith:
 
@@ -221,10 +226,9 @@ virtual-ip
 ERROR: 7: script.list: Unexpected argument 'bogus': expected  [all|names]
 .INP: show mailto
 mailto (Basic)
-MailTo
+E-Mail
 
- This is a resource agent for MailTo. It sends email to a sysadmin
-whenever  a takeover occurs.
+Notifies recipient by e-mail in the event of a resource takeover.
 
 1. Notifies recipients by email in the event of resource takeover
 
@@ -253,7 +257,7 @@ whenever  a takeover occurs.
 	clone c-foo foo
 
 .INP: run mailto id=foo email=test at example.com subject=hello nodes=node1 dry_run=true
-INFO: 10: MailTo
+INFO: 10: E-Mail
 INFO: 10: Nodes: node1
 ** all - #!/usr/bin/env python
 import crm_script
diff --git a/test/unittests/test_bugs.py b/test/unittests/test_bugs.py
index e5b3149..b310a52 100644
--- a/test/unittests/test_bugs.py
+++ b/test/unittests/test_bugs.py
@@ -32,7 +32,7 @@ def test_bug41660_1():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     print etree.tostring(obj.node)
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print data
     exp = 'primitive bug41660 ocf:pacemaker:Dummy meta target-role=Stopped'
     assert data == exp
@@ -71,7 +71,7 @@ def test_bug41660_2():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert obj is not None
-    #data = obj.repr_cli(format=-1)
+    #data = obj.repr_cli(format_mode=-1)
     #print data
     #exp = 'clone libvirtd-clone libvirtd meta interleave=true ordered=true target-role=Stopped'
     #assert data == exp
@@ -112,7 +112,7 @@ def test_bug41660_3():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert obj is not None
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print data
     exp = 'clone libvirtd-clone libvirtd meta target-role=Stopped'
     assert data == exp
@@ -250,7 +250,7 @@ end="2014-05-17 17:56:11Z"/>
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert obj is not None
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print "OUTPUT:", data
     exp = 'location cli-prefer-dummy-resource dummy-resource role=Started rule #uname eq x64-4 and date lt "2014-05-17 17:56:11Z"'
     assert data == exp
@@ -266,7 +266,7 @@ def test_order_without_score_kind():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert obj is not None
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print "OUTPUT:", data
     exp = 'order order-a-b a:promote b:start'
     assert data == exp
@@ -338,7 +338,7 @@ def test_pengine_test():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert obj is not None
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print "OUTPUT:", data
     exp = 'primitive rsc1 ocf:pacemaker:Dummy params rule 0: #cluster-name eq clusterA state="/var/run/Dummy-rsc1-clusterA" params rule 0: #cluster-name eq clusterB state="/var/run/Dummy-rsc1-clusterB" op monitor interval=10'
     assert data == exp
@@ -386,7 +386,7 @@ def test_op_role():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert obj is not None
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print "OUTPUT:", data
     exp = 'primitive rsc2 ocf:pacemaker:Dummy op monitor interval=10 role=Stopped'
     assert data == exp
@@ -405,7 +405,7 @@ def test_nvpair_no_value():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert obj is not None
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print "OUTPUT:", data
     exp = 'primitive rsc3 Dummy params verbose verbase="" verbese=" "'
     assert data == exp
@@ -426,7 +426,7 @@ def test_delete_ticket():
         data = etree.fromstring(x)
         obj = factory.create_from_node(data)
         assert obj is not None
-        data = obj.repr_cli(format=-1)
+        data = obj.repr_cli(format_mode=-1)
 
     factory.delete('daa0')
     assert factory.find_object('daa0') is None
@@ -447,7 +447,7 @@ def test_quotes():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert obj is not None
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print "OUTPUT:", data
     exp = 'primitive q1 ocf:pacemaker:Dummy params state="foo\\"foo\\""'
     assert data == exp
@@ -472,7 +472,7 @@ def test_nodeattrs():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert obj is not None
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     exp = 'node 1: dell71 attributes staging-0-0-placement=true meta-0-0-placement=true attributes standby=off'
     assert data == exp
     assert obj.cli_use_validate()
@@ -492,7 +492,7 @@ def test_nodeattrs2():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert obj is not None
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     exp = 'node h04 utilization utl_ram=1200 utl_cpu=200 attributes standby=off'
     assert data == exp
     assert obj.cli_use_validate()
@@ -700,14 +700,14 @@ primitive node1 Dummy params fake=something
     obj = cibconfig.mkset_obj()
     assert obj is not None
     ok = obj.save("""primitive node1 Dummy params fake=something-else
-    """, no_remove=True, method='update')
+    """, remove=False, method='update')
     assert ok
 
     print "** end"
 
     obj = cibconfig.mkset_obj()
     assert obj is not None
-    ok = obj.save(original_cib, no_remove=False, method='replace')
+    ok = obj.save(original_cib, remove=True, method='replace')
     assert ok
     obj = cibconfig.mkset_obj()
     with clidisplay.nopretty():
@@ -873,7 +873,7 @@ def test_bug959895():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     print etree.tostring(obj.node)
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print data
     exp = 'clone c-bug959895 g-bug959895'
     assert data == exp
@@ -908,7 +908,7 @@ def test_node_util_attr():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     print etree.tostring(obj.node)
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print data
     exp = 'node aberfeldy utilization cpu=2 memory=500 attributes standby=on'
     assert data == exp
diff --git a/test/unittests/test_cliformat.py b/test/unittests/test_cliformat.py
index a96927a..180b6c6 100644
--- a/test/unittests/test_cliformat.py
+++ b/test/unittests/test_cliformat.py
@@ -15,7 +15,7 @@ def assert_is_not_none(thing):
     assert thing is not None, "Expected non-None value"
 
 
-def roundtrip(cli, debug=False, expected=None):
+def roundtrip(cli, debug=False, expected=None, format_mode=-1, strip_color=False):
     node, _, _ = cibconfig.parse_cli_to_xml(cli, validation=MockValidation())
     assert_is_not_none(node)
     obj = factory.find_object(node.get("id"))
@@ -24,10 +24,13 @@ def roundtrip(cli, debug=False, expected=None):
     obj = factory.create_from_node(node)
     assert_is_not_none(obj)
     obj.nocli = True
-    xml = obj.repr_cli(format=-1)
+    xml = obj.repr_cli(format_mode=format_mode)
     print xml
     obj.nocli = False
-    s = obj.repr_cli(format=-1)
+    s = obj.repr_cli(format_mode=format_mode)
+    if strip_color:
+        import re
+        s = re.sub(r"\$\{[^}]+\}", "", s)
     if (s != cli) or debug:
         print "GOT:", s
         print "EXP:", cli
@@ -86,19 +89,19 @@ def test_broken_colo():
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert_is_not_none(obj)
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     eq_('colocation colo-2 inf: [ vip1 vip2 sequential=true ] [ apache:Master sequential=true ]', data)
     assert obj.cli_use_validate()
 
 
 @with_setup(setup_func, teardown_func)
 def test_comment():
-    roundtrip("# comment 1\nprimitive d0 ocf:pacemaker:Dummy")
+    roundtrip("# comment 1\nprimitive d0 ocf:pacemaker:Dummy", format_mode=0, strip_color=True)
 
 
 @with_setup(setup_func, teardown_func)
 def test_comment2():
-    roundtrip("# comment 1\n# comment 2\n# comment 3\nprimitive d0 ocf:pacemaker:Dummy")
+    roundtrip("# comment 1\n# comment 2\n# comment 3\nprimitive d0 ocf:pacemaker:Dummy", format_mode=0, strip_color=True)
 
 
 @with_setup(setup_func, teardown_func)
@@ -129,7 +132,7 @@ value="Stopped"/> \
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert_is_not_none(obj)
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print data
     exp = 'primitive dummy ocf:pacemaker:Dummy op start timeout=60 interval=0 op stop timeout=60 interval=0 op monitor interval=60 timeout=30 meta target-role=Stopped'
     eq_(exp, data)
@@ -152,7 +155,7 @@ value="Stopped"/> \
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert_is_not_none(obj)
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print data
     exp = 'primitive dummy2 ocf:pacemaker:Dummy meta target-role=Stopped ' \
           'op start timeout=60 interval=0 op stop timeout=60 interval=0 ' \
@@ -174,7 +177,7 @@ target="ha-one"></fencing-level>
     data = etree.fromstring(xml)
     obj = factory.create_from_node(data)
     assert_is_not_none(obj)
-    data = obj.repr_cli(format=-1)
+    data = obj.repr_cli(format_mode=-1)
     print data
     exp = 'fencing_topology st1'
     eq_(exp, data)
diff --git a/test/unittests/test_corosync.py b/test/unittests/test_corosync.py
index db8dd8c..786295e 100644
--- a/test/unittests/test_corosync.py
+++ b/test/unittests/test_corosync.py
@@ -67,7 +67,7 @@ class TestCorosyncParser(unittest.TestCase):
         p.add('nodelist',
               make_section('nodelist.node',
                            make_value('nodelist.node.ring0_addr', '10.10.10.10') +
-                           make_value('nodelist.node.nodeid', str(corosync.next_nodeid(p)))))
+                           make_value('nodelist.node.nodeid', str(corosync.get_free_nodeid(p)))))
         _valid(p)
         self.assertEqual(p.count('nodelist.node'), 6)
         self.assertEqual(p.get_all('nodelist.node.nodeid'),
@@ -75,11 +75,11 @@ class TestCorosyncParser(unittest.TestCase):
 
     def test_add_node_no_nodelist(self):
         "test checks that if there is no nodelist, no node is added"
-        from crmsh.corosync import make_section, make_value, next_nodeid
+        from crmsh.corosync import make_section, make_value, get_free_nodeid
 
         p = Parser(F1)
         _valid(p)
-        nid = next_nodeid(p)
+        nid = get_free_nodeid(p)
         self.assertEqual(p.count('nodelist.node'), nid - 1)
         p.add('nodelist',
               make_section('nodelist.node',
@@ -89,11 +89,11 @@ class TestCorosyncParser(unittest.TestCase):
         self.assertEqual(p.count('nodelist.node'), nid - 1)
 
     def test_add_node_nodelist(self):
-        from crmsh.corosync import make_section, make_value, next_nodeid
+        from crmsh.corosync import make_section, make_value, get_free_nodeid
 
         p = Parser(F2)
         _valid(p)
-        nid = next_nodeid(p)
+        nid = get_free_nodeid(p)
         c = p.count('nodelist.node')
         p.add('nodelist',
               make_section('nodelist.node',
@@ -101,7 +101,7 @@ class TestCorosyncParser(unittest.TestCase):
                            make_value('nodelist.node.nodeid', str(nid))))
         _valid(p)
         self.assertEqual(p.count('nodelist.node'), c + 1)
-        self.assertEqual(next_nodeid(p), nid + 1)
+        self.assertEqual(get_free_nodeid(p), nid + 1)
 
     def test_remove_node(self):
         p = Parser(F2)
@@ -118,5 +118,15 @@ class TestCorosyncParser(unittest.TestCase):
         _valid(p)
         self.assertEqual(p.count('service.ver'), 1)
 
+    def test_get_free_nodeid(self):
+        def ids(*lst):
+            class Ids(object):
+                def get_all(self, _arg):
+                    return lst
+            return Ids()
+        self.assertEqual(1, corosync.get_free_nodeid(ids('2', '5')))
+        self.assertEqual(3, corosync.get_free_nodeid(ids('1', '2', '5')))
+        self.assertEqual(4, corosync.get_free_nodeid(ids('1', '2', '3')))
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/test/unittests/test_scripts.py b/test/unittests/test_scripts.py
index 84240c7..f56b211 100644
--- a/test/unittests/test_scripts.py
+++ b/test/unittests/test_scripts.py
@@ -824,3 +824,54 @@ def test_unified():
     pprint(actions)
     eq_(len(actions), 1)
     eq_('primitive bar IPaddr2 ip=192.168.0.15\ngroup g-foo foo bar', actions[-1]['text'].strip())
+
+
+class TestPrinter(object):
+    def __init__(self):
+        import types
+        self.actions = []
+
+        def add_capture(name):
+            def capture(obj, *args):
+                obj.actions.append((name, args))
+            self.__dict__[name] = types.MethodType(capture, self)
+        for name in ('print_header', 'debug', 'error', 'start', 'flush', 'print_command', 'finish'):
+            add_capture(name)
+
+ at with_setup(setup_func, teardown_func)
+def test_inline_script():
+    """
+    Test inline script feature for call actions
+    """
+
+    a = '''---
+- version: 2.2
+  category: Script
+  parameters:
+    - name: foo
+      required: true
+      type: string
+  actions:
+    - call: |
+        #!/bin/sh
+        echo "{{foo}}"
+      nodes: local
+'''
+
+    script_a = scripts.load_script_string('foofoo', a)
+    assert script_a is not None
+
+    actions = scripts.verify(script_a,
+                             {"foo": "hello world"}, external_check=False)
+    pprint(actions)
+    assert len(actions) == 1
+    assert actions[0]['name'] == 'call'
+    assert actions[0]['value'] == '#!/bin/sh\necho "hello world"'
+    tp = TestPrinter()
+    scripts.run(script_a,
+                {"foo": "hello world"}, tp)
+
+    for action, args in tp.actions:
+        print action, args
+        if action == 'finish':
+            assert args[0]['value'] == '#!/bin/sh\necho "hello world"'
diff --git a/test/unittests/test_time.py b/test/unittests/test_time.py
index 4d0cab9..6315c5d 100644
--- a/test/unittests/test_time.py
+++ b/test/unittests/test_time.py
@@ -3,7 +3,7 @@
 
 
 from crmsh import utils
-from crmsh import history
+from crmsh import logtime
 from nose.tools import eq_
 import time
 import datetime
@@ -14,4 +14,11 @@ def test_time_convert1():
     loctz = dateutil.tz.tzlocal()
     tm = time.localtime(utils.datetime_to_timestamp(utils.make_datetime_naive(datetime.datetime(2015, 6, 1, 10, 0, 0).replace(tzinfo=loctz))))
     dt = utils.parse_time('Jun 01, 2015 10:00:00')
-    eq_(history.human_date(dt), time.strftime('%Y-%m-%d %H:%M:%S', tm))
+    eq_(logtime.human_date(dt), time.strftime('%Y-%m-%d %H:%M:%S', tm))
+
+
+def test_time_convert2():
+    loctz = dateutil.tz.tzlocal()
+    tm = time.localtime(utils.datetime_to_timestamp(utils.make_datetime_naive(datetime.datetime(2015, 6, 1, 10, 0, 0).replace(tzinfo=loctz))))
+    ts = time.localtime(utils.parse_to_timestamp('Jun 01, 2015 10:00:00'))
+    eq_(time.strftime('%Y-%m-%d %H:%M:%S', ts), time.strftime('%Y-%m-%d %H:%M:%S', tm))

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/crmsh.git



More information about the Debian-HA-Commits mailing list