[Debian-ha-commits] [resource-agents] 01/11: New upstream version 4.1.0~rc1

Valentin Vidic vvidic-guest at moszumanska.debian.org
Sun Nov 26 13:19:35 UTC 2017


This is an automated email from the git hooks/post-receive script.

vvidic-guest pushed a commit to branch master
in repository resource-agents.

commit f047456b19e0382944d58d4c2c91a5295744f023
Author: Valentin Vidic <Valentin.Vidic at CARNet.hr>
Date:   Sat Nov 25 23:15:45 2017 +0100

    New upstream version 4.1.0~rc1
---
 .travis.yml                         |   5 +-
 ChangeLog                           |  72 ++++
 Makefile.am                         |   2 +-
 ci/build.sh                         |   4 +-
 configure.ac                        |  11 +-
 doc/dev-guides/ra-dev-guide.asc     |   6 +-
 doc/man/Makefile.am                 |   8 +
 heartbeat/CTDB                      |  21 +-
 heartbeat/IPaddr2                   | 215 ++++++---
 heartbeat/LVM                       | 333 ++------------
 heartbeat/LVM-activate              | 840 ++++++++++++++++++++++++++++++++++++
 heartbeat/Makefile.am               |  13 +
 heartbeat/NodeUtilization           | 226 ++++++++++
 heartbeat/Raid1                     |  34 +-
 heartbeat/Route                     |  29 +-
 heartbeat/SAPInstance               |  25 ++
 heartbeat/VirtualDomain             | 106 ++++-
 heartbeat/ZFS                       | 192 +++++++++
 heartbeat/anything                  |  20 +-
 heartbeat/aws-vpc-move-ip           | 306 +++++++++++++
 heartbeat/aws-vpc-route53           | 302 +++++++++++++
 heartbeat/awseip                    |  28 +-
 heartbeat/awsvip                    |  18 +-
 heartbeat/clvm                      |   7 +-
 heartbeat/db2                       |  16 +-
 heartbeat/docker                    | 110 ++++-
 heartbeat/exportfs                  |  10 +-
 heartbeat/galera                    | 117 ++++-
 heartbeat/iSCSILogicalUnit          |  57 ++-
 heartbeat/iSCSITarget               |   7 +
 heartbeat/kamailio                  |  71 ++-
 heartbeat/lvm-clvm.sh               |  86 ++++
 heartbeat/lvm-plain.sh              |  62 +++
 heartbeat/lvm-tag.sh                | 205 +++++++++
 heartbeat/lvmlockd                  | 352 +++++++++++++++
 heartbeat/minio                     | 289 +++++++++++++
 heartbeat/mysql                     |  20 +-
 heartbeat/named                     |  14 +-
 heartbeat/nfsserver                 |   8 +-
 heartbeat/nginx                     |   2 +-
 heartbeat/ocf-directories.in        |   2 +-
 heartbeat/ocf-shellfuncs.in         | 160 ++++++-
 heartbeat/oraasm                    | 179 ++++++++
 heartbeat/ovsmonitor                | 450 +++++++++++++++++++
 heartbeat/pgsql                     | 152 ++++---
 heartbeat/portblock                 |   2 +-
 heartbeat/rabbitmq-cluster          | 157 ++++---
 heartbeat/redis                     |  13 +-
 heartbeat/{docker => rkt}           | 262 +++++------
 heartbeat/sapdb.sh                  |   2 +-
 heartbeat/sg_persist                |   4 +-
 heartbeat/varnish                   |  72 +++-
 resource-agents.spec.in             |  54 ++-
 rgmanager/src/resources/Makefile.am |   2 +-
 systemd/Makefile.am                 |  25 ++
 systemd/resource-agents-deps.target |   2 +
 systemd/resource-agents.conf        |   1 +
 tools/send_arp.libnet.c             |   4 +-
 tools/send_arp.linux.c              |  43 ++
 59 files changed, 5061 insertions(+), 774 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index e6943fa..62ae29e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,9 +3,10 @@ sudo: false
 
 addons:
   apt:
-    sources:
-      - debian-sid
+    #sources:
+    #  - debian-sid
     packages:
+      - libxml2-utils
       - shellcheck
 script:
   - ./ci/build.sh
diff --git a/ChangeLog b/ChangeLog
index bed973c..653ae38 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,75 @@
+* Tue Nov 14 2017 resource-agents contributors
+- release candidate 4.1.0 rc1
+- nfsserver: allow stop to timeout
+- LVM-activate: add new RA for LVM activation
+- docker: add docker-native healthcheck query
+- aws-vpc-route53: Fix tempfile race (bsc#1059312)
+- aws-vpc-move-ip: cleanup and improvements
+- pgsql: add support for PostgreSQL 10
+- rabbitmq-cluster/redis/galera use ocf_attribute_target
+- ocf-shellfuncs: add ocf_attribute_target()
+- rabbitmq-cluster: Use RMQ_CTL var everywhere instead of rabbitmqctl
+- CI: add libxml2-utils to package list
+- systemd-tmpfiles: create the resource-agents directory
+- rkt: new RA
+- lvmlockd: new RA
+- galera: fix instance name in master_exists()
+- Route: remove debug output
+- Route: add family attribute to set address family
+- galera: honor "safe_to_bootstrap" flag in grastate.dat
+- VirtualDomain: add shutdown_mode attribute
+- aws-vpc-route53: new RA (fate#322781)
+- awseip/awsvip: fixes and improvements
+- ZFS: new RA
+- minio: new RA
+- CTDB: don't fail on empty persistent directory (bsc#1052577)
+- sapdb.sh: add hdbnameserver to monitor services
+- VirtualDomain: new attributes migrateuri, remoteuri, migration_user
+- Raid1: handle case when mddev is a symlink
+- portblock: suppress dd output
+- kamailio: fixes and enhancements for v5.0
+- ocf-shellfuncs: improve locking (ocf_take_lock())
+- anything: create PID directory if it doesn't exist
+- anything: allow multiple instances of binfiles to be run (pidfile will be unique)
+- mysql: properly detect read-only state
+- iSCSILogicalUnit: add emulate_tpu, emulate_3pc and emulate_caw parameters
+- fs.sh: fix builds when srcdir and builddir are separated
+- LVM: warn when cache mode is not writethrough
+- nginx: fix return code when configfile check fails
+- SAPInstance: Add IS_ERS parameter (bsc#1036486)
+- systemd: add resource-agents-deps target
+- ocf-shellfuncs: simplify ocf_run returned rc
+- docker: add mount_points parameter to create directories used by container if they don't exist
+- IPaddr2: add option for specifying IPv6's preferred_lft
+- galera: fix master target during promotion with cluster_host_map
+- rabbitmq-cluster: backup and restore policies
+- DB2: fix HADR support for DB2 V98+
+- pgsql: fix regex to detect async mode
+- rabbitmq-cluster: fix to work on Pacemaker remote nodes
+- oraasm: new RA for Oracle ASM Disk Groups
+- pgsql: fix undefined variable
+- ovsmonitor: new RA
+- NodeUtilization: new RA
+- CTDB: fix for --logfile being replaced with --logging
+- pgsql: allow dynamic membership
+- redis: wait for pid file to appear after start
+- redis: add check and default for redis-check-rdb
+- named: add support for rndc options
+- docker: deal with image name correctly
+- iSCSILogicalUnit: replace openssl with md5sum
+- ra-dev-guide: fix description of OCF_ERR_ARGS
+- clvm: remove reload action from metadata
+- galera: fix the first bootstrap when cluster has no data
+- galera: fix permission of temporary log file for mariadb 10.1.21+
+- kamailio: add kamctl, kamgroup and extra_options parameters
+- ocf_log: use same log format as pacemaker
+- pgsql: replace "crm_failcount" with "crm_resource"
+- ocf-shellfuncs: ocf_run: avoid shell glob expansion of log messages
+- iSCSILogicalUnit: add lio-t IPv6-support
+- iSCSILogicalUnit/iSCSITarget: protect targetcli invocations with locks
+- mysql: set correct master score after maintenance mode
+- varnish: add support for v4.0
+
 * Thu Feb  2 2017 resource-agents contributors
 - stable release 4.0.1
 - galera: remove "long SST monitoring" support due to corner-case issues
diff --git a/Makefile.am b/Makefile.am
index 1769c6e..fccaca4 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -37,7 +37,7 @@ RGMANAGER = with
 endif
 
 if BUILD_LINUX_HA
-SUBDIRS	+= include heartbeat tools ldirectord doc
+SUBDIRS	+= include heartbeat tools ldirectord doc systemd
 LINUX_HA = without
 else
 LINUX_HA = with
diff --git a/ci/build.sh b/ci/build.sh
index a04973a..608387a 100755
--- a/ci/build.sh
+++ b/ci/build.sh
@@ -68,7 +68,7 @@ check_all_executables() {
 		check "$script"
 	done < <(eval "$(find_cmd)")
 	if [ $failed -gt 0 ]; then
-		echo "$failed failures detected."
+		echo "ci/build.sh: $failed failure(s) detected."
 		exit 1
 	fi
 	exit 0
@@ -77,5 +77,5 @@ check_all_executables() {
 ./autogen.sh
 ./configure
 make check
-[ $? ] || failed=$((failed + 1))
+[ $? -eq 0 ] || failed=$((failed + 1))
 check_all_executables
diff --git a/configure.ac b/configure.ac
index 3bb0b7d..e02d754 100644
--- a/configure.ac
+++ b/configure.ac
@@ -76,6 +76,14 @@ AS_IF([test "x$with_systemdsystemunitdir" != "xno"],
       [AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir])])
 AM_CONDITIONAL([HAVE_SYSTEMD], [test "x$with_systemdsystemunitdir" != "xno"])
 
+AC_ARG_WITH([systemdtmpfilesdir],
+     AS_HELP_STRING([--with-systemdtmpfilesdir=DIR], [Directory for systemd tmp files]),
+     [], [with_systemdtmpfilesdir=$($PKGCONFIG --variable=tmpfilesdir systemd)])
+     if test "x$with_systemdtmpfilesdir" != xno; then
+         AC_SUBST([systemdtmpfilesdir], [$with_systemdtmpfilesdir])
+     fi
+AM_CONDITIONAL(HAVE_SYSTEMD, [test -n "$with_systemdtmpfilesdir" -a "x$with_systemdtmpfilesdir" != xno ])
+
 dnl 
 dnl AM_INIT_AUTOMAKE([1.11.1 foreign dist-bzip2 dist-xz])
 dnl
@@ -864,6 +872,7 @@ heartbeat/Makefile						\
    heartbeat/ocf-directories					\
    heartbeat/ocf-shellfuncs					\
    heartbeat/shellfuncs						\
+systemd/Makefile						\
 tools/Makefile							\
    tools/ocf-tester						\
    tools/ocft/Makefile						\
@@ -901,7 +910,7 @@ dnl *****************
 AC_MSG_RESULT([])
 AC_MSG_RESULT([$PACKAGE configuration:])
 AC_MSG_RESULT([  Version                  = ${VERSION}])
-AC_MSG_RESULT([  Build Version            = 150fb85f2a442f53157fb8063089817e6ee05b00])
+AC_MSG_RESULT([  Build Version            = 4a3326643bc4b45520c525c0094ac98a9746da6a])
 AC_MSG_RESULT([  Features                 =${PKG_FEATURES}])
 AC_MSG_RESULT([])
 AC_MSG_RESULT([  Prefix                   = ${prefix}])
diff --git a/doc/dev-guides/ra-dev-guide.asc b/doc/dev-guides/ra-dev-guide.asc
index 7191bdd..9fe9bfa 100644
--- a/doc/dev-guides/ra-dev-guide.asc
+++ b/doc/dev-guides/ra-dev-guide.asc
@@ -261,10 +261,8 @@ the same node.
 
 === +OCF_ERR_ARGS+ (2)
 
-The resource agent was invoked with incorrect arguments. This is a
-safety net "can't happen" error which the resource agent should only
-return when invoked with, for example, an incorrect number of command
-line arguments.
+The resource’s configuration is not valid on this machine. E.g. it
+refers to a location not found on the node.
 
 NOTE: The resource agent should not return this error when instructed
 to perform an action that it does not support. Instead, under those
diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index 43a3f70..e8a0f19 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -69,10 +69,12 @@ man_MANS	       = ocf_heartbeat_AoEtarget.7 \
                           ocf_heartbeat_IPaddr2.7 \
                           ocf_heartbeat_IPsrcaddr.7 \
                           ocf_heartbeat_LVM.7 \
+                          ocf_heartbeat_LVM-activate.7 \
                           ocf_heartbeat_LinuxSCSI.7 \
                           ocf_heartbeat_MailTo.7 \
                           ocf_heartbeat_ManageRAID.7 \
                           ocf_heartbeat_ManageVE.7 \
+                          ocf_heartbeat_NodeUtilization.7 \
                           ocf_heartbeat_Pure-FTPd.7 \
                           ocf_heartbeat_Raid1.7 \
                           ocf_heartbeat_Route.7 \
@@ -94,6 +96,10 @@ man_MANS	       = ocf_heartbeat_AoEtarget.7 \
                           ocf_heartbeat_anything.7 \
                           ocf_heartbeat_apache.7 \
                           ocf_heartbeat_asterisk.7 \
+                          ocf_heartbeat_aws-vpc-move-ip.7 \
+                          ocf_heartbeat_aws-vpc-route53.7 \
+                          ocf_heartbeat_awseip.7 \
+                          ocf_heartbeat_awsvip.7 \
                           ocf_heartbeat_clvm.7 \
                           ocf_heartbeat_conntrackd.7 \
                           ocf_heartbeat_db2.7 \
@@ -114,6 +120,7 @@ man_MANS	       = ocf_heartbeat_AoEtarget.7 \
                           ocf_heartbeat_iscsi.7 \
                           ocf_heartbeat_jboss.7 \
                           ocf_heartbeat_kamailio.7 \
+                          ocf_heartbeat_lvmlockd.7 \
                           ocf_heartbeat_lxc.7 \
                           ocf_heartbeat_mysql.7 \
                           ocf_heartbeat_mysql-proxy.7 \
@@ -122,6 +129,7 @@ man_MANS	       = ocf_heartbeat_AoEtarget.7 \
                           ocf_heartbeat_nfsnotify.7 \
                           ocf_heartbeat_nfsserver.7 \
                           ocf_heartbeat_nginx.7 \
+                          ocf_heartbeat_oraasm.7 \
                           ocf_heartbeat_oracle.7 \
                           ocf_heartbeat_oralsnr.7 \
                           ocf_heartbeat_pgsql.7 \
diff --git a/heartbeat/CTDB b/heartbeat/CTDB
index b23ffae..709dbc8 100755
--- a/heartbeat/CTDB
+++ b/heartbeat/CTDB
@@ -553,6 +553,7 @@ ctdb_start() {
 	persistent_db_dir="${OCF_RESKEY_ctdb_dbdir}/persistent"
 	mkdir -p $persistent_db_dir 2>/dev/null
 	for pdbase in $persistent_db_dir/*.tdb.[0-9]; do
+		[ -f "$pdbase" ] || break
 		/usr/bin/tdbdump "$pdbase" >/dev/null 2>/dev/null || {
 			ocf_exit_reason "Persistent database $pdbase is corrupted!  CTDB will not start."
 			return $OCF_ERR_GENERIC
@@ -572,10 +573,22 @@ ctdb_start() {
 
 	# Use logfile by default, or syslog if asked for
 	local log_option
-	log_option="--logfile=$OCF_RESKEY_ctdb_logfile"
-	if [ "$OCF_RESKEY_ctdb_logfile" = "syslog" ]; then
-		log_option="--syslog"
-	elif [ ! -d "$(dirname $OCF_RESKEY_ctdb_logfile)" ]; then
+	# --logging supported from v4.3.0 and --logfile / --syslog support 
+	# has been removed from newer versions
+	version=$(ctdb version | awk '{print $NF}')
+	ocf_version_cmp "$version" "4.2.14"
+	if [ "$?" -eq "2" ]; then
+		log_option="--logging=file:$OCF_RESKEY_ctdb_logfile"
+		if [ "$OCF_RESKEY_ctdb_logfile" = "syslog" ]; then
+			log_option="--logging=syslog"
+		fi
+	else
+		log_option="--logfile=$OCF_RESKEY_ctdb_logfile"
+		if [ "$OCF_RESKEY_ctdb_logfile" = "syslog" ]; then
+			log_option="--syslog"
+		fi
+	fi
+	if [ ! -d "$(dirname $OCF_RESKEY_ctdb_logfile)" ]; then
 		# ensure the logfile's directory exists, otherwise ctdb will fail to start
 		mkdir -p $(dirname $OCF_RESKEY_ctdb_logfile)
 	fi
diff --git a/heartbeat/IPaddr2 b/heartbeat/IPaddr2
index 27b7208..5fd8dba 100755
--- a/heartbeat/IPaddr2
+++ b/heartbeat/IPaddr2
@@ -55,7 +55,7 @@
 #	OCF_RESKEY_arp_interval
 #	OCF_RESKEY_arp_count
 #	OCF_RESKEY_arp_bg
-#	OCF_RESKEY_arp_mac
+#	OCF_RESKEY_preferred_lft
 #
 #	OCF_RESKEY_CRM_meta_clone
 #	OCF_RESKEY_CRM_meta_clone_max
@@ -78,8 +78,8 @@ OCF_RESKEY_arp_interval_default=200
 OCF_RESKEY_arp_count_default=5
 OCF_RESKEY_arp_count_refresh_default=0
 OCF_RESKEY_arp_bg_default=true
-OCF_RESKEY_arp_mac_default="ffffffffffff"
 OCF_RESKEY_run_arping_default=false
+OCF_RESKEY_preferred_lft_default="forever"
 
 : ${OCF_RESKEY_lvs_support=${OCF_RESKEY_lvs_support_default}}
 : ${OCF_RESKEY_lvs_ipv6_addrlabel=${OCF_RESKEY_lvs_ipv6_addrlabel_default}}
@@ -90,8 +90,8 @@ OCF_RESKEY_run_arping_default=false
 : ${OCF_RESKEY_arp_count=${OCF_RESKEY_arp_count_default}}
 : ${OCF_RESKEY_arp_count_refresh=${OCF_RESKEY_arp_count_refresh_default}}
 : ${OCF_RESKEY_arp_bg=${OCF_RESKEY_arp_bg_default}}
-: ${OCF_RESKEY_arp_mac=${OCF_RESKEY_arp_mac_default}}
 : ${OCF_RESKEY_run_arping=${OCF_RESKEY_run_arping_default}}
+: ${OCF_RESKEY_preferred_lft=${OCF_RESKEY_preferred_lft_default}}
 #######################################################################
 
 SENDARP=$HA_BIN/send_arp
@@ -271,8 +271,12 @@ a unique address to manage
 <parameter name="arp_interval">
 <longdesc lang="en">
 Specify the interval between unsolicited ARP packets in milliseconds.
+
+This parameter is deprecated and used for the backward compatibility only.
+It is effective only for the send_arp binary which is built with libnet,
+and send_ua for IPv6. It has no effect for other arp_sender.
 </longdesc>
-<shortdesc lang="en">ARP packet interval in ms</shortdesc>
+<shortdesc lang="en">ARP packet interval in ms (deprecated)</shortdesc>
 <content type="integer" default="${OCF_RESKEY_arp_interval_default}"/>
 </parameter>
 
@@ -302,24 +306,27 @@ Whether or not to send the ARP packets in the background.
 <content type="string" default="${OCF_RESKEY_arp_bg_default}"/>
 </parameter>
 
-<parameter name="arp_mac">
+<parameter name="arp_sender">
 <longdesc lang="en">
-MAC address to send the ARP packets to.
-
-You really shouldn't be touching this.
-
+The program to send ARP packets with on start. Available options are:
+ - send_arp: default
+ - ipoibarping: default for infiniband interfaces if ipoibarping is available
+ - iputils_arping: use arping in iputils package
+ - libnet_arping: use another variant of arping based on libnet
 </longdesc>
-<shortdesc lang="en">ARP MAC</shortdesc>
-<content type="string" default="${OCF_RESKEY_arp_mac_default}"/>
+<shortdesc lang="en">ARP sender</shortdesc>
+<content type="string" default=""/>
 </parameter>
 
-<parameter name="arp_sender">
+<parameter name="send_arp_opts">
 <longdesc lang="en">
-The program to send ARP packets with on start. For infiniband
-interfaces, default is ipoibarping. If ipoibarping is not
-available, set this to send_arp.
+Extra options to pass to the arp_sender program.
+Available options vary depending on which arp_sender is used.
+
+A typical use case is specifying '-A' for iputils_arping to use
+ARP REPLY instead of ARP REQUEST as Gratuitous ARPs.
 </longdesc>
-<shortdesc lang="en">ARP sender</shortdesc>
+<shortdesc lang="en">Options for ARP sender</shortdesc>
 <content type="string" default=""/>
 </parameter>
 
@@ -350,6 +357,17 @@ Whether or not to run arping for IPv4 collision detection check.
 <content type="string" default="${OCF_RESKEY_run_arping_default}"/>
 </parameter>
 
+<parameter name="preferred_lft">
+<longdesc lang="en">
+For IPv6, set the preferred lifetime of the IP address.
+This can be used to ensure that the created IP address will not
+be used as a source address for routing.
+Expects a value as specified in section 5.5.4 of RFC 4862.
+</longdesc>
+<shortdesc lang="en">IPv6 preferred lifetime</shortdesc>
+<content type="string" default="${OCF_RESKEY_preferred_lft_default}"/>
+</parameter>
+
 </parameters>
 <actions>
 <action name="start"   timeout="20s" />
@@ -590,6 +608,10 @@ add_interface () {
 		cmd="$cmd label $label"
 		msg="${msg} (with label $label)"
 	fi
+	if [ "$FAMILY" = "inet6" ] ;then
+		cmd="$cmd preferred_lft $OCF_RESKEY_preferred_lft"
+		msg="${msg} (with preferred_lft $OCF_RESKEY_preferred_lft)"
+	fi
 
 	ocf_log info "$msg"
 	ocf_run $cmd || return $OCF_ERR_GENERIC
@@ -687,19 +709,80 @@ is_infiniband() {
 	$IP2UTIL link show $NIC | grep link/infiniband >/dev/null
 }
 
-#
-# Run send_arp to note peers about new mac address
-#
-run_send_arp() {
-	if [ "x$IP_CIP" = "xyes" ] ; then
-	    if [ x = "x$IF_MAC" ] ; then
-		MY_MAC=auto
+log_arp_sender() {
+    local cmdline
+    local output
+    local rc
+    cmdline="$@"
+
+    output=$($cmdline 2>&1)
+    rc=$?
+    if [ $rc -ne 0 ] && \
+       [ "$ARP_SENDER" != "libnet_arping" ] ; then
+        # libnet_arping always return an error as no answers
+        ocf_log err "Could not send gratuitous arps: rc=$rc"
+    fi
+    ocf_log $LOGLEVEL "$output"
+}
+
+# wrapper function to manage PID file to run arping in background
+run_with_pidfile() {
+    local cmdline
+    local pid
+    local rc
+
+    cmdline="$@"
+
+    $cmdline &
+    pid=$!
+    echo "$pid" > $SENDARPPIDFILE
+    wait $pid
+    rc=$?
+    rm -f $SENDARPPIDFILE
+    return $rc
+}
+
+build_arp_sender_cmd() {
+    case "$ARP_SENDER" in
+	send_arp)
+	    if [ "x$IP_CIP" = "xyes" ] ; then
+	        if [ x = "x$IF_MAC" ] ; then
+		    MY_MAC=auto
+	        else
+		    # send_arp.linux should return without doing anything in this case
+		    MY_MAC=`echo ${IF_MAC} | sed -e 's/://g'`
+	        fi
 	    else
-		MY_MAC=`echo ${IF_MAC} | sed -e 's/://g'`
+		    MY_MAC=auto
 	    fi
-	else
-		MY_MAC=auto
-	fi
+
+	    ARGS="$OCF_RESKEY_send_arp_opts -i $OCF_RESKEY_arp_interval -r $ARP_COUNT -p $SENDARPPIDFILE $NIC $OCF_RESKEY_ip $MY_MAC not_used not_used"
+	    ARP_SENDER_CMD="$SENDARP $ARGS"
+	    ;;
+	iputils_arping)
+	    ARGS="$OCF_RESKEY_send_arp_opts -U -c $ARP_COUNT -I $NIC $OCF_RESKEY_ip"
+	    ARP_SENDER_CMD="run_with_pidfile arping $ARGS"
+	    ;;
+	libnet_arping)
+	    ARGS="$OCF_RESKEY_send_arp_opts -U -c $ARP_COUNT -i $NIC -S $OCF_RESKEY_ip $OCF_RESKEY_ip"
+	    ARP_SENDER_CMD="run_with_pidfile arping $ARGS"
+	    ;;
+	ipoibarping)
+	    ARGS="-q -c $ARP_COUNT -U -I $NIC $OCF_RESKEY_ip"
+	    ARP_SENDER_CMD="ipoibarping $ARGS"
+	    ;;
+	*)
+	    # should not occur
+	    ocf_exit_reason "unrecognized arp_sender value: $ARP_SENDER"
+	    exit $OCF_ERR_GENERIC
+	    ;;
+    esac
+}
+
+#
+# Send Unsolicited ARPs to update neighbor's ARP cache
+#
+run_arp_sender() {
 	if [ "x$1" = "xrefresh" ] ; then
 		ARP_COUNT=$OCF_RESKEY_arp_count_refresh
 		LOGLEVEL=debug
@@ -707,17 +790,32 @@ run_send_arp() {
 		ARP_COUNT=$OCF_RESKEY_arp_count
 		LOGLEVEL=info
 	fi
-	if [ $ARP_COUNT -ne 0 ] ; then
-		ARGS="-i $OCF_RESKEY_arp_interval -r $ARP_COUNT -p $SENDARPPIDFILE $NIC $OCF_RESKEY_ip $MY_MAC not_used not_used"
-		ocf_log $LOGLEVEL "$SENDARP $ARGS"
-		if ocf_is_true $OCF_RESKEY_arp_bg; then
-			($SENDARP $ARGS || ocf_log err "Could not send gratuitous arps")& >&2
-		else
-			$SENDARP $ARGS || ocf_log err "Could not send gratuitous arps"
-		fi
+	if [ $ARP_COUNT -eq 0 ] ; then
+		return
+	fi
+
+	# do not need to send Gratuitous ARPs in the Cluster IP configuration
+	# except send_arp.libnet binary to retain the old behavior
+	if [ "x$IP_CIP" = "xyes" ] && \
+	   [ "x$ARP_SENDER" != "xsend_arp" ] ; then
+		ocf_log info "Gratuitous ARPs are not sent in the Cluster IP configuration"
+		return
+	fi
+
+        # prepare arguments for each arp sender program
+        # $ARP_SENDER_CMD should be set
+	build_arp_sender_cmd
+
+	ocf_log $LOGLEVEL "$ARP_SENDER_CMD"
+
+	if ocf_is_true $OCF_RESKEY_arp_bg; then
+		log_arp_sender $ARP_SENDER_CMD &
+	else
+		log_arp_sender $ARP_SENDER_CMD
 	fi
 }
 
+
 #
 # Run send_ua to note send ICMPv6 Unsolicited Neighbor Advertisements.
 #
@@ -757,28 +855,6 @@ run_send_ua() {
 	$SENDUA $ARGS || ocf_log err "Could not send ICMPv6 Unsolicited Neighbor Advertisements."
 }
 
-#
-# Run ipoibarping to note peers about new Infiniband address
-#
-run_send_ib_arp() {
-	if [ "x$1" = "xrefresh" ] ; then
-		ARP_COUNT=$OCF_RESKEY_arp_count_refresh
-		LOGLEVEL=debug
-	else
-		ARP_COUNT=$OCF_RESKEY_arp_count
-		LOGLEVEL=info
-	fi
-	if [ $ARP_COUNT -ne 0 ] ; then
-		ARGS="-q -c $ARP_COUNT -U -I $NIC $OCF_RESKEY_ip"
-		ocf_log $LOGLEVEL "ipoibarping $ARGS"
-		if ocf_is_true $OCF_RESKEY_arp_bg; then
-			(ipoibarping $ARGS || ocf_log err "Could not send gratuitous arps")& >&2
-		else
-			ipoibarping $ARGS || ocf_log err "Could not send gratuitous arps"
-		fi
-	fi
-}
-
 # Do we already serve this IP address on the given $NIC?
 #
 # returns:
@@ -910,7 +986,7 @@ ip_start() {
 		;;
 	*)
 		if [ $FAMILY = "inet" ];then
-		    $ARP_SEND_FUN
+		    run_arp_sender
 		else
 		    if [ -x $SENDUA ]; then
 			run_send_ua
@@ -941,8 +1017,8 @@ ip_stop() {
 			ocf_log warn "Could not kill previously running send_arp for $OCF_RESKEY_ip"
 		else
 			ocf_log info "killed previously running send_arp for $OCF_RESKEY_ip"
-			rm -f "$SENDARPPIDFILE"
 		fi
+		rm -f "$SENDARPPIDFILE"
 	fi
 	local ip_status=`ip_served`
 	ocf_log info "IP status = $ip_status, IP_CIP=$IP_CIP"
@@ -997,7 +1073,7 @@ ip_monitor() {
 	local ip_status=`ip_served`
 	case $ip_status in
 	ok)
-		$ARP_SEND_FUN refresh
+		run_arp_sender refresh
 		return $OCF_SUCCESS
 		;;
 	partial|no|partial2)
@@ -1012,29 +1088,35 @@ ip_monitor() {
 
 # make sure that we have something to send ARPs with
 set_send_arp_program() {
-    ARP_SEND_FUN=run_send_arp
+    ARP_SENDER=send_arp
     if [ -n "$OCF_RESKEY_arp_sender" ]; then
 	case "$OCF_RESKEY_arp_sender" in
 	send_arp)
 	    check_binary $SENDARP
 	;;
+	iputils_arping)
+	    check_binary arping
+	;;
+	libnet_arping)
+	    check_binary arping
+	;;
 	ipoibarping)
 	    check_binary ipoibarping
-	    ARP_SEND_FUN=run_send_ib_arp
 	;;
 	*)
 	    ocf_exit_reason "unrecognized arp_sender value: $OCF_RESKEY_arp_sender"
 	    exit $OCF_ERR_CONFIGURED
 	;;
 	esac
+	ARP_SENDER="$OCF_RESKEY_arp_sender"
     else
 	if is_infiniband; then
-	    ARP_SEND_FUN=run_send_ib_arp
+	    ARP_SENDER=ipoibarping
 	    if ! have_binary ipoibarping; then
 	    	[ "$__OCF_ACTION" = start ] &&
 		    ocf_log warn "using send_arp for infiniband because ipoibarping is not available (set arp_sender to \"send_arp\" to suppress this message)"
 		check_binary $SENDARP
-		ARP_SEND_FUN=run_send_arp
+		ARP_SENDER=send_arp
 	    fi
 	fi
     fi
@@ -1076,6 +1158,11 @@ ip_validate() {
 	exit $OCF_ERR_CONFIGURED
     fi
 
+    if [ -z "$OCF_RESKEY_preferred_lft" ]; then
+	ocf_exit_reason "Empty value is invalid for OCF_RESKEY_preferred_lft"
+	exit $OCF_ERR_CONFIGURED
+    fi
+
     if [ -n "$IP_CIP" ]; then
 
 	local valid=1
diff --git a/heartbeat/LVM b/heartbeat/LVM
index 90a900b..aa1276e 100755
--- a/heartbeat/LVM
+++ b/heartbeat/LVM
@@ -51,9 +51,6 @@ usage() {
 EOF
 }
 
-# default for "tag"
-OUR_TAG="pacemaker"
-
 meta_data() {
 	cat <<EOF
 <?xml version="1.0"?>
@@ -93,7 +90,7 @@ can be as simple as setting 'volume_list = []' depending on your setup.
 If "exclusive" is set on a non clustered volume group, this overrides the tag to be used.
 </longdesc>
 <shortdesc lang="en">Exclusive activation tag</shortdesc>
-<content type="string" default="$OUR_TAG" />
+<content type="string" default="pacemaker" />
 </parameter>
 
 <parameter name="partial_activation" unique="0" required="0">
@@ -138,158 +135,29 @@ EOF
 }
 
 ##
-# returns mode
 #
-# 0 = normal (non-exclusive) local activation
-# 1 = tagged-exclusive activation
-# 2 = clvm-exclusive activation
+# plain = normal (non-exclusive) local activation
+# tag = tagged-exclusive activation
+# clvm = clvm-exclusive activation
+#
+# the mode specific implementation is in lvm-$mode.sh
 ##
-VG_MODE=
-get_vg_mode()
-{
-	if [ -n "$VG_MODE" ]; then
-		echo "$VG_MODE"
-		return
-	fi
 
-	VG_MODE=0
+set_lvm_mode() {
+	local mode
+
 	if ocf_is_true "$OCF_RESKEY_exclusive"; then
 		case $(vgs -o attr --noheadings $OCF_RESKEY_volgrpname | tr -d ' ') in
 		?????c*)
-			VG_MODE=2 ;;
+			mode="clvm" ;;
 		*)
-			VG_MODE=1 ;;
+			mode="tag" ;;
 		esac
+	else
+		mode="plain"
 	fi
 
-	echo "$VG_MODE"
-}
-
-##
-# Verify tags setup
-##
-verify_tags_environment()
-{
-	##
-	# The volume_list must be initialized to something in order to
-	# guarantee our tag will be filtered on startup
-	##
-	if ! lvm dumpconfig activation/volume_list; then
-		ocf_log err  "LVM: Improper setup detected"
-		ocf_exit_reason "The volume_list filter must be initialized in lvm.conf for exclusive activation without clvmd"
-		return $OCF_ERR_GENERIC
-	fi
-
-	##
-	# Our tag must _NOT_ be in the volume_list.  This agent
-	# overrides the volume_list during activation using the
-	# special tag reserved for cluster activation
-	##
-	if lvm dumpconfig activation/volume_list | grep -e "\"@$OUR_TAG\"" -e "\"${OCF_RESKEY_volgrpname}\""; then
-		ocf_log err "LVM:  Improper setup detected"
-		ocf_exit_reason "The volume_list in lvm.conf must not contain the cluster tag, \"$OUR_TAG\", or volume group, $OCF_RESKEY_volgrpname"
-		return $OCF_ERR_GENERIC
-	fi
-
-	return $OCF_SUCCESS
-}
-
-check_initrd_warning()
-{
-	# First check to see if there is an initrd img we can safely
-	# compare timestamps agaist.  If not, don't even bother with
-	# this check.  This is known to work in rhel/fedora distros
-	ls "/boot/*$(uname -r)*.img" > /dev/null 2>&1
-	if [ $? -ne 0 ]; then
-		return
-	fi
-
-	##
-	# Now check to see if the initrd has been updated.
-	# If not, the machine could boot and activate the VG outside
-	# the control of pacemaker
-	##
-	if [ "$(find /boot -name *.img -newer /etc/lvm/lvm.conf)" = "" ]; then
-		ocf_log warn "LVM:  Improper setup detected"
-		ocf_log warn "* initrd image needs to be newer than lvm.conf"
-
-		# While dangerous if not done the first time, there are many
-		# cases where we don't simply want to fail here.  Instead,
-		# keep warning until the user remakes the initrd - or has
-		# it done for them by upgrading the kernel.
-		#
-		# initrd can be updated using this command.
-		# dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r)
-		#
-	fi
-}
-
-##
-# does this vg have our tag
-##
-check_tags()
-{
-	local owner=`vgs -o tags --noheadings $OCF_RESKEY_volgrpname | tr -d ' '`
-
-	if [ -z "$owner" ]; then
-		# No-one owns this VG yet
-		return 1
-	fi
-
-	if [ "$OUR_TAG" = "$owner" ]; then
-		# yep, this is ours
-		return 0
-	fi
-
-	# some other tag is set on this vg
-	return 2
-}
-
-strip_tags()
-{
-	local i
-
-	for i in `vgs --noheadings -o tags $OCF_RESKEY_volgrpname | sed s/","/" "/g`; do
-		ocf_log info "Stripping tag, $i"
-
-		# LVM version 2.02.98 allows changing tags if PARTIAL
-		vgchange --deltag $i $OCF_RESKEY_volgrpname
-	done
-
-	if [ ! -z `vgs -o tags --noheadings $OCF_RESKEY_volgrpname | tr -d ' '` ]; then
-		ocf_exit_reason "Failed to remove ownership tags from $OCF_RESKEY_volgrpname"
-		return $OCF_ERR_GENERIC
-	fi
-
-	return $OCF_SUCCESS
-}
-
-set_tags()
-{
-	check_tags
-	case $? in
-	0)
-		# we already own it.
-		return $OCF_SUCCESS
-		;;
-	2)
-		# other tags are set, strip them before setting
-		if ! strip_tags; then
-			return $OCF_ERR_GENERIC
-		fi
-		;;
-	*)
-		: ;;
-	esac
-
-	vgchange --addtag $OUR_TAG $OCF_RESKEY_volgrpname
-	if [ $? -ne 0 ]; then
-		ocf_exit_reason "Failed to add ownership tag to $OCF_RESKEY_volgrpname"
-		return $OCF_ERR_GENERIC
-	fi
-
-	ocf_log info "New tag \"$OUR_TAG\" added to $OCF_RESKEY_volgrpname"
-	return $OCF_SUCCESS
+	. ${OCF_FUNCTIONS_DIR}/lvm-${mode}.sh
 }
 
 #
@@ -323,25 +191,8 @@ LVM_status() {
 		ocf_log $loglevel "LVM Volume $1 is not available (stopped)"
 		rc=$OCF_NOT_RUNNING
 	else
-		case $(get_vg_mode) in
-		1) # exclusive with tagging.
-			# If vg is running, make sure the correct tag is present. Otherwise we
-			# can not guarantee exclusive activation.
-			if ! check_tags; then
-				ocf_exit_reason "WARNING: $OCF_RESKEY_volgrpname is active without the cluster tag, \"$OUR_TAG\""
-				rc=$OCF_ERR_GENERIC
-			fi
-
-			# make sure the environment for tags activation is still valid
-			if ! verify_tags_environment; then
-				rc=$OCF_ERR_GENERIC
-			fi
-			# let the user know if their initrd is older than lvm.conf.
-			check_initrd_warning
-			;;
-		*)
-			: ;;
-		esac
+		lvm_status
+		rc=$?
 	fi
 
 	if [ "X${2}" = "X" ]; then
@@ -358,63 +209,17 @@ LVM_status() {
 	return $rc
 }
 
-get_activate_options()
-{
-	local options="-a"
-
-	case $(get_vg_mode) in
-	0) options="${options}ly";;
-	1) options="${options}y --config activation{volume_list=[\"@${OUR_TAG}\"]}";;
-	2) options="${options}ey";;
-	esac
-
-	if ocf_is_true "$OCF_RESKEY_partial_activation" ; then
-		options="${options} --partial"
-	fi
-
-	# for clones (clustered volume groups), we'll also have to force
-	# monitoring, even if disabled in lvm.conf.
-	if ocf_is_clone; then
-		options="$options --monitor y"
-	fi
-	
-	echo $options
-}
-
-##
-# Attempt to deactivate vg cluster wide and then start the vg exclusively
-##
-retry_exclusive_start()
-{
-	local vgchange_options="$(get_activate_options)"
-
-	# Deactivate each LV in the group one by one cluster wide
-	set -- $(lvs -o name,attr --noheadings $OCF_RESKEY_volgrpname 2> /dev/null)
-	while [ $# -ge 2 ]; do
-		case $2 in
-		????ao*)
-			# open LVs cannot be deactivated.
-			return $OCF_ERR_GENERIC;;
-		*)
-			if ! lvchange -an $OCF_RESKEY_volgrpname/$1; then
-				ocf_exit_reason "Unable to perform required deactivation of $OCF_RESKEY_volgrpname/$1 before starting"
-				return $OCF_ERR_GENERIC
-			fi
-			;;
-		esac
-		shift 2
-	done
-
-	ocf_run vgchange $vgchange_options $OCF_RESKEY_volgrpname
-}
-
 #
 #	Enable LVM volume
 #
 LVM_start() {
-	local vgchange_options="$(get_activate_options)"
 	local vg=$1
-	local clvmd=0
+
+	# systemd drop-in to stop process before storage services during
+	# shutdown/reboot
+	if ps -p 1 | grep -q systemd ; then
+		systemd_drop_in "99-LVM" "After" "blk-availability.service"
+	fi
 
 	# TODO: This MUST run vgimport as well
 	ocf_log info "Activating volume group $vg"
@@ -424,33 +229,9 @@ LVM_start() {
 		ocf_run vgscan
 	fi
 
-	case $(get_vg_mode) in
-	2)
-		clvmd=1
-		;;
-	1)
-		if ! set_tags; then
-			return $OCF_ERR_GENERIC
-		fi
-		;;
-	*)
-		: ;;
-	esac
-
-	if ! ocf_run vgchange $vgchange_options $vg; then
-		if [ $clvmd -eq 0 ]; then
-			return $OCF_ERR_GENERIC
-		fi
-
-		# Failure to exclusively activate cluster vg.:
-		# This could be caused by a remotely active LV, Attempt
-		# to disable volume group cluster wide and try again.
-		# Allow for some settling
-		sleep 5
-		if ! retry_exclusive_start; then
-			return $OCF_ERR_GENERIC
-		fi
-	fi
+	lvm_pre_activate || exit
+	ocf_run vgchange $vgchange_activate_options $vg
+	lvm_post_activate $?
 
 	if LVM_status $vg; then
 		: OK Volume $vg activated just fine!
@@ -466,7 +247,6 @@ LVM_start() {
 #
 LVM_stop() {
 	local res=$OCF_ERR_GENERIC
-	local vgchange_options="-aln"
 	local vg=$1
 
 	if ! vgs $vg > /dev/null 2>&1; then
@@ -476,13 +256,11 @@ LVM_stop() {
 
 	ocf_log info "Deactivating volume group $vg"
 
-	case $(get_vg_mode) in
-		1) vgchange_options="-an" ;;
-	esac
+	lvm_pre_deactivate || exit
 
 	for i in $(seq 10)
 	do
-		ocf_run vgchange $vgchange_options $vg
+		ocf_run vgchange $vgchange_deactivate_options $vg
 		res=$?
 		if LVM_status $vg; then
 			ocf_exit_reason "LVM: $vg did not stop correctly"
@@ -500,16 +278,7 @@ LVM_stop() {
 		which udevadm > /dev/null 2>&1 && udevadm settle --timeout=5
 	done
 
-	case $(get_vg_mode) in
-	1)
-		if [ $res -eq 0 ]; then
-			strip_tags
-			res=$?
-		fi
-		;;
-	esac
-
-	return $res
+	lvm_post_deactivate $res
 }
 
 #
@@ -583,46 +352,18 @@ LVM_validate_all() {
 		exit $OCF_ERR_GENERIC
 	fi
 
-	##
-	# If exclusive activation is not enabled, then
-	# further checking of proper setup is not necessary
-	##
-	if ! ocf_is_true "$OCF_RESKEY_exclusive"; then
-		return $OCF_SUCCESS;
+	if lvs --noheadings -o segtype | grep -q "cache"; then
+		if ! lvs --noheadings -o cache_mode "$OCF_RESKEY_volgrpname" | grep -q "writethrough"; then
+			ocf_log warn "LVM CACHE IS NOT IN WRITETHROUGH MODE. THIS IS NOT A SUPPORTED CONFIGURATION."
+		fi
 	fi
 
-	##
-	# Having cloned lvm resources with exclusive vg activation makes no sense at all.
-	##
-	if ocf_is_clone; then
+	if ocf_is_clone && ocf_is_true "$OCF_RESKEY_exclusive"; then
 		ocf_exit_reason "cloned lvm resources can not be activated exclusively"
 		exit $OCF_ERR_CONFIGURED
 	fi
 
-	##
-	# Make sure the cluster attribute is set and clvmd is up when exclusive
-	# activation is enabled. Otherwise we can't exclusively activate the volume group.
-	##
-	case $(get_vg_mode) in
-	1)  # exclusive activation using tags
-		if ! verify_tags_environment; then
-			exit $OCF_ERR_GENERIC
-		fi
-		;;
-	2)  # exclusive activation with clvmd
-		##
-		# verify is clvmd running
-		##
-		if ! ps -C clvmd > /dev/null 2>&1; then
-			ocf_exit_reason "$OCF_RESKEY_volgrpname has the cluster attribute set, but 'clvmd' is not running"
-			exit $OCF_ERR_GENERIC
-		fi
-		;;
-	*)
-		: ;;
-	esac
-
-	return $OCF_SUCCESS
+	lvm_validate_all
 }
 
 #
@@ -686,8 +427,10 @@ LVM_MAJOR="${LVM_VERSION%%.*}"
 VOLUME=$OCF_RESKEY_volgrpname
 OP_METHOD=$1
 
-if [ -n "$OCF_RESKEY_tag" ]; then
-	OUR_TAG=$OCF_RESKEY_tag
+set_lvm_mode
+lvm_init
+if ocf_is_true "$OCF_RESKEY_partial_activation" ; then
+	vgchange_activate_options="${vgchange_activate_options} --partial"
 fi
 
 # What kind of method was invoked?
diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
new file mode 100755
index 0000000..4004e56
--- /dev/null
+++ b/heartbeat/LVM-activate
@@ -0,0 +1,840 @@
+#!/bin/sh
+#
+#
+# Copyright (c) 2017 SUSE LINUX, Eric Ren
+#			All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like.	Any license provided herein, whether implied or
+# otherwise, applies only to this software file.  Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+# LVM-activate OCF Resource Agent:
+#
+# Logical volume manager (LVM) provides new features for cluster environment:
+# lvmlockd and system ID, which aim to replace clvmd and tagged-exclusive
+# activation. Accordingly, we have created a new resource agent named "lvmlockd"
+# to manage lvmlockd daemon. In addition, this new resource agent "LVM-activate"
+# is created to take care of LVM activation/deactivation work. This agent supports
+# the new features: lvmlockd and system ID, and also supports the old features:
+# clvmd and lvm tag.
+#
+# Thanks David Teigland! He is the author of these LVM features, giving valuable
+# ideas/feedback about this resource agent.
+############################################################################
+
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+# If LV is given, only activate this named LV; otherwise, activate all
+# LVs in the named VG.
+VG=${OCF_RESKEY_vgname}
+LV=${OCF_RESKEY_lvname}
+
+# How LVM controls access to the VG:
+#
+# 0: place-holder for any incorrect cases; To be safe, we enforce the VG
+#    must use any of the following protection methods in cluster environment.
+# 1: vg is shared - lvmlockd (new)
+# 2: vg is clustered - clvmd (old)
+# 3: vg has system_id (new)
+# 4: vg has tagging (old)
+VG_access_mode=${OCF_RESKEY_vg_access_mode}
+
+# Activate LV(s) with "shared" lock for cluster fs
+# or "exclusive" lock for local fs
+LV_activation_mode=${OCF_RESKEY_activation_mode:-exclusive}
+
+# For system ID feature
+SYSTEM_ID=""
+
+# For tagging activation mode
+OUR_TAG=${OCF_RESKEY_tag:-pacemaker}
+
+#######################################################################
+
+meta_data() {
+	cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+
+
+<resource-agent name="LVM-activate">
+<version>1.0</version>
+
+<longdesc lang="en">
+This agent manages LVM activation/deactivation work for a given volume group.
+
+It supports the following modes, controlled by the vg_access_mode parameter:
+
+* lvmlockd
+* system_id
+* clvmd
+* tagging
+
+Notes:
+
+1. There are two possible configuration combinations: lvmlockd+LVM-activate and
+clvm+LVM-activate. However, it is not possible to use both at the same time!
+
+2. Put all "lvmlockd"/"clvmd" volume groups into auto management by the agent
+if using the cluster to manage at least one of them.  If you manage some manually,
+the stop action of the lvmlockd agent may fail and the node may get fenced,
+because some DLM lockspaces might be in use and cannot be closed automatically.
+</longdesc>
+<shortdesc lang="en">This agent activates/deactivates logical volumes.</shortdesc>
+
+<parameters>
+<parameter name="vgname" unique="1" required="1">
+<longdesc lang="en">
+The volume group name.
+</longdesc>
+<shortdesc lang="en">The volume group name</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="lvname" unique="0" required="0">
+<longdesc lang="en">
+If set, only the specified LV will be activated.
+</longdesc>
+<shortdesc lang="en">Only activate the given LV</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="vg_access_mode" unique="0" required="1">
+<longdesc lang="en">
+This option decides which solution will be used to protect the volume group in
+cluster environment. Optional solutions are: lvmlockd, clvmd, system_id and
+tagging.
+</longdesc>
+<shortdesc lang="en">The VG access mode</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="activation_mode" unique="0" required="0">
+<longdesc lang="en">
+The activation mode decides the visibility of logical volumes in the cluster. There
+are two different modes: "shared" for cluster filesystem and "exclusive" for local
+filesystem. With "shared", an LV can be activated concurrently from multiple nodes.
+With "exclusive", an LV can be activated by one node at a time.
+
+This option only has effect on "lvmlockd"/"clvmd" vg_access_mode. For "system_id"
+and "tagging", they always mean exclusive activation.
+</longdesc>
+<shortdesc lang="en">Logical volume activation mode</shortdesc>
+<content type="string" default="exclusive" />
+</parameter>
+
+<parameter name="tag" unique="0" required="0">
+<longdesc lang="en">
+The tag used for tagging activation mode.
+</longdesc>
+<shortdesc lang="en">The tag used for tagging activation mode</shortdesc>
+<content type="string" default="pacemaker" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start"		timeout="90" />
+<action name="stop"		timeout="90" />
+<action name="monitor"		timeout="90" interval="30" depth="0" />
+<action name="meta-data"	timeout="10" />
+<action name="validate-all"	timeout="20" />
+</actions>
+</resource-agent>
+END
+}
+
+#######################################################################
+
+usage() {
+	cat <<END
+usage: $0 {start|stop|monitor|validate-all|meta-data}
+END
+}
+
+# VG access modes:
+# 0: unsafe to activate LV without proper protection in cluster
+# 1: vg is shared - lvmlockd (new)
+# 2: vg is clustered - clvmd (old)
+# 3: vg has system_id (new)
+# 4: vg has tagging (old)
+get_VG_access_mode() {
+	local access_mode
+	local vg_locktype
+	local vg_clustered
+	local vg_systemid
+	local vg_tags
+
+	# Use -o reporting fields to get multiple bits of info from a single command
+	kvs=$(vgs --foreign --nolocking --noheadings --nameprefixes \
+		--rows --config report/compact_output=0 \
+		-o name,clustered,lock_type,systemid,tags ${VG} 2>/dev/null | tr -d \')
+	export ${kvs}
+	vg_locktype=${LVM2_VG_LOCK_TYPE}
+	vg_clustered=${LVM2_VG_CLUSTERED}
+	vg_systemid=${LVM2_VG_SYSTEMID}
+	vg_tags=${LVM2_VG_TAGS}
+
+	# We know this VG is using lvmlockd if the lock type is dlm.
+	if [ "$vg_locktype" = "dlm" ]; then
+		access_mode=1
+	elif [ "$vg_clustered" = "clustered" ]; then
+		access_mode=2
+	elif [ -n "$vg_systemid" ]; then
+		SYSTEM_ID=$(lvm systemid 2>/dev/null | cut -d':' -f2 | tr -d '[:blank:]')
+		access_mode=3
+	elif [ -n "$vg_tags" ]; then
+		# TODO:
+		# We don't have a reliable way to test if tagging activation is used.
+		access_mode=4
+	else
+		access_mode=0
+	fi
+
+	return $access_mode
+}
+
+# TODO: All tagging activation code is almost copied from LVM RA!!!
+# But, the old LVM RA just uses the ordinary tags, not the "hosttag" feature
+# which may be a better method for the active-inactive cluster scenario.
+#
+# We have two choices:
+# 1. Continue to use the LVM way, which may work well on old systems.
+# 2. Change to use the real hosttag feature, but it looks very similar
+# to system ID.
+# Anyway, we can easily change this if anyone requests with good reasons.
+
+# does this vg have our tag
+check_tags()
+{
+	local owner=$(vgs -o tags --noheadings ${VG} | tr -d '[:blank:]')
+
+        if [ -z "$owner" ]; then
+                # No-one owns this VG yet
+                return 1
+        fi
+
+        if [ "$OUR_TAG" = "$owner" ]; then
+                # yep, this is ours
+                return 0
+        fi
+
+        # some other tag is set on this vg
+        return 2
+}
+
+strip_tags()
+{
+        local tag
+
+	for tag in $(vgs --noheadings -o tags $OCF_RESKEY_volgrpname | sed s/","/" "/g); do
+                ocf_log info "Stripping tag, $tag"
+
+                # LVM version 2.02.98 allows changing tags if PARTIAL
+                vgchange --deltag "$tag" ${VG}
+        done
+
+	if [ ! -z $(vgs -o tags --noheadings ${VG} | tr -d '[:blank:]') ]; then
+                ocf_exit_reason "Failed to remove ownership tags from ${VG}"
+                exit $OCF_ERR_GENERIC
+        fi
+
+        return $OCF_SUCCESS
+}
+
+set_tags()
+{
+        case check_tags in
+        0)
+                # we already own it.
+                return $OCF_SUCCESS
+                ;;
+        2)
+                # other tags are set, strip them before setting
+                if ! strip_tags; then
+                        return $OCF_ERR_GENERIC
+                fi
+                ;;
+        *)
+                : ;;
+        esac
+
+        if ! vgchange --addtag $OUR_TAG ${VG} ; then
+                ocf_log err "Failed to add ownership tag to ${VG}"
+                return $OCF_ERR_GENERIC
+        fi
+
+        ocf_log info "New tag \"${OUR_TAG}\" added to ${VG}"
+        return $OCF_SUCCESS
+}
+
+# Parameters:
+# 1st: config item name
+# 2nd: expected config item value
+config_verify()
+{
+	local name=$1
+	local expect=$2
+	local real=""
+
+	real=$(lvmconfig "$name" | cut -d'=' -f2)
+	if [ "$real" != "$expect" ]; then
+		ocf_exit_reason "config item $name: expect=$expect but real=$real"
+		exit $OCF_ERR_CONFIGURED
+
+	fi
+
+	return $OCF_SUCCESS
+}
+
+lvmlockd_check()
+{
+	config_verify "global/use_lvmlockd" "1"
+	config_verify "global/locking_type" "1"
+
+	# We recommend activating one LV at a time so that this specific volume
+	# binds to a proper filesystem to protect the data.
+	# TODO:
+	# Will this warning message be too noisy?
+	if [ -z "$LV" ]; then
+		ocf_log warn "You are recommended to activate one LV at a time or use exclusive activation mode."
+	fi
+
+	# Good: lvmlockd is running, and clvmd is not running
+	if ! pgrep lvmlockd >/dev/null 2>&1 ; then
+		ocf_exit_reason "lvmlockd daemon is not running!"
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	if pgrep clvmd >/dev/null 2>&1 ; then
+		ocf_exit_reason "clvmd daemon is running unexpectedly."
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	return $OCF_SUCCESS
+}
+
+clvmd_check()
+{
+	config_verify "global/use_lvmetad" "0"
+	config_verify "global/use_lvmlockd" "0"
+	config_verify "global/locking_type" "3"
+
+	# TODO:
+	# David asked a question: does lvchange -aey work well enough with clvmd?
+	#
+	# Corey said: I think it does work well enough. We do a fair amount of exclusive
+	# activation clvm testing, and my experience is you'll get the LV activated on
+	# the node you ran the command on. But, I think the specific scenario and issue
+	# that surprised us all was when the LV was *already* exclusively active on say
+	# nodeA, and nodeB then attempts to also exclusively activate it as well. Instead
+	# of failing, the activation succeeds even though nodeB activation didn't occur.
+	# This is documented in the following bug:
+	#	https://bugzilla.redhat.com/show_bug.cgi?id=1191724#c8
+	# Technically, you're not guaranteed to have it activated on the node you run
+	# the cmd on, but again, that's not been my experience.
+	#
+	# Eric: Put the interesting discussion here so that we can be more careful on this.
+
+	# Good: clvmd is running, and lvmlockd is not running
+	if ! pgrep clvmd >/dev/null 2>&1 ; then
+		ocf_exit_reason "clvmd daemon is not running!"
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	if pgrep lvmetad >/dev/null 2>&1 ; then
+		ocf_exit_reason "Please stop lvmetad daemon when clvmd is running."
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	if pgrep lvmlockd >/dev/null 2>&1 ; then
+		ocf_exit_reason "lvmlockd daemon is running unexpectedly."
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	return $OCF_SUCCESS
+}
+
+systemid_check()
+{
+	local source
+
+	# system_id_source is set in lvm.conf
+	source=$(lvmconfig 'global/system_id_source' 2>/dev/null | cut -d"=" -f2)
+	if [ "$source" = "" ] || [ "$source" = "none" ]; then
+		ocf_exit_reason "system_id_source in lvm.conf is not set correctly!"
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	if [ -z ${SYSTEM_ID} ]; then
+		ocf_exit_reason "local/system_id is not set!"
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	return $OCF_SUCCESS
+}
+
+# Verify tags setup
+tagging_check()
+{
+        # The volume_list must be initialized to something in order to
+        # guarantee our tag will be filtered on startup
+        if ! lvm dumpconfig activation/volume_list; then
+                ocf_log err  "LVM: Improper setup detected"
+                ocf_exit_reason "The volume_list filter must be initialized in lvm.conf for exclusive          activation without clvmd"
+		exit $OCF_ERR_CONFIGURED
+        fi
+
+        # Our tag must _NOT_ be in the volume_list.  This agent
+        # overrides the volume_list during activation using the
+        # special tag reserved for cluster activation
+        if lvm dumpconfig activation/volume_list | grep -e "\"@${OUR_TAG}\"" -e "\"${VG}\"";  then
+                ocf_log err "LVM:  Improper setup detected"
+                ocf_exit_reason "The volume_list in lvm.conf must not contain the cluster tag, \"${OUR_TAG}\",   or volume group, ${VG}"
+		exit $OCF_ERR_CONFIGURED
+        fi
+
+        return $OCF_SUCCESS
+}
+
+lvm_validate() {
+	local lv_count
+	local mode
+
+	check_binary pgrep
+	# Every LVM command is just symlink to lvm binary
+	check_binary lvm
+	check_binary dmsetup
+
+	if ! vgs --foreign ${VG} >/dev/null 2>&1 ; then
+		# stop action exits successfully if the VG cannot be accessed...
+		if [ $__OCF_ACTION = "stop" ]; then
+			ocf_log warn "VG [${VG}] cannot be accessed, stop action exits successfully."
+			exit $OCF_SUCCESS
+		fi
+
+		ocf_exit_reason "Volume group[${VG}] doesn't exist, or not visible on this node!"
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	# Get the access mode from VG metadata and check if it matches the input
+	# value. Skip to check "tagging" mode because there's no reliable way to
+	# automatically check if "tagging" mode is being used.
+	get_VG_access_mode
+	mode=$?
+	if [ $VG_access_mode -ne 4 ] && [ $mode -ne $VG_access_mode ]; then
+		ocf_exit_reason "The specified vg_access_mode doesn't match the mode on VG metadata!"
+		exit $OCF_ERR_ARGS
+	fi
+
+	# Nothing to do if the VG has no logical volume
+	lv_count=$(vgs --foreign -o lv_count --noheadings ${VG} 2>/dev/null)
+	if [ $lv_count -lt 1 ]; then
+		ocf_exit_reason "Volume group [$VG] doesn't contain any logical volume!"
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	# Check if the given $LV is in the $VG
+	if [ -n "$LV" ]; then
+		OUT=$(lvs --foreign --noheadings ${VG}/${LV} 2>&1)
+		if [ $? -ne 0 ]; then
+			ocf_log err "lvs: ${OUT}"
+			ocf_exit_reason "LV ($LV) is not in the given VG ($VG)."
+			exit $OCF_ERR_ARGS
+		fi
+	fi
+
+	# VG_access_mode specific checking goes here
+	case ${VG_access_mode} in
+	1)
+		lvmlockd_check
+		;;
+	2)
+		clvmd_check
+		;;
+	3)
+		systemid_check
+		;;
+
+	4)
+		tagging_check
+		;;
+	*)
+		ocf_exit_reason "Incorrect VG access mode detected!"
+		exit $OCF_ERR_CONFIGURED
+	esac
+
+	if [ $? -ne $OCF_SUCCESS ]; then
+		ocf_exit_reason "Improper configuration issue is detected!"
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	return $OCF_SUCCESS
+}
+
+# To activate LV(s) with different "activation mode" parameters
+do_activate() {
+	local activate_opt=$1
+
+	# Only activate the specific LV if it's given
+	if [ -n "$LV" ]; then
+		ocf_run lvchange $activate_opt ${VG}/${LV}
+		if [ $? -ne $OCF_SUCCESS ]; then
+			return $OCF_ERR_GENERIC
+		fi
+	else
+		ocf_run lvchange $activate_opt ${VG}
+		if [ $? -ne $OCF_SUCCESS ]; then
+			return $OCF_ERR_GENERIC
+		fi
+	fi
+
+	return $OCF_SUCCESS
+}
+
+lvmlockd_activate() {
+	# activation opt
+	local activate_opt
+
+	if [ "$LV_activation_mode" = "shared" ]; then
+		activate_opt="-asy"
+	else
+		activate_opt="-aey"
+	fi
+
+	# lvmlockd requires shared VGs to be started before they're used
+	ocf_run vgchange --lockstart ${VG}
+	rc=$?
+	if [ $rc -ne $OCF_SUCCESS ]; then
+		ocf_log err "Failed to start shared VG(s), exit code: $rc"
+		return $OCF_ERR_GENERIC
+	fi
+
+	do_activate "$activate_opt"
+	if [ $? -ne $OCF_SUCCESS ]; then
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+# clvmd must be running to activate clustered VG
+clvmd_activate() {
+	local activate_opt
+
+	if [ "$LV_activation_mode" = "shared" ]; then
+		activate_opt="-asy"
+	else
+		activate_opt="-aey"
+	fi
+
+	do_activate "$activate_opt"
+	if [ $? -ne $OCF_SUCCESS ]; then
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+systemid_activate() {
+	local cur_systemid
+
+	pvscan --cache
+	cur_systemid=$(vgs --foreign --noheadings -o systemid ${VG} | tr -d '[:blank:]')
+
+	# Put our system ID on the VG
+	vgchange -y --config "local/extra_system_ids=[\"${cur_systemid}\"]" \
+		--systemid ${SYSTEM_ID} ${VG}
+
+	do_activate "-ay"
+	if [ $? -ne $OCF_SUCCESS ]; then
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+tagging_activate() {
+	if ! set_tags ; then
+		ocf_log err "Failed to set tags on ${VG}."
+		return $OCF_ERR_GENERIC
+	fi
+
+	do_activate "-ay --config activation{volume_list=[\"@${OUR_TAG}\"]}"
+	if [ $? -ne $OCF_SUCCESS ]; then
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+lvmlockd_deactivate() {
+	do_activate "-an"
+	if [ $? -ne $OCF_SUCCESS ]; then
+		return $OCF_ERR_GENERIC
+	fi
+
+	OUT=$(lvs --noheadings -S lv_active=active ${VG} 2>/dev/null)
+	[[ -n "$OUT" ]] && return $OCF_SUCCESS
+
+	# Close the lockspace of this VG if there is no active LV
+	ocf_run vgchange --lockstop ${VG}
+	rc=$?
+	if [ $rc -ne $OCF_SUCCESS ]; then
+		ocf_log err "Failed to close the shared VG lockspace, exit code: $rc"
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+clvmd_deactivate() {
+	do_activate "-an"
+	if [ $? -ne $OCF_SUCCESS ]; then
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+systemid_deactivate() {
+	do_activate "-an"
+	if [ $? -ne $OCF_SUCCESS ]; then
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+tagging_deactivate() {
+	do_activate "-an --config activation{volume_list=[\"@${OUR_TAG}\"]}"
+	if [ $? -ne $OCF_SUCCESS ]; then
+		return $OCF_ERR_GENERIC
+	fi
+
+	if ! strip_tags ; then
+		ocf_log err "Failed to remove tags on ${VG}."
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+# TODO:
+# How can we accurately check if LVs in the given VG are all active?
+#
+# David:
+# If we wanted to check that all LVs in the VG are active, then we would
+# probably need to use the lvs/lv_live_table command here since dmsetup
+# won't know about inactive LVs that should be active.
+#
+# Eric:
+# But, lvs/lv_live_table command doesn't work well now. I tried the following
+# method:
+#
+# lv_count=$(vgs --foreign -o lv_count --noheadings ${VG} 2>/dev/null | tr -d '[:blank:]')
+# dm_count=$(dmsetup --noheadings info -c -S "vgname=${VG}" 2>/dev/null | grep -c "${VG}-")
+# test $lv_count -eq $dm_count
+#
+# It works, but we cannot afford to use an LVM command in lvm_status. LVM commands are expensive
+# because they may potentially scan all disks on the system, and update the metadata even using
+# lvs/vgs when the metadata is somehow inconsistent.
+#
+# So, we have to make compromise that the VG is assumably active if any LV of the VG is active.
+lvm_status() {
+	local dm_count
+
+	if [ -n "${LV}" ]; then
+		# dmsetup ls? It cannot accept a device name. It's
+		# too heavy to list all DM devices.
+		dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" \
+			| grep -Eq "${VG}-+${LV}"
+	else
+		dm_count=$(dmsetup --noheadings info -c -S "vgname=${VG}" 2>/dev/null | grep -c "${VG}-")
+		test $dm_count -gt 0
+	fi
+
+	if [ $? -ne 0 ]; then
+		return $OCF_NOT_RUNNING
+	fi
+
+	return $OCF_SUCCESS
+}
+
+lvm_start() {
+	local rc
+	local vol
+
+	if lvm_status ; then
+		ocf_log info "${vol}: is already active."
+		return $OCF_SUCCESS
+	fi
+
+	[ -z ${LV} ] && vol=${VG} || vol=${VG}/${LV}
+	ocf_log info "Activating ${vol}"
+
+	case ${VG_access_mode} in
+	1)
+		lvmlockd_activate
+		;;
+	2)
+		clvmd_activate
+		;;
+	3)
+		systemid_activate
+		;;
+	4)
+		tagging_activate
+		;;
+	*)
+		ocf_exit_reason "VG [${VG}] is not properly configured in cluster. It's unsafe!"
+		exit $OCF_ERR_CONFIGURED
+		;;
+	esac
+
+	rc=$?
+	if lvm_status ; then
+		ocf_log info "${vol}: activated successfully."
+		return $OCF_SUCCESS
+	else
+		ocf_exit_reason "${vol}: failed to activate."
+		return $rc
+	fi
+}
+
+# Deactivate LVM volume(s)
+lvm_stop() {
+	local vol
+
+	[[ -z ${LV} ]] && vol=${VG} || vol=${VG}/${LV}
+
+	if ! lvm_status ; then
+		ocf_log info "${vol}: has already been deactivated."
+		return $OCF_SUCCESS
+	fi
+
+	ocf_log info "Deactivating ${vol}"
+
+	if ! vgs ${VG} >/dev/null 2>&1 ; then
+                ocf_log info "Volume group ${VG} not found. Nothing to deactivate."
+                return $OCF_SUCCESS
+        fi
+
+	case ${VG_access_mode} in
+	1)
+		lvmlockd_deactivate
+		;;
+	2)
+		clvmd_deactivate
+		;;
+	3)
+		systemid_deactivate
+		;;
+	4)
+		tagging_deactivate
+		;;
+	*)
+		ocf_exit_reason "VG [${VG}] is not properly configured in cluster. It's unsafe!"
+		exit $OCF_ERR_CONFIGURED
+		;;
+	esac
+
+	if ! lvm_status ; then
+		ocf_log info "${vol}: deactivated successfully."
+		return $OCF_SUCCESS
+	else
+		ocf_exit_reason "${vol}: failed to deactivate."
+		return $OCF_ERR_GENERIC
+	fi
+}
+
+#
+# MAIN
+#
+
+case $__OCF_ACTION in
+meta-data)		meta_data
+			exit $OCF_SUCCESS
+			;;
+usage|help)		usage
+			exit $OCF_SUCCESS
+			;;
+esac
+
+# Parameters checking
+if [ -z "$VG" ]
+then
+	ocf_exit_reason "You must identify the volume group name!"
+	exit $OCF_ERR_ARGS
+fi
+
+if [ "$LV_activation_mode" != "shared" ] && [ "$LV_activation_mode" != "exclusive" ]
+then
+	ocf_exit_reason "Invalid value for activation_mode: $LV_activation_mode"
+	exit $OCF_ERR_ARGS
+fi
+
+# Convert VG_access_mode from string to index
+case ${VG_access_mode} in
+lvmlockd)
+	VG_access_mode=1
+	;;
+clvmd)
+	VG_access_mode=2
+	;;
+system_id)
+	VG_access_mode=3
+	;;
+tagging)
+	VG_access_mode=4
+	;;
+*)
+	ocf_exit_reason "You specified an invalid value for vg_access_mode: $VG_access_mode"
+	exit $OCF_ERR_ARGS
+	;;
+esac
+
+# Translate each action into the appropriate function call
+case $__OCF_ACTION in
+start)
+	lvm_validate
+	lvm_start
+	;;
+
+stop)
+	lvm_validate
+	lvm_stop
+	;;
+monitor)
+	lvm_status
+	;;
+validate-all)
+	lvm_validate
+	;;
+*)
+	usage
+	exit $OCF_ERR_UNIMPLEMENTED
+	;;
+esac
+rc=$?
+
+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
+exit $rc
diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am
index 91d4090..3e9a642 100644
--- a/heartbeat/Makefile.am
+++ b/heartbeat/Makefile.am
@@ -65,9 +65,12 @@ ocf_SCRIPTS	     =  AoEtarget		\
 			IPsrcaddr		\
 			LVM			\
 			LinuxSCSI		\
+			lvmlockd		\
+			LVM-activate		\
 			MailTo			\
 			ManageRAID		\
 			ManageVE		\
+			NodeUtilization	\
 			Pure-FTPd		\
 			Raid1			\
 			Route			\
@@ -86,9 +89,12 @@ ocf_SCRIPTS	     =  AoEtarget		\
 			WinPopup		\
 			Xen			\
 			Xinetd			\
+			ZFS			\
 			anything		\
 			apache			\
 			asterisk		\
+			aws-vpc-move-ip		\
+			aws-vpc-route53		\
 			awseip			\
 			awsvip			\
 			clvm			\
@@ -112,6 +118,7 @@ ocf_SCRIPTS	     =  AoEtarget		\
 			jboss			\
 			kamailio		\
 			lxc			\
+			minio			\
 			mysql			\
 			mysql-proxy		\
 			nagios			\
@@ -119,8 +126,10 @@ ocf_SCRIPTS	     =  AoEtarget		\
 			nfsnotify		\
 			nfsserver		\
 			nginx			\
+			oraasm			\
 			oracle			\
 			oralsnr			\
+			ovsmonitor		\
 			pgagent			\
 			pgsql			\
 			pingd			\
@@ -130,6 +139,7 @@ ocf_SCRIPTS	     =  AoEtarget		\
 			proftpd			\
 			rabbitmq-cluster	\
 			redis			\
+			rkt			\
 			rsyncd			\
 			rsyslog			\
 			scsi2reservation	\
@@ -155,6 +165,9 @@ ocfcommon_DATA		= ocf-shellfuncs 	\
 			  http-mon.sh    	\
 			  sapdb-nosha.sh	\
 			  sapdb.sh		\
+			  lvm-clvm.sh       \
+			  lvm-plain.sh      \
+			  lvm-tag.sh        \
 			  ora-common.sh		\
 			  mysql-common.sh	\
 			  nfsserver-redhat.sh	\
diff --git a/heartbeat/NodeUtilization b/heartbeat/NodeUtilization
new file mode 100755
index 0000000..61969e6
--- /dev/null
+++ b/heartbeat/NodeUtilization
@@ -0,0 +1,226 @@
+#!/bin/sh
+#
+#
+#	NodeUtilization OCF Resource Agent
+#
+# Copyright (c) 2011 SUSE LINUX, John Shi
+# Copyright (c) 2016 SUSE LINUX, Kristoffer Gronlund
+#                    All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like.  Any license provided herein, whether implied or
+# otherwise, applies only to this software file.  Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+#######################################################################
+
+NodeUtilization_meta_data() {
+	cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="NodeUtilization">
+<version>1.0</version>
+
+<longdesc lang="en">
+The Node Utilization agent detects system parameters like available CPU, host
+memory and hypervisor memory availability, and adds them into the CIB for each
+node using crm_attribute. Run the agent as a clone resource to have it populate
+these parameters on each node.
+Note: Setting hv_memory only works with Xen at the moment, using the xl or xm
+command line tools.
+</longdesc>
+<shortdesc lang="en">Node Utilization</shortdesc>
+
+<parameters>
+<parameter name="dynamic" unique="0" required="0">
+<longdesc lang="en">
+If true, the parameters are updated whenever differences are found between the
+HA parameters and the current system values when running the monitor action.
+If false, the parameters are set once when the resource instance starts.
+</longdesc>
+<shortdesc lang="en">Dynamically update parameters in monitor</shortdesc>
+<content type="boolean" default="true" />
+</parameter>
+
+<parameter name="utilization_cpu" unique="0" required="0">
+<longdesc lang="en">Enable setting node CPU utilization limit.</longdesc>
+<shortdesc lang="en">Set node CPU utilization limit.</shortdesc>
+<content type="boolean" default="true" />
+</parameter>
+
+<parameter name="utilization_cpu_reservation" unique="0" required="0">
+<longdesc lang="en">Subtract this value when setting the CPU utilization parameter.</longdesc>
+<shortdesc lang="en">CPU reservation.</shortdesc>
+<content type="integer" default="1" />
+</parameter>
+
+<parameter name="utilization_host_memory" unique="0" required="0">
+<longdesc lang="en">Enable setting available host memory.</longdesc>
+<shortdesc lang="en">Set available host memory.</shortdesc>
+<content type="boolean" default="true" />
+</parameter>
+
+<parameter name="utilization_host_memory_reservation" unique="0" required="0">
+<longdesc lang="en">Subtract this value when setting host memory utilization, in MB.</longdesc>
+<shortdesc lang="en">Host memory reservation, in MB.</shortdesc>
+<content type="integer" default="512" />
+</parameter>
+
+<parameter name="utilization_hv_memory" unique="0" required="0">
+<longdesc lang="en">Enable setting available hypervisor memory.</longdesc>
+<shortdesc lang="en">Set available hypervisor memory.</shortdesc>
+<content type="boolean" default="true" />
+</parameter>
+
+<parameter name="utilization_hv_memory_reservation" unique="0" required="0">
+<longdesc lang="en">Subtract this value when setting hypervisor memory utilization, in MB.</longdesc>
+<shortdesc lang="en">Hypervisor memory reservation, in MB.</shortdesc>
+<content type="integer" default="512" />
+</parameter>
+</parameters>
+
+<actions>
+<action name="start"   timeout="90" />
+<action name="stop"    timeout="100" />
+<action name="monitor" timeout="20s" interval="60s"/>
+<action name="meta-data"  timeout="5" />
+<action name="validate-all"  timeout="30" />
+</actions>
+</resource-agent>
+END
+}
+
+Host_Total_Memory() {
+    local xentool
+
+    xentool=$(which xl 2> /dev/null || which xm 2> /dev/null)
+    # Check -n first: with $xentool empty, "[ -x ]" is a one-argument test (true).
+    if [ -n "$xentool" ] && [ -x "$xentool" ]; then
+        $xentool info | awk '/total_memory/{printf("%d\n",$3);exit(0)}'
+    else
+        ocf_log warn "Can only set hv_memory for Xen hypervisor"
+        echo "0"
+    fi
+}
+
+
+set_utilization() {
+    host_name="$(ocf_local_nodename)"
+
+    if ocf_is_true "$OCF_RESKEY_utilization_cpu"; then
+        sys_cpu=$(( $(grep -c processor /proc/cpuinfo) - $OCF_RESKEY_utilization_cpu_reservation ))
+        uti_cpu=$(crm_attribute -Q -t nodes -U "$host_name" -z -n cpu 2>/dev/null)
+
+        if [ "$sys_cpu" != "$uti_cpu" ]; then
+            if ! crm_attribute -t nodes -U "$host_name" -z -n cpu -v $sys_cpu; then
+                ocf_log err "Failed to set the cpu utilization attribute for $host_name using crm_attribute."
+                return 1
+            fi
+        fi
+    fi
+
+    if ocf_is_true "$OCF_RESKEY_utilization_host_memory"; then
+        sys_mem=$(( $(awk '/MemTotal/{printf("%d\n",$2/1024);exit(0)}' /proc/meminfo) - $OCF_RESKEY_utilization_host_memory_reservation ))
+        uti_mem=$(crm_attribute -Q -t nodes -U "$host_name" -z -n host_memory 2>/dev/null)
+
+        if [ "$sys_mem" != "$uti_mem" ]; then
+            if ! crm_attribute -t nodes -U "$host_name" -z -n host_memory -v $sys_mem; then
+                ocf_log err "Failed to set the host_memory utilization attribute for $host_name using crm_attribute."
+                return 1
+            fi
+        fi
+    fi
+
+    if ocf_is_true "$OCF_RESKEY_utilization_hv_memory"; then
+        hv_mem=$(( $(Host_Total_Memory) - OCF_RESKEY_utilization_hv_memory_reservation ))
+        uti_mem=$(crm_attribute -Q -t nodes -U "$host_name" -z -n hv_memory 2>/dev/null)
+
+        [ $hv_mem -lt 0 ] && hv_mem=0
+
+        if [ "$hv_mem" != "$uti_mem" ]; then
+            if ! crm_attribute -t nodes -U "$host_name" -z -n hv_memory -v $hv_mem; then
+                ocf_log err "Failed to set the hv_memory utilization attribute for $host_name using crm_attribute."
+                return 1
+            fi
+        fi
+    fi
+}
+
+NodeUtilization_usage() {
+    cat <<END
+usage: $0 {start|stop|monitor|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
+END
+}
+
+NodeUtilization_start() {
+    ha_pseudo_resource $statefile start
+    if ! ocf_is_true "$OCF_RESKEY_dynamic"; then
+        if ! set_utilization; then
+            exit $OCF_ERR_GENERIC
+        fi
+    fi
+    exit $OCF_SUCCESS
+}
+
+NodeUtilization_stop() {
+    ha_pseudo_resource $statefile stop
+    exit $OCF_SUCCESS
+}
+
+NodeUtilization_monitor() {
+    local rc
+    ha_pseudo_resource $statefile monitor
+    rc=$?
+
+    case $rc in
+        $OCF_SUCCESS)
+            if ocf_is_true "$OCF_RESKEY_dynamic"; then
+                if ! set_utilization; then
+                    exit $OCF_ERR_GENERIC
+                fi
+            fi
+            ;;
+        *) exit $rc;;
+    esac
+}
+
+NodeUtilization_validate() {
+    exit $OCF_SUCCESS
+}
+
+statefile=$OCF_RESOURCE_TYPE.$(echo $OCF_RESOURCE_INSTANCE | sed -e 's/^.*://')
+
+: ${OCF_RESKEY_pidfile:="$HA_VARRUN/NodeUtilization-${OCF_RESOURCE_INSTANCE}"}
+: ${OCF_RESKEY_dynamic:="true"}
+: ${OCF_RESKEY_utilization_cpu:="true"}
+: ${OCF_RESKEY_utilization_cpu_reservation="1"}
+: ${OCF_RESKEY_utilization_hv_memory:="true"}
+: ${OCF_RESKEY_utilization_hv_memory_reservation="512"}
+: ${OCF_RESKEY_utilization_host_memory:="true"}
+: ${OCF_RESKEY_utilization_host_memory_reservation="512"}
+
+OCF_REQUIRED_PARAMS=""
+OCF_REQUIRED_BINARIES=""
+ocf_rarun $*
diff --git a/heartbeat/Raid1 b/heartbeat/Raid1
index bef2606..c7a0d76 100755
--- a/heartbeat/Raid1
+++ b/heartbeat/Raid1
@@ -3,7 +3,7 @@
 #
 # License:      GNU General Public License (GPL)
 # Support:      users at clusterlabs.org
-# 
+#
 # Raid1
 #      Description: Manages a Linux software RAID device on a shared storage medium.
 #  Original Author: Eric Z. Ayers (eric.ayers at compgen.com)
@@ -31,7 +31,7 @@
 #
 # EXAMPLE config file /etc/mdadm.conf (for more info:man mdadm.conf)
 #
-#  DEVICE /dev/sdb1 /dev/sdc1 
+#  DEVICE /dev/sdb1 /dev/sdc1
 #  ARRAY /dev/md0 UUID=4a865b55:ba27ef8d:29cd5701:6fb42799
 #######################################################################
 # Initialization:
@@ -353,16 +353,22 @@ raid1_stop() {
 #
 raid1_monitor_one() {
 	local mddev=$1
-	local md=`echo $mddev | sed 's,/dev/,,'`
+	local md=
 	local rc
 	local TRY_READD=0
 	local pbsize
 	# check if the md device exists first
 	# but not if we are in the stop operation
 	# device existence is important only for the running arrays
-	if [ "$__OCF_ACTION" != "stop" -a ! -b $mddev ]; then
-		ocf_log info "$mddev is not a block device"
-		return $OCF_NOT_RUNNING
+	if [ "$__OCF_ACTION" != "stop" ]; then
+		if [ -h "$mddev" ]; then
+			md=$(ls $mddev -l | awk -F'/' '{print $NF}')
+		elif [ -b "$mddev" ]; then
+			md=$(echo $mddev | sed 's,/dev/,,')
+		else
+			ocf_log info "$mddev is not a block device"
+			return $OCF_NOT_RUNNING
+		fi
 	fi
 	if ! grep -e "^$md[ \t:]" /proc/mdstat >/dev/null ; then
 		ocf_log info "$md not found in /proc/mdstat"
@@ -389,8 +395,8 @@ raid1_monitor_one() {
 	if [ "$__OCF_ACTION" = "monitor" -a "$OCF_RESKEY_CRM_meta_interval" != 0 \
 		-a $TRY_READD -eq 1 -a $OCF_CHECK_LEVEL -gt 0 ]; then
 		ocf_log info "Attempting recovery sequence to re-add devices on $mddev:"
-		$MDADM $mddev --fail detached 
-		$MDADM $mddev --remove failed 
+		$MDADM $mddev --fail detached
+		$MDADM $mddev --remove failed
 		$MDADM $mddev --re-add missing
 		# TODO: At this stage, there's nothing to actually do
 		# here. Either this worked or it did not.
@@ -448,7 +454,7 @@ case "$1" in
 	meta_data
 	exit $OCF_SUCCESS
 	;;
-  usage) 
+  usage)
 	usage
 	exit $OCF_SUCCESS
 	;;
@@ -467,7 +473,7 @@ if [ -z "$RAIDCONF" ] ; then
 fi
 
 if [ ! -r "$RAIDCONF" ] ; then
-	ocf_exit_reason "Configuration file [$RAIDCONF] does not exist, or can not be opend!"
+	ocf_exit_reason "Configuration file [$RAIDCONF] does not exist, or can not be opened!"
 	exit $OCF_ERR_INSTALLED
 fi
 
@@ -530,7 +536,7 @@ fi
 # [ $HAVE_RAIDTOOLS = false ] <=> we have $MDADM,
 # otherwise we have raidtools (raidstart and raidstop)
 
-# Look for how we are called 
+# Look for how we are called
 case "$1" in
   start)
 	raid1_start
@@ -538,10 +544,10 @@ case "$1" in
   stop)
 	raid1_stop
 	;;
-  status) 
+  status)
 	raid1_status
 	;;
-  monitor) 
+  monitor)
 	raid1_monitor
 	;;
   validate-all)
@@ -549,7 +555,7 @@ case "$1" in
 	;;
   *)
 	usage
-	exit $OCF_ERR_UNIMPLEMENTED 
+	exit $OCF_ERR_UNIMPLEMENTED
 	;;
 esac
 
diff --git a/heartbeat/Route b/heartbeat/Route
index b018363..0a446ad 100755
--- a/heartbeat/Route
+++ b/heartbeat/Route
@@ -31,6 +31,11 @@
 : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
 . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
 
+# Default values
+OCF_RESKEY_family_default="detect"
+
+: ${OCF_RESKEY_family=${OCF_RESKEY_family_default}}
+
 #######################################################################
 
 meta_data() {
@@ -119,6 +124,17 @@ The routing table to be configured for the route.
 <content type="string" default="" />
 </parameter>
 
+<parameter name="family" unique="0" required="1">
+<longdesc lang="en">
+The address family to be used for the route
+ip4      IP version 4
+ip6      IP version 6
+detect   Detect from 'destination' address.
+</longdesc>
+<shortdesc lang="en">Address Family</shortdesc>
+<content type="string" default="${OCF_RESKEY_family}" />
+</parameter>
+
 </parameters>
 
 <actions>
@@ -291,9 +307,16 @@ done
 
 route_validate || exit $?
 
-case $OCF_RESKEY_destination in
-*:*) addr_family="-6" ;;
-  *) addr_family="-4" ;;
+case $OCF_RESKEY_family in
+    ip4) addr_family="-4" ;;
+    ip6) addr_family="-6" ;;
+    detect)
+        case $OCF_RESKEY_destination in
+            *:*)     addr_family="-6" ;;
+            *.*)     addr_family="-4" ;;
+              *) ocf_exit_reason "Address family detection requires a numeric destination address."; exit $OCF_ERR_CONFIGURED ;;
+        esac ;;
+    *) ocf_exit_reason "Address family '${OCF_RESKEY_family}' not recognized."; exit $OCF_ERR_CONFIGURED ;;
 esac
 
 case $__OCF_ACTION in
diff --git a/heartbeat/SAPInstance b/heartbeat/SAPInstance
index 49e60aa..7746b52 100755
--- a/heartbeat/SAPInstance
+++ b/heartbeat/SAPInstance
@@ -31,6 +31,7 @@
 #	OCF_RESKEY_POST_START_USEREXIT	(optional, lists a script which can be executed after the resource is started)
 #	OCF_RESKEY_PRE_STOP_USEREXIT	(optional, lists a script which can be executed before the resource is stopped)
 #	OCF_RESKEY_POST_STOP_USEREXIT	(optional, lists a script which can be executed after the resource is stopped)
+#	OCF_RESKEY_IS_ERS               (needed for ENQ/REPL NW 740)
 #
 #  TODO: - Option to shutdown sapstartsrv for non-active instances -> that means: do probes only with OS tools (sapinstance_status)
 #        - Option for better standalone enqueue server monitoring, using ensmon (test enque-deque)
@@ -141,6 +142,12 @@ Those services are monitored within the SAPInstance resource agent:
 - jcontrol
 - jstart
 
+Some other services could be monitored as well. They have to be
+given with the parameter MONITOR_SERVICES, e.g.:
+
+ - sapwebdisp
+ - TREXDaemon.x
+
 That names match the strings used in the output of the command 'sapcontrol -nr [Instance-Nr] -function GetProcessList'.
 The default should fit most cases where you want to manage a SAP Instance from the cluster. You may change this with this parameter, if you like to monitor more/less or other services that sapstartsrv supports.
 You may specify multiple services separated by a | (pipe) sign in this parameter: disp+work|msg_server|enserver
@@ -195,6 +202,15 @@ The name of the SAP START profile. Specify this parameter, if you have changed t
   <shortdesc lang="en">Path to a post-start script</shortdesc>
   <content type="string" default="" />
  </parameter>
+ <parameter name="IS_ERS" unique="0" required="0">
+  <longdesc lang="en">Only used for ASCS/ERS SAP Netweaver installations without implementing a master/slave resource to
+    allow the ASCS to 'find' the ERS running on another cluster node after a resource failure. This parameter should be set
+    to true 'only' for the ERS instance for implementations following the SAP NetWeaver 7.40 HA certification (NW-HA-CLU-740). This also includes
+    systems for NetWeaver releases older than 7.40, if you want to implement the NW-HA-CLU-740 scenario.
+  </longdesc>
+  <shortdesc lang="en">Mark SAPInstance as ERS instance</shortdesc>
+  <content type="boolean" default="false" />
+ </parameter>
 </parameters>
 
 <actions>
@@ -342,6 +358,12 @@ sapinstance_init() {
     currentSTART_PROFILE=$OCF_RESKEY_START_PROFILE
   fi
 
+  if [ -z "$OCF_RESKEY_IS_ERS" ]; then
+      is_ers="no"
+  else
+      is_ers="$OCF_RESKEY_IS_ERS"
+  fi
+
   if [ -z "$currentSTART_PROFILE" ]
   then
     SAPSTARTPROFILE="$DIR_PROFILE/START_${InstanceName}_${SAPVIRHOST}"
@@ -568,9 +590,11 @@ sapinstance_start() {
     ocf_log info "SAP Instance $SID-$InstanceName started: $output"
     rc=$OCF_SUCCESS
     sapuserexit POST_START_USEREXIT "$OCF_RESKEY_POST_START_USEREXIT"
+    if ocf_is_true $is_ers; then crm_attribute -n runs_ers_${SID} -v 1 -l reboot; fi
   else
     ocf_log err "SAP Instance $SID-$InstanceName start failed: $output"
     rc=$OCF_NOT_RUNNING
+    if ocf_is_true $is_ers; then crm_attribute -n runs_ers_${SID} -v 0 -l reboot; fi
   fi
 
   return $rc
@@ -628,6 +652,7 @@ sapinstance_stop() {
   fi
 
   sapuserexit POST_STOP_USEREXIT "$OCF_RESKEY_POST_STOP_USEREXIT"
+  if ocf_is_true $is_ers; then crm_attribute -n runs_ers_${SID} -v 0 -l reboot;  fi
 
   return $rc
 }
diff --git a/heartbeat/VirtualDomain b/heartbeat/VirtualDomain
index 0b614f5..b974466 100755
--- a/heartbeat/VirtualDomain
+++ b/heartbeat/VirtualDomain
@@ -108,6 +108,17 @@ use libvirt's default transport to connect to the remote hypervisor.
 <content type="string" default="" />
 </parameter>
 
+<parameter name="migration_user" unique="0" required="0">
+<longdesc lang="en">
+The username to be used in the remote libvirt remoteuri/migrateuri. If omitted, no
+username is given (which means root).
+
+If remoteuri is set, migration_user will be ignored.
+</longdesc>
+<shortdesc lang="en">Remote username for the remoteuri</shortdesc>
+<content type="string" />
+</parameter>
+
 <parameter name="migration_downtime" unique="0" required="0">
 <longdesc lang="en">
 Define max downtime during live migration in milliseconds
@@ -133,7 +144,8 @@ insert the suffix immediately prior to the first period (.) in the FQDN.
 At the moment Qemu/KVM and Xen migration via a dedicated network is supported.
 
 Note: Be sure this composed host name is locally resolveable and the
-associated IP is reachable through the favored network.
+associated IP is reachable through the favored network. This suffix will
+be added to the remoteuri and migrateuri parameters.
 
 See also the migrate_options parameter below.
 </longdesc>
@@ -141,6 +153,22 @@ See also the migrate_options parameter below.
 <content type="string" default="" />
 </parameter>
 
+<parameter name="migrateuri" unique="0" required="0">
+<longdesc lang="en">
+You can also specify here if the calculated migrate URI is unsuitable for your
+environment.
+
+If migrateuri is set then migration_network_suffix, migrateport and
+--migrateuri in migrate_options are effectively ignored. Use "%n" as the
+placeholder for the target node name.
+
+Please refer to the libvirt documentation for details on guest
+migration.
+</longdesc>
+<shortdesc lang="en">Custom migrateuri for migration state transfer</shortdesc>
+<content type="string" />
+</parameter>
+
 <parameter name="migrate_options" unique="0" required="0">
 <longdesc lang="en">
 Extra virsh options for the guest live migration. You can also specify
@@ -195,6 +223,20 @@ This port will be used in the qemu migrateuri. If unset, the port will be a rand
 <content type="integer" />
 </parameter>
 
+<parameter name="remoteuri" unique="0" required="0">
+<longdesc lang="en">
+Use this URI as virsh connection URI to communicate with a remote hypervisor.
+
+If remoteuri is set then migration_user and migration_network_suffix are
+effectively ignored. Use "%n" as the placeholder for the target node name.
+
+Please refer to the libvirt documentation for details on guest
+migration.
+</longdesc>
+<shortdesc lang="en">Custom remoteuri to communicate with a remote hypervisor</shortdesc>
+<content type="string" />
+</parameter>
+
 <parameter name="save_config_on_stop" unique="0" required="0">
 <longdesc lang="en">
 Changes to a running VM's config are normally lost on stop. 
@@ -229,6 +271,19 @@ Restore state on start/stop
 <content type="string" default=""/>
 </parameter>
 
+<parameter name="shutdown_mode">
+<longdesc lang="en">
+virsh shutdown method to use. Please verify that it is supported by your virsh toolset with 'virsh help shutdown'.
+When this parameter is set --mode shutdown_mode is passed as an additional argument to the 'virsh shutdown' command.
+One can use this option in case default acpi method does not work. Verify that this mode is supported
+by your VM.  By default --mode is not passed.
+</longdesc>
+<shortdesc lang="en">
+Instruct virsh to use specific shutdown mode
+</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
 </parameters>
 
 <actions>
@@ -590,7 +645,10 @@ VirtualDomain_stop() {
 			# issue the shutdown if save state didn't shutdown for us
 			if [ $needshutdown -eq 1 ]; then
 				# Issue a graceful shutdown request
-				virsh $VIRSH_OPTIONS shutdown ${DOMAIN_NAME}
+				if [ -n "${OCF_RESKEY_shutdown_mode}" ]; then # OCF parameter is named "shutdown_mode"
+					shutdown_opts="--mode ${OCF_RESKEY_shutdown_mode}"
+				fi
+				virsh $VIRSH_OPTIONS shutdown ${DOMAIN_NAME} $shutdown_opts
 			fi
 
 			# The "shutdown_timeout" we use here is the operation
@@ -677,22 +735,52 @@ VirtualDomain_migrate_to() {
 		# Find out the remote hypervisor to connect to. That is, turn
 		# something like "qemu://foo:9999/system" into
 		# "qemu+tcp://bar:9999/system"
-		if [ -n "${OCF_RESKEY_migration_transport}" ]; then
-			transport_suffix="+${OCF_RESKEY_migration_transport}"
+
+		if [ -n "${OCF_RESKEY_remoteuri}" ]; then
+			remoteuri=`echo "${OCF_RESKEY_remoteuri}" |
+				sed "s/%n/$target_node/g"`
+		else
+			if [ -n "${OCF_RESKEY_migration_transport}" ]; then
+				transport_suffix="+${OCF_RESKEY_migration_transport}"
+			fi
+
+			# append user defined suffix if virsh target should differ from cluster node name
+			if [ -n "${OCF_RESKEY_migration_network_suffix}" ]; then
+				# Hostname might be a FQDN
+				target_node=$(echo ${target_node} | sed -e "s,^\([^.]\+\),\1${OCF_RESKEY_migration_network_suffix},")
+			fi
+
+			# a remote user has been defined to connect to target_node
+			if echo ${OCF_RESKEY_migration_user} | grep -q "^[a-z][-a-z0-9]*$" ; then
+				target_node="${OCF_RESKEY_migration_user}@${target_node}"
+			fi
+
+			# Scared of that sed expression? So am I. :-)
+			remoteuri=$(echo ${OCF_RESKEY_hypervisor} | sed -e "s,\(.*\)://[^/:]*\(:\?[0-9]*\)/\(.*\),\1${transport_suffix}://${target_node}\2/\3,")
 		fi
 
 		# User defined migrateuri or do we make one?
 		migrate_opts="$OCF_RESKEY_migrate_options"
-		if echo "$migrate_opts" | fgrep -qs -- "--migrateuri="; then
+
+		# migration_uri is directly set
+		if [ -n "${OCF_RESKEY_migrateuri}" ]; then
+			migrateuri=`echo "${OCF_RESKEY_migrateuri}" |
+				sed "s/%n/$target_node/g"`
+
+		# extract migrationuri from options
+		elif echo "$migrate_opts" | fgrep -qs -- "--migrateuri="; then
 			migrateuri=`echo "$migrate_opts" |
 				sed "s/.*--migrateuri=\([^ ]*\).*/\1/;s/%n/$target_node/g"`
-			migrate_opts=`echo "$migrate_opts" |
-				sed "s/\(.*\)--migrateuri=[^ ]*\(.*\)/\1\2/"`
+
+		# auto generate
 		else
 			migrateuri=`mk_migrateuri`
 		fi
-		# Scared of that sed expression? So am I. :-)
-		remoteuri=$(echo ${OCF_RESKEY_hypervisor} | sed -e "s,\(.*\)://[^/:]*\(:\?[0-9]*\)/\(.*\),\1${transport_suffix}://${target_node}\2/\3,")
+
+		# remove --migrateuri from migration_opts
+		migrate_opts=`echo "$migrate_opts" |
+			sed "s/\(.*\)--migrateuri=[^ ]*\(.*\)/\1\2/"`
+
 
 		# save config if needed
 		if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then
diff --git a/heartbeat/ZFS b/heartbeat/ZFS
new file mode 100755
index 0000000..16cb138
--- /dev/null
+++ b/heartbeat/ZFS
@@ -0,0 +1,192 @@
+#!/bin/sh
+#
+# License:      GNU General Public License (GPL)
+# Support:      zfs at lists.illumos.org
+# Written by:   Saso Kiselkov
+#
+#	This script manages ZFS pools
+#	It can import a ZFS pool or export it
+#
+#	usage: $0 {start|stop|status|monitor|validate-all|meta-data}
+#
+#	The "start" arg imports a ZFS pool.
+#	The "stop" arg exports it.
+#
+#       OCF parameters are as follows
+#       OCF_RESKEY_pool - the pool to import/export
+#
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+# Defaults
+OCF_RESKEY_importforce_default=true
+
+: ${OCF_RESKEY_importforce=${OCF_RESKEY_importforce_default}}
+
+USAGE="usage: $0 {start|stop|status|monitor|validate-all|meta-data}";
+
+#######################################################################
+
+meta_data() {
+        cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="ZFS">
+<version>1.0</version>
+<longdesc lang="en">
+This script manages ZFS pools
+It can import a ZFS pool or export it
+</longdesc>
+<shortdesc lang="en">Manages ZFS pools</shortdesc>
+
+<parameters>
+<parameter name="pool" unique="1" required="1">
+<longdesc lang="en">
+The name of the ZFS pool to manage, e.g. "tank".
+</longdesc>
+<shortdesc lang="en">ZFS pool name</shortdesc>
+<content type="string" default="" />
+</parameter>
+<parameter name="importargs" unique="0" required="0">
+<longdesc lang="en">
+Arguments to zpool import, e.g. "-d /dev/disk/by-id".
+</longdesc>
+<shortdesc lang="en">Import arguments</shortdesc>
+<content type="string" default="" />
+</parameter>
+<parameter name="importforce" unique="1" required="0">
+<longdesc lang="en">
+zpool import is given the -f option.
+</longdesc>
+<shortdesc lang="en">Import is forced</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_importforce_default}" />
+</parameter>
+</parameters>
+
+<actions>
+<action name="start"   timeout="60s" />
+<action name="stop"    timeout="60s" />
+<action name="monitor" depth="0"  timeout="30s" interval="5s" />
+<action name="validate-all"  timeout="30s" />
+<action name="meta-data"  timeout="5s" />
+</actions>
+</resource-agent>
+END
+        exit $OCF_SUCCESS
+}
+
+zpool_is_imported () {
+    zpool list -H "$OCF_RESKEY_pool" > /dev/null
+}
+
+# Forcibly imports a ZFS pool, mounting all of its auto-mounted filesystems
+# (as configured in the 'mountpoint' and 'canmount' properties)
+# If the pool is already imported, no operation is taken.
+zpool_import () {
+    if ! zpool_is_imported; then
+        ocf_log debug "${OCF_RESKEY_pool}:starting import"
+
+        # The meanings of the options to import are as follows:
+        #   -f : import even if the pool is marked as imported to another
+        #        system - the system may have failed and not exported it
+        #        cleanly.
+        #   -o cachefile=none : the import should be temporary, so do not
+        #        cache it persistently (across machine reboots). We want
+        #        the CRM to explicitly control imports of this pool.
+	if ocf_is_true "${OCF_RESKEY_importforce}"; then
+	    FORCE=-f
+	else
+	    FORCE=""
+	fi
+        if zpool import $FORCE $OCF_RESKEY_importargs -o cachefile=none "$OCF_RESKEY_pool" ; then
+            ocf_log debug "${OCF_RESKEY_pool}:import successful"
+            return $OCF_SUCCESS
+        else
+            ocf_log debug "${OCF_RESKEY_pool}:import failed"
+            return $OCF_ERR_GENERIC
+        fi
+    fi
+}
+
+# Forcibly exports a ZFS pool, unmounting all of its filesystems in the process
+# If the pool is not imported, no operation is taken.
+zpool_export () {
+    if zpool_is_imported; then
+        ocf_log debug "${OCF_RESKEY_pool}:starting export"
+
+        # -f : force the export, even if we have mounted filesystems
+        # Please note that this may fail with a "busy" error if there are
+        # other kernel subsystems accessing the pool (e.g. SCSI targets).
+        # Always make sure the pool export is last in your failover logic.
+        if zpool export -f "$OCF_RESKEY_pool" ; then
+            ocf_log debug "${OCF_RESKEY_pool}:export successful"
+            return $OCF_SUCCESS
+	else
+            ocf_log debug "${OCF_RESKEY_pool}:export failed"
+            return $OCF_ERR_GENERIC
+	fi
+    fi
+}
+
+# Monitors the health of a ZFS pool resource. Please note that this only
+# checks whether the pool is imported and functional, not whether it has
+# any degraded devices (use monitoring systems such as Zabbix for that).
+zpool_monitor () {
+    # If the pool is not imported, then we can't monitor its health
+    if ! zpool_is_imported; then
+        return $OCF_NOT_RUNNING
+    fi
+
+    # Check the pool status
+    HEALTH=$(zpool list -H -o health "$OCF_RESKEY_pool")
+    case "$HEALTH" in
+        ONLINE|DEGRADED) return $OCF_SUCCESS;;
+        FAULTED)         return $OCF_NOT_RUNNING;;
+        *)               return $OCF_ERR_GENERIC;;
+    esac
+}
+
+# Validates whether we can import a given ZFS pool
+zpool_validate () {
+    # Check that the 'zpool' command is known
+    if ! which zpool > /dev/null; then
+        return $OCF_ERR_INSTALLED
+    fi
+
+    # If the pool is imported, then it is obviously valid
+    if zpool_is_imported; then
+        return $OCF_SUCCESS
+    fi
+
+    # Check that the pool can be imported
+    if zpool import $OCF_RESKEY_importargs | grep 'pool:' | grep "\\<$OCF_RESKEY_pool\\>" > /dev/null;
+    then
+        return $OCF_SUCCESS
+    else
+        return $OCF_ERR_CONFIGURED
+    fi
+}
+
+usage () {
+    echo "$USAGE" >&2
+    return $1
+}
+
+if [ $# -ne 1 ]; then
+    usage $OCF_ERR_ARGS; exit $OCF_ERR_ARGS  # bail out instead of falling into the action dispatch below
+fi
+
+case $1 in
+    meta-data)		meta_data;;
+    start)		zpool_import;;
+    stop)		zpool_export;;
+    status|monitor)	zpool_monitor;;
+    validate-all)	zpool_validate;;
+    usage)		usage $OCF_SUCCESS;;
+    *)			usage $OCF_ERR_UNIMPLEMENTED;;
+esac
+
+exit $?
diff --git a/heartbeat/anything b/heartbeat/anything
index 96dc14e..fbf8d2c 100755
--- a/heartbeat/anything
+++ b/heartbeat/anything
@@ -72,6 +72,18 @@ anything_status() {
 anything_start() {
 	if ! anything_status
 	then
+		#Make sure that PID Directory exists and is writable by proper user
+		piddir=`dirname $pidfile`
+		if ! su -s /bin/sh - $user -c "test -w $piddir"; then
+			#PID Directory is not writeable by user
+			ocf_log warn "Directory $piddir is not writable by $user, attempting to fix."
+			ocf_log info "Creating directory $piddir"
+			mkdir -p $piddir
+			ocf_log info "Changing permissions for $piddir for user $user"
+			chown $user: $piddir
+		else
+			ocf_log debug "Directory $piddir exists, and is writeable by $user. All fine"
+		fi
 		if [ -n "$logfile" -a -n "$errlogfile" ]
 		then
 			# We have logfile and errlogfile, so redirect STDOUT und STDERR to different files
@@ -85,8 +97,10 @@ anything_start() {
 		eval $cmd > $pidfile
 		if anything_status
 		then
-			ocf_log debug "$process: $cmd started successfully"
-			return $OCF_SUCCESS
+			ocf_log debug "$process: $cmd started successfully, calling monitor"
+			anything_monitor
+			myres=$?
+			return $myres
 		else 
 			ocf_log err "$process: $cmd could not be started"
 			return $OCF_ERR_GENERIC
@@ -214,7 +228,7 @@ This is a generic OCF RA to manage almost anything.
 <shortdesc lang="en">Manages an arbitrary service</shortdesc>
 
 <parameters>
-<parameter name="binfile" required="1" unique="1">
+<parameter name="binfile" required="1" unique="0">
 <longdesc lang="en">
 The full name of the binary to be executed. This is expected to keep running with the same pid and not just do something and exit.
 </longdesc>
diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
new file mode 100755
index 0000000..30ea106
--- /dev/null
+++ b/heartbeat/aws-vpc-move-ip
@@ -0,0 +1,306 @@
+#!/bin/sh
+#
+#
+# OCF resource agent to move an IP address within a VPC in the AWS
+#
+# Copyright (c) 2017 Markus Guertler (SUSE)
+# Based on code of Adam Gandelman (GitHub ec2-resource-agents/elasticip)
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like.  Any license provided herein, whether implied or
+# otherwise, applies only to this software file.  Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+
+
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+# Defaults
+OCF_RESKEY_awscli_default="/usr/bin/aws"
+OCF_RESKEY_profile_default="default"
+OCF_RESKEY_monapi_default="false"
+
+: ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
+: ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
+: ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}}
+#######################################################################
+
+
+USAGE="usage: $0 {start|stop|status|meta-data}";
+###############################################################################
+
+
+###############################################################################
+#
+# Functions
+#
+###############################################################################
+
+
+metadata() {
+cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="aws-vpc-move-ip">
+<version>2.0</version>
+<longdesc lang="en">
+Resource Agent to move IP addresses within a VPC of the Amazon Webservices EC2
+by changing an entry in a specific routing table
+</longdesc>
+<shortdesc lang="en">Move IP within a VPC of the AWS EC2</shortdesc>
+
+<parameters>
+<parameter name="awscli">
+<longdesc lang="en">
+Path to command line tools for AWS
+</longdesc>
+<shortdesc lang="en">Path to AWS CLI tools</shortdesc>
+<content type="string" default="${OCF_RESKEY_awscli_default}" />
+</parameter>
+
+<parameter name="profile">
+<longdesc lang="en">
+Valid AWS CLI profile name (see ~/.aws/config and 'aws configure')
+</longdesc>
+<shortdesc lang="en">profile name</shortdesc>
+<content type="string" default="${OCF_RESKEY_profile_default}" />
+</parameter>
+
+<parameter name="ip" required="1">
+<longdesc lang="en">
+VPC private IP address
+</longdesc>
+<shortdesc lang="en">VPC private IP</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="routing_table" required="1">
+<longdesc lang="en">
+Name of the routing table, where the route for the IP address should be changed, i.e. rtb-...
+</longdesc>
+<shortdesc lang="en">routing table name</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+<parameter name="interface" required="1">
+<longdesc lang="en">
+Name of the network interface, i.e. eth0
+</longdesc>
+<shortdesc lang="en">network interface name</shortdesc>
+<content type="string" default="eth0" />
+</parameter>
+
+<parameter name="monapi">
+<longdesc lang="en">
+Enable enhanced monitoring using AWS API calls to check route table entry
+</longdesc>
+<shortdesc lang="en">Enhanced Monitoring</shortdesc>
+<content type="boolean" default="${OCF_RESKEY_monapi_default}" />
+</parameter>
+</parameters>
+
+<actions>
+<action name="start" timeout="180" />
+<action name="stop" timeout="180" />
+<action name="monitor" depth="0" timeout="30" interval="60" />
+<action name="validate-all" timeout="5" />
+<action name="meta-data" timeout="5" />
+</actions>
+</resource-agent>
+END
+}
+
+ec2ip_validate() {
+	for cmd in aws ip curl; do
+		check_binary "$cmd"
+	done
+
+	if [ -z "$OCF_RESKEY_profile" ]; then
+		ocf_exit_reason "profile parameter not set"
+		return $OCF_ERR_CONFIGURED
+	fi
+
+	EC2_INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)"
+
+	if [ -z "${EC2_INSTANCE_ID}" ]; then
+		ocf_exit_reason "Instance ID not found. Is this a EC2 instance?"
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+ec2ip_monitor() {
+	if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ]; then
+		ocf_log info "monitor: check routing table (API call)"
+		cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $OCF_RESKEY_routing_table"
+		ocf_log debug "executing command: $cmd"
+		ROUTE_TO_INSTANCE="$($cmd | grep $OCF_RESKEY_ip | awk '{ print $3 }')"
+		if [ -z "$ROUTE_TO_INSTANCE" ]; then
+			ROUTE_TO_INSTANCE="<unknown>"
+		fi
+
+		if [ "$EC2_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ];then 
+			ocf_log warn "not routed to this instance ($EC2_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE"
+			return $OCF_NOT_RUNNING
+		fi
+	else
+		ocf_log debug "monitor: Enhanced Monitoring disabled - omitting API call"
+	fi
+
+	cmd="ping -W 1 -c 1 $OCF_RESKEY_ip"
+	ocf_log debug "executing command: $cmd"
+	$cmd > /dev/null
+	if [ "$?" -gt 0 ]; then
+		ocf_log warn "IP $OCF_RESKEY_ip not locally reachable via ping on this system"
+		return $OCF_NOT_RUNNING
+	fi
+
+	ocf_log debug "route in VPC and locally reachable"
+	return $OCF_SUCCESS
+}
+
+
+ec2ip_drop() {
+	cmd="ip addr delete ${OCF_RESKEY_ip}/32 dev $OCF_RESKEY_interface"
+	ocf_log debug "executing command: $cmd"
+	$cmd
+	rc=$?
+	if [ "$rc" -gt 0 ]; then
+		ocf_log warn "command failed, rc $rc"
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+ec2ip_get_and_configure() {
+	# Adjusting the routing table
+	cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile ec2 replace-route --route-table-id $OCF_RESKEY_routing_table --destination-cidr-block ${OCF_RESKEY_ip}/32 --instance-id $EC2_INSTANCE_ID"
+	ocf_log debug "executing command: $cmd"
+	$cmd
+	rc=$?
+	if [ "$rc" != 0 ]; then
+		ocf_log warn "command failed, rc: $rc"
+		return $OCF_ERR_GENERIC
+	fi
+
+	# Reconfigure the local ip address
+	ec2ip_drop
+	ip addr add "${OCF_RESKEY_ip}/32" dev $OCF_RESKEY_interface
+	rc=$?
+	if [ $rc != 0 ]; then
+		ocf_log warn "command failed, rc: $rc"
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+ec2ip_stop() {
+	ocf_log info "EC2: Bringing down IP address $OCF_RESKEY_ip"
+
+	ec2ip_monitor
+	if [ $? = $OCF_NOT_RUNNING ]; then
+		ocf_log info "EC2: Address $OCF_RESKEY_ip already down"
+		return $OCF_SUCCESS
+	fi
+
+	ec2ip_drop
+	if [ $? != $OCF_SUCCESS ]; then
+		return $OCF_ERR_GENERIC
+	fi
+
+	ec2ip_monitor
+	if [ $? != $OCF_NOT_RUNNING ]; then
+		ocf_log error "EC2: Couldn't bring down IP address $OCF_RESKEY_ip on interface $OCF_RESKEY_interface."
+		return $OCF_ERR_GENERIC
+	fi
+
+	ocf_log info "EC2: Successfully brought down $OCF_RESKEY_ip"
+	return $OCF_SUCCESS
+}
+
+ec2ip_start() {
+	ocf_log info "EC2: Moving IP address $OCF_RESKEY_ip to this host by adjusting routing table $OCF_RESKEY_routing_table"
+
+	ec2ip_monitor
+	if [ $? = $OCF_SUCCESS ]; then
+		ocf_log info "EC2: $OCF_RESKEY_ip already started"
+		return $OCF_SUCCESS
+	fi
+
+	ocf_log info "EC2: Adjusting routing table and locally configuring IP address"
+	ec2ip_get_and_configure
+	rc=$?
+	if [ $rc != $OCF_SUCCESS ]; then
+		ocf_log error "Received $rc from 'aws'"
+		return $OCF_ERR_GENERIC
+	fi
+
+	ec2ip_monitor
+	if [ $? != $OCF_SUCCESS ]; then
+		ocf_log error "EC2: IP address couldn't be configured on this host (IP: $OCF_RESKEY_ip, Interface: $OCF_RESKEY_interface)"
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+###############################################################################
+#
+# MAIN
+#
+###############################################################################
+
+case $__OCF_ACTION in
+	meta-data)
+		metadata
+		exit $OCF_SUCCESS
+		;;
+	usage|help)
+		echo $USAGE
+		exit $OCF_SUCCESS
+		;;
+esac
+
+if ! ocf_is_root; then
+	ocf_log err "You must be root for $__OCF_ACTION operation."
+	exit $OCF_ERR_PERM
+fi
+
+ec2ip_validate
+
+case $__OCF_ACTION in
+	start)
+		ec2ip_start;;
+	stop)
+		ec2ip_stop;;
+	monitor)
+		ec2ip_monitor;;
+	validate-all)
+		exit $?;;
+	*)	
+		echo $USAGE
+		exit $OCF_ERR_UNIMPLEMENTED
+		;;
+esac
diff --git a/heartbeat/aws-vpc-route53 b/heartbeat/aws-vpc-route53
new file mode 100755
index 0000000..ff44bf7
--- /dev/null
+++ b/heartbeat/aws-vpc-route53
@@ -0,0 +1,302 @@
+#!/bin/bash
+#
+#   Copyright 2017 Amazon.com, Inc. and its affiliates. All Rights Reserved.
+#   Licensed under the MIT License.
+#
+#  Copyright 2017 Amazon.com, Inc. and its affiliates
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+# of the Software, and to permit persons to whom the Software is furnished to do
+# so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+#
+#
+#
+# OCF resource agent to move an IP address within a VPC in the AWS
+# Written by Stefan Schneider (AWS), Martin Tegmeier (AWS)
+# Based on code of Markus Guertler (SUSE)
+#
+#
+# OCF resource agent to move an IP address within a VPC in the AWS
+# Written by Stefan Schneider (AWS) , Martin Tegmeier (AWS)
+# Based on code of Markus Guertler (SUSE)
+#
+# Mar. 15, 2017, vers 1.0.2
+
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+OCF_RESKEY_ttl_default=10
+
+: ${OCF_RESKEY_ttl:=${OCF_RESKEY_ttl_default}}
+
+#######################################################################
+
+usage() {
+	cat <<-EOT
+	usage: $0 {start|stop|status|monitor|validate-all|meta-data}
+	EOT
+}
+
+metadata() {
+cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="aws-vpc-route53">
+<version>1.0</version>
+<longdesc lang="en">
+Update Route53 record of Amazon Webservices EC2 by updating an entry in a
+hosted zone ID table.
+
+AWS instances will require policies which allow them to update Route53 ARecords:
+{
+	"Version": "2012-10-17",
+	"Statement": [
+		{
+			"Sid": "Stmt1471878724000",
+			"Effect": "Allow",
+			"Action": [
+				"route53:ChangeResourceRecordSets",
+				"route53:GetChange",
+				"route53:ListResourceRecordSets"
+			],
+			"Resource": [
+				"*"
+			]
+		}
+	]
+}
+
+Example Cluster Configuration:
+
+Use a configuration in "crm configure edit" which looks as follows. Replace
+hostedzoneid, fullname and profile with the appropriate values:
+
+primitive res_route53 ocf:heartbeat:aws-vpc-route53 \
+		params hostedzoneid=EX4MPL3EX4MPL3 fullname=service.cloud.example.corp. profile=cluster \
+		op start interval=0 timeout=180 \
+		op stop interval=0 timeout=180 \
+		op monitor interval=300 timeout=180 \
+		meta target-role=Started
+</longdesc>
+<shortdesc lang="en">Update Route53 VPC record for AWS EC2</shortdesc>
+<parameters>
+<parameter name="hostedzoneid" required="1">
+<longdesc lang="en">
+Hosted zone ID of Route 53. This is the ID of the
+hosted zone that contains the Route 53 record.
+</longdesc>
+<shortdesc lang="en">AWS hosted zone ID</shortdesc>
+<content type="string" default="" />
+</parameter>
+<parameter name="fullname" required="1">
+<longdesc lang="en">
+The full name of the service which will host the IP address.
+Example: service.cloud.example.corp.
+Note: The trailing dot is important to Route53!
+</longdesc>
+<shortdesc lang="en">Full service name</shortdesc>
+<content type="string" default="" />
+</parameter>
+<parameter name="ttl" required="0">
+<longdesc lang="en">
+Time to live for Route53 ARECORD
+</longdesc>
+<shortdesc lang="en">ARECORD TTL</shortdesc>
+<content type="string" default="${OCF_RESKEY_ttl_default}" />
+</parameter>
+<parameter name="profile" required="1">
+<longdesc lang="en">
+The name of the AWS CLI profile of the root account. This
+profile will have to use the "text" format for CLI output.
+The file /root/.aws/config should have an entry which looks
+like:
+
+  [profile cluster]
+	region = us-east-1
+	output = text
+
+"cluster" is the name which has to be used in the cluster
+configuration. The region has to be the current one. The
+output has to be "text".
+</longdesc>
+<shortdesc lang="en">AWS Profile Name</shortdesc>
+<content type="string" default="" />
+</parameter>
+</parameters>
+<actions>
+<action name="start" timeout="180" />
+<action name="stop" timeout="180" />
+<action name="monitor" depth="0" timeout="180" interval="300" />
+<action name="validate-all" timeout="5" />
+<action name="meta-data" timeout="5" />
+</actions>
+</resource-agent>
+END
+}
+
+ec2ip_validate() {
+	ocf_log debug "function: validate"
+
+	# Full name
+	[[ -z "$OCF_RESKEY_fullname" ]] && ocf_log error "Full name parameter not set $OCF_RESKEY_fullname!" && exit $OCF_ERR_CONFIGURED
+
+	# Hosted Zone ID
+	[[ -z "$OCF_RESKEY_hostedzoneid" ]] && ocf_log error "Hosted Zone ID parameter not set $OCF_RESKEY_hostedzoneid!" && exit $OCF_ERR_CONFIGURED
+
+	# profile
+	[[ -z "$OCF_RESKEY_profile" ]] && ocf_log error "AWS CLI profile not set $OCF_RESKEY_profile!" && exit $OCF_ERR_CONFIGURED
+
+	# TTL
+	[[ -z "$OCF_RESKEY_ttl" ]] && ocf_log error "TTL not set $OCF_RESKEY_ttl!" && exit $OCF_ERR_CONFIGURED
+
+	ocf_log debug "Testing aws command"
+	aws --version 2>&1
+	if [ "$?" -gt 0 ]; then
+		ocf_log error "Error while executing aws command as user root! Please check if AWS CLI tools (Python flavor) are properly installed and configured." && exit $OCF_ERR_INSTALLED
+	fi
+	ocf_log debug "ok"
+
+	if [ -n "$OCF_RESKEY_profile" ]; then
+		AWS_PROFILE_OPT="--profile $OCF_RESKEY_profile"
+	else
+		AWS_PROFILE_OPT="--profile default"
+	fi
+
+	return $OCF_SUCCESS
+}
+
+ec2ip_monitor() {
+	ec2ip_validate
+	ocf_log debug "Checking Route53 record sets"
+	IPADDRESS="$(ec2metadata aws ip | grep local-ipv4 | /usr/bin/awk '{ print $2 }')"
+	ARECORD="$(aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query "ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" | grep RESOURCERECORDS | /usr/bin/awk '{ print $2 }' )"
+	ocf_log debug "Found IP address: $ARECORD ."
+	if [ "${ARECORD}" == "${IPADDRESS}" ]; then
+		ocf_log debug "ARECORD $ARECORD found"
+		return $OCF_SUCCESS
+	else
+		ocf_log debug "No ARECORD found"
+		return $OCF_NOT_RUNNING
+	fi
+
+	return $OCF_SUCCESS
+}
+
+_update_record() {
+	update_action="$1"
+	IPADDRESS="$2"
+	ocf_log info "Updating Route53 $OCF_RESKEY_hostedzoneid with $IPADDRESS for $OCF_RESKEY_fullname"
+	ROUTE53RECORD="$(maketempfile)"
+	if [ $? -ne 0 ] || [ -z "$ROUTE53RECORD" ]; then
+		ocf_exit_reason "Failed to create temporary file for record update"
+		exit $OCF_ERR_GENERIC
+	fi
+	cat >>"${ROUTE53RECORD}" <<-EOF
+	{
+		  "Comment": "Update record to reflect new IP address for a system ",
+		  "Changes": [
+			  {
+				  "Action": "${update_action}",
+				  "ResourceRecordSet": {
+					  "Name": "${OCF_RESKEY_fullname}",
+					  "Type": "A",
+					  "TTL": ${OCF_RESKEY_ttl},
+					  "ResourceRecords": [
+						  {
+							  "Value": "${IPADDRESS}"
+						  }
+					  ]
+				  }
+			  }
+		  ]
+	}
+	EOF
+	cmd="aws --profile ${OCF_RESKEY_profile} route53 change-resource-record-sets --hosted-zone-id ${OCF_RESKEY_hostedzoneid} \
+	  --change-batch file://${ROUTE53RECORD} "
+	ocf_log debug "Executing command: $cmd"
+	CHANGEID=$($cmd | grep CHANGEINFO |	 /usr/bin/awk -F'\t' '{ print $3 }' )
+	ocf_log debug "Change id: ${CHANGEID}"
+	rmtempfile ${ROUTE53RECORD}
+	CHANGEID=$(echo $CHANGEID |cut -d'/' -f 3 |cut -d'"' -f 1 )
+	ocf_log debug "Change id: ${CHANGEID}"
+	STATUS="PENDING"
+	MYSECONDS=2
+	while [ "$STATUS" = 'PENDING' ]; do
+		sleep	${MYSECONDS}
+		STATUS="$(aws --profile ${OCF_RESKEY_profile} route53 get-change --id $CHANGEID | grep CHANGEINFO |  /usr/bin/awk -F'\t' '{ print $4 }' |cut -d'"' -f 2 )"
+		ocf_log debug "Waited for ${MYSECONDS} seconds and checked execution of Route 53 update status: ${STATUS} "
+	done
+}
+
+ec2ip_stop() {
+	ocf_log info "Bringing down Route53 agent. (Will remove ARECORD)"
+	IPADDRESS="$(ec2metadata aws ip | grep local-ipv4 | /usr/bin/awk '{ print $2 }')"
+	ARECORD="$(aws $AWS_PROFILE_OPT route53 list-resource-record-sets --hosted-zone-id $OCF_RESKEY_hostedzoneid --query "ResourceRecordSets[?Name=='$OCF_RESKEY_fullname']" | grep RESOURCERECORDS | /usr/bin/awk '{ print $2 }' )"
+	ocf_log debug "Found IP address: $ARECORD ."
+	if [ ${ARECORD} != ${IPADDRESS} ]; then
+		ocf_log debug "No ARECORD found"
+		return $OCF_SUCCESS
+	else
+		# determine IP address
+		IPADDRESS="$(ec2metadata aws ip | grep local-ipv4 | /usr/bin/awk '{ print $2 }')"
+		# Patch file
+		ocf_log debug "Deleting IP address to ${IPADDRESS}"
+		return $OCF_SUCCESS
+	fi
+
+	_update_record "DELETE" "$IPADDRESS"
+	return $OCF_SUCCESS
+}
+
+ec2ip_start() {
+	IPADDRESS="$(ec2metadata aws ip | grep local-ipv4 | /usr/bin/awk '{ print $2 }')"
+	_update_record "UPSERT" "$IPADDRESS"
+	return $OCF_SUCCESS
+}
+
+###############################################################################
+
+case $__OCF_ACTION in
+	usage|help)
+		usage
+		exit $OCF_SUCCESS
+		;;
+	meta-data)
+		metadata
+		exit $OCF_SUCCESS
+		;;
+	monitor)
+		ec2ip_monitor
+		;;
+	stop)
+		ec2ip_stop
+		;;
+	validate-all)
+		ec2ip_validate
+		;;
+	start)
+		ec2ip_start
+		;;
+	*)
+		usage
+		exit $OCF_ERR_UNIMPLEMENTED
+		;;
+esac
diff --git a/heartbeat/awseip b/heartbeat/awseip
index 471954e..6b5fc5f 100755
--- a/heartbeat/awseip
+++ b/heartbeat/awseip
@@ -42,7 +42,7 @@
 #
 # Defaults
 #
-OCF_RESKEY_awscli_default="/usr/bin/awscli"
+OCF_RESKEY_awscli_default="/usr/bin/aws"
 OCF_RESKEY_api_delay_default="1"
 
 : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
@@ -56,9 +56,15 @@ meta_data() {
 <version>1.0</version>
 
 <longdesc lang="en">
-description
+Resource Agent for Amazon AWS Elastic IP Addresses.
+
+It manages AWS Elastic IP Addresses with awscli.
+
+Credentials need to be set up by running "aws configure".
+
+See https://aws.amazon.com/cli/ for more information about awscli.
 </longdesc>
-<shortdesc lang="en"></shortdesc>
+<shortdesc lang="en">Amazon AWS Elastic IP Address Resource Agent</shortdesc>
 
 <parameters>
 
@@ -78,7 +84,7 @@ reserved elastic ip for ec2 instance
 <content type="string" default="" />
 </parameter>
 
-<parameter name="allocation_id" unique="1" required="0">
+<parameter name="allocation_id" unique="1" required="1">
 <longdesc lang="en">
 reserved allocation id for ec2 instance
 </longdesc>
@@ -132,7 +138,7 @@ END
 awseip_start() {
     awseip_monitor && return $OCF_SUCCESS
 
-    if [ -n "${ALLOCATION_ID}" ] && [ -n "${PRIVATE_IP_ADDRESS}" ]; then
+    if [ -n "${PRIVATE_IP_ADDRESS}" ]; then
         $AWSCLI ec2 associate-address  \
             --instance-id ${INSTANCE_ID} \
             --network-interface-id ${NETWORK_ID} \
@@ -142,7 +148,7 @@ awseip_start() {
     else
         $AWSCLI ec2 associate-address  \
             --instance-id ${INSTANCE_ID} \
-            --public-ip ${ELASTIC_IP}
+            --allocation-id ${ALLOCATION_ID}
         RET=$?
     fi
 
@@ -160,8 +166,10 @@ awseip_start() {
 awseip_stop() {
     awseip_monitor || return $OCF_SUCCESS
 
+    ASSOCIATION_ID=$($AWSCLI ec2 describe-addresses \
+                         --allocation-id ${ALLOCATION_ID} | grep -m 1 "AssociationId" | awk -F'"' '{print$4}')
     $AWSCLI ec2 disassociate-address  \
-        --public-ip ${ELASTIC_IP}
+        --association-id ${ASSOCIATION_ID}
     RET=$?
 
     # delay to avoid sending request too fast
@@ -189,8 +197,8 @@ awseip_validate() {
     check_binary ${AWSCLI}
 
     if [ -z "${INSTANCE_ID}" ]; then
-        ocf_log info "instant_id could not been found, is this EC2 instance?"
-        return $OCF_GENERIC
+        ocf_exit_reason "instance_id not found. Is this a EC2 instance?"
+        return $OCF_ERR_GENERIC
     fi
 
     return $OCF_SUCCESS
@@ -203,7 +211,6 @@ case $__OCF_ACTION in
         ;;
 esac 
 
-: ${OCF_RESKEY_awscli="/usr/bin/aws"}
 AWSCLI="${OCF_RESKEY_awscli}"
 ELASTIC_IP="${OCF_RESKEY_elastic_ip}"
 ALLOCATION_ID="${OCF_RESKEY_allocation_id}"
@@ -213,6 +220,7 @@ NETWORK_ID="$($AWSCLI ec2 describe-instances --instance-id ${INSTANCE_ID} | grep
 
 case $__OCF_ACTION in
     start)
+        awseip_validate
         awseip_start
         ;;
     stop)
diff --git a/heartbeat/awsvip b/heartbeat/awsvip
index f8da8d3..f36ad45 100755
--- a/heartbeat/awsvip
+++ b/heartbeat/awsvip
@@ -42,7 +42,7 @@
 #
 # Defaults
 #
-OCF_RESKEY_awscli_default="/usr/bin/awscli"
+OCF_RESKEY_awscli_default="/usr/bin/aws"
 OCF_RESKEY_api_delay_default="1"
 
 : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
@@ -56,9 +56,15 @@ meta_data() {
 <version>1.0</version>
 
 <longdesc lang="en">
-description
+Resource Agent for Amazon AWS Secondary Private IP Addresses.
+
+It manages AWS Secondary Private IP Addresses with awscli.
+
+Credentials need to be set up by running "aws configure".
+
+See https://aws.amazon.com/cli/ for more information about awscli.
 </longdesc>
-<shortdesc lang="en"></shortdesc>
+<shortdesc lang="en">Amazon AWS Secondary Private IP Address Resource Agent</shortdesc>
 
 <parameters>
 
@@ -166,8 +172,8 @@ awsvip_validate() {
     check_binary ${AWSCLI}
 
     if [ -z "${INSTANCE_ID}" ]; then
-        ocf_log info "instant_id could not been found, is this EC2 instance?"
-        return $OCF_GENERIC
+        ocf_exit_reason "instance_id not found. Is this a EC2 instance?"
+        return $OCF_ERR_GENERIC
     fi
 
     return $OCF_SUCCESS
@@ -180,7 +186,6 @@ case $__OCF_ACTION in
         ;;
 esac
 
-: ${OCF_RESKEY_awscli="/usr/bin/aws"}
 AWSCLI="${OCF_RESKEY_awscli}"
 SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}"
 INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)"
@@ -188,6 +193,7 @@ NETWORK_ID="$($AWSCLI ec2 describe-instances --instance-id ${INSTANCE_ID} | grep
 
 case $__OCF_ACTION in
     start)
+        awsvip_validate
         awsvip_start
         ;;
     stop)
diff --git a/heartbeat/clvm b/heartbeat/clvm
index a778333..43b9302 100755
--- a/heartbeat/clvm
+++ b/heartbeat/clvm
@@ -78,7 +78,6 @@ is set to.
 <action name="start"        timeout="90" />
 <action name="stop"         timeout="90" />
 <action name="monitor"      timeout="90" interval="30" depth="0" />
-<action name="reload"       timeout="90" />
 <action name="meta-data"    timeout="10" />
 <action name="validate-all"   timeout="20" />
 </actions>
@@ -362,6 +361,12 @@ clvmd_start()
 		return $?
 	fi
 
+	# systemd drop-in to stop process before storage services during
+	# shutdown/reboot
+	if ps -p 1 | grep -q systemd ; then
+		systemd_drop_in "99-clvmd" "After" "blk-availability.service"
+	fi
+
 	clvmd_status
 	if [ $? -eq $OCF_SUCCESS ]; then
 		ocf_log debug "$DAEMON already started"
diff --git a/heartbeat/db2 b/heartbeat/db2
index c522699..63de315 100755
--- a/heartbeat/db2
+++ b/heartbeat/db2
@@ -650,7 +650,9 @@ db2_hadr_status() {
     fi
 
     echo "$output" |
-    awk '/^HADR is not active/ {print "Standard/Standalone"; exit; }
+    awk '/^\s+HADR_(ROLE|STATE) =/ {printf $3"/"}
+         /^\s+HADR_CONNECT_STATUS =/ {print $3; exit; }
+         /^HADR is not active/ {print "Standard/Standalone"; exit; }
          /^Role *State */ {getline; printf "%s/%s\n", $1, $2; exit; }'
 }
 
@@ -680,7 +682,7 @@ db2_monitor() {
 
         # set master preference accordingly
         case "$hadr" in
-            Primary/*|Standard/*)
+            PRIMARY/*|Primary/*|Standard/*)
             # perform  a basic health check
             CMD="if db2 connect to $db;
             then 
@@ -712,11 +714,11 @@ db2_monitor() {
             ocf_is_ms && master_score -v 10000 -l reboot
             ;;
 
-            Standby/*Peer)
+            STANDBY/PEER/*|Standby/*Peer)
             master_score -v 8000 -l reboot
             ;;
 
-            Standby/*)
+            STANDBY/*|Standby/*)
             ocf_log warn "DB2 database $instance($db2node)/$db in status $hadr can never be promoted"
             master_score -D -l reboot
             ;;
@@ -755,17 +757,17 @@ db2_promote() {
             return $OCF_SUCCESS
             ;;
 
-            Primary/Peer)
+            PRIMARY/PEER/*|PRIMARY/REMOTE_CATCHUP/*|Primary/Peer)
             # nothing to do, only update pacemaker's view
             echo MASTER > $STATE_FILE
             return $OCF_SUCCESS
             ;;
 
-            Standby/Peer)
+            STANDBY/PEER/CONNECTED|Standby/Peer)
             # must take over 
             ;;
 
-            Standby/DisconnectedPeer)
+            STANDBY/PEER/DISCONNECTED|Standby/DisconnectedPeer)
             # must take over forced 
             force="by force peer window only"
             ;;
diff --git a/heartbeat/docker b/heartbeat/docker
index 47f099e..49e7052 100755
--- a/heartbeat/docker
+++ b/heartbeat/docker
@@ -106,6 +106,15 @@ it has initialized.
 <content type="string"/>
 </parameter>
 
+<parameter name="mount_points" required="0" unique="0">
+<longdesc lang="en">
+A comma separated list of directories that the container is expecting to use.
+The agent will ensure they exist by running 'mkdir -p' 
+</longdesc>
+<shortdesc lang="en">Required mount points</shortdesc>
+<content type="string"/>
+</parameter>
+
 <parameter name="monitor_cmd" required="0" unique="0">
 <longdesc lang="en">
 Specifiy the full path of a command to launch within the container to check
@@ -146,6 +155,27 @@ will persist after the container stops.
 <content type="boolean"/>
 </parameter>
 
+<parameter name="query_docker_health" required="0" unique="0">
+<longdesc lang="en">
+Query the builtin healthcheck of docker (v1.12+) to determine health of the
+container. If left empty or set to false it will not be used.
+
+The healthcheck itself has to be configured within docker, e.g. via
+HEALTHCHECK in Dockerfile. This option just queries in what condition
+docker considers the container to be and lets ocf do its thing accordingly.
+
+Note that the time a container is in "starting" state counts against the
+monitor timeout.
+
+This is an additional check besides the standard check for the container
+to be running, and the optional monitor_cmd check. It doesn't disable or
+override them, so all of them (if used) have to come back healthy for the
+container to be considered healthy.
+</longdesc>
+<shortdesc lang="en">use healthcheck</shortdesc>
+<content type="boolean"/>
+</parameter>
+
 </parameters>
 
 <actions>
@@ -249,6 +279,46 @@ docker_simple_status()
 	return $OCF_NOT_RUNNING
 }
 
+docker_health_status()
+{
+
+	if ocf_is_true "$OCF_RESKEY_query_docker_health"; then
+                local val
+
+                container_exists
+                if [ $? -ne 0 ]; then
+                        return $OCF_NOT_RUNNING
+                fi
+
+                # retrieve the 'Health' attribute for the container
+                # This is a bash-style do-while loop to wait until instance is started.
+                # if starting takes longer than monitor timeout then upstream will make this fail.
+                while
+
+                        val=$(docker inspect --format {{.State.Health.Status}} $CONTAINER 2>/dev/null)
+                        if [ $? -ne 0 ]; then
+                                #not healthy as a result of container not being found
+                                return $OCF_NOT_RUNNING
+                        fi
+                        test "$val" = "starting"
+                do
+
+                        sleep 1
+                done
+
+                if [ "$val" = "healthy" ]; then
+                        # container exists and is healthy
+                        return $OCF_SUCCESS
+                fi
+
+                return $OCF_NOT_RUNNING
+	fi
+
+	return 0
+}
+
+
+
 docker_monitor()
 {
 	local rc=0
@@ -260,11 +330,28 @@ docker_monitor()
 		return $rc
 	fi
 
+	docker_health_status
+	rc=$?
+
+	if [ $rc -ne 0 ]; then
+		return $rc
+	fi
+
 	monitor_cmd_exec
 }
 
+docker_create_mounts() {
+	oldIFS="$IFS"
+	IFS=","
+	for directory in $OCF_RESKEY_mount_points; do
+		mkdir -p "$directory"
+	done
+	IFS="$oldIFS"
+}
+
 docker_start()
 {
+	docker_create_mounts
 	local run_opts="-d --name=${CONTAINER}"
 	# check to see if the container has already started
 	docker_simple_status
@@ -360,24 +447,31 @@ docker_stop()
 
 image_exists()
 {
-	# assume that OCF_RESKEY_name have been validated
-	local IMAGE_NAME="$(echo ${OCF_RESKEY_image} | awk -F':' '{print $1}')"
-
 	# if no tag was specified, use default "latest"
 	local COLON_FOUND=0
+	local SLASH_FOUND=0
+	local SERVER_NAME=""
+	local IMAGE_NAME="${OCF_RESKEY_image}"
 	local IMAGE_TAG="latest"
 
-	COLON_FOUND="$(echo "${OCF_RESKEY_image}" | grep -o ':' | grep -c .)"
+	SLASH_FOUND="$(echo "${OCF_RESKEY_image}" | grep -o '/' | grep -c .)"
+
+	if [ ${SLASH_FOUND} -ge 1 ]; then
+		SERVER_NAME="$(echo ${IMAGE_NAME} | cut -d / -f 1-${SLASH_FOUND})"
+		IMAGE_NAME="$(echo ${IMAGE_NAME} | awk -F'/' '{print $NF}')"
+	fi
 
-	if [ ${COLON_FOUND} -ne 0 ]; then
-		IMAGE_TAG="$(echo ${OCF_RESKEY_image} | awk -F':' '{print $NF}')"
+	COLON_FOUND="$(echo "${IMAGE_NAME}" | grep -o ':' | grep -c .)"
+	if [ ${COLON_FOUND} -ge 1 ]; then
+		IMAGE_TAG="$(echo ${IMAGE_NAME} | awk -F':' '{print $NF}')"
+		IMAGE_NAME="$(echo ${IMAGE_NAME} | cut -d : -f 1-${COLON_FOUND})"
 	fi
 
 	# IMAGE_NAME might be following formats:
 	# - image
-	# - repository/image
+	# - repository:port/image
 	# - docker.io/image (some distro will display "docker.io/" as prefix)
-	docker images | awk '{print $1 ":" $2}' | egrep -q -s "^(docker.io\/)?${IMAGE_NAME}:${IMAGE_TAG}\$"
+	docker images | awk '{print $1 ":" $2}' | egrep -q -s "^(docker.io\/|${SERVER_NAME}\/)?${IMAGE_NAME}:${IMAGE_TAG}\$"
 	if [ $? -eq 0 ]; then
 		# image found
 		return 0
diff --git a/heartbeat/exportfs b/heartbeat/exportfs
index 4c66b16..e70dbd3 100755
--- a/heartbeat/exportfs
+++ b/heartbeat/exportfs
@@ -79,8 +79,8 @@ The directory or directories to export.
 <parameter name="fsid" unique="1" required="1">
 <longdesc lang="en">
 The fsid option to pass to exportfs. This can be a unique positive
-integer, a UUID, or the special string "root" which is functionally
-identical to numeric fsid of 0.
+integer, a UUID (assuredly sans comma characters), or the special string
+"root" which is functionally identical to numeric fsid of 0.
 If multiple directories are being exported, then they are
 assigned ids sequentially starting with this fsid (fsid, fsid+1,
 fsid+2, ...). Obviously, in that case the fsid must be an
@@ -293,7 +293,7 @@ export_one() {
 	fi
 	if echo "$opts" | grep fsid >/dev/null; then
 		#replace fsid in options list
-		opts=`echo "$opts" | sed "s/fsid=[0-9]\+/fsid=$(get_fsid)/g"`
+		opts=`echo "$opts" | sed "s,fsid=[^,]*,fsid=$(get_fsid),g"`
 	else
 		#tack the fsid option onto our options list.
 		opts="${opts}${sep}fsid=$(get_fsid)"
@@ -420,6 +420,10 @@ testdir() {
 }
 exportfs_validate_all ()
 {
+	if echo "$OCF_RESKEY_fsid" | grep -q -F ','; then
+		ocf_exit_reason "$OCF_RESKEY_fsid cannot contain a comma"
+		return $OCF_ERR_CONFIGURED
+	fi
 	if [ $NUMDIRS -gt 1 ] &&
 			! ocf_is_decimal "$OCF_RESKEY_fsid"; then
 		ocf_exit_reason "use integer fsid when exporting multiple directories"
diff --git a/heartbeat/galera b/heartbeat/galera
index 0cab9a4..ee84514 100755
--- a/heartbeat/galera
+++ b/heartbeat/galera
@@ -68,6 +68,8 @@
 . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
 . ${OCF_FUNCTIONS_DIR}/mysql-common.sh
 
+NODENAME=$(ocf_attribute_target)
+
 # It is common for some galera instances to store
 # check user that can be used to query status
 # in this file
@@ -279,7 +281,7 @@ get_status_variable()
 
 set_bootstrap_node()
 {
-    local node=$1
+    local node=$(ocf_attribute_target $1)
 
     ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-bootstrap" -v "true"
 }
@@ -307,7 +309,7 @@ clear_no_grastate()
 
 is_no_grastate()
 {
-    local node=$1
+    local node=$(ocf_attribute_target $1)
     ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-no-grastate" -Q 2>/dev/null
 }
 
@@ -323,7 +325,7 @@ set_last_commit()
 
 get_last_commit()
 {
-    local node=$1
+    local node=$(ocf_attribute_target $1)
 
     if [ -z "$node" ]; then
        ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-last-committed" -Q 2>/dev/null
@@ -332,6 +334,27 @@ get_last_commit()
     fi
 }
 
+clear_safe_to_bootstrap()
+{
+    ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-safe-to-bootstrap" -D
+}
+
+set_safe_to_bootstrap()
+{
+    ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-safe-to-bootstrap" -v $1
+}
+
+get_safe_to_bootstrap()
+{
+    local node=$(ocf_attribute_target $1)
+
+    if [ -z "$node" ]; then
+        ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "${INSTANCE_ATTR_NAME}-safe-to-bootstrap" -Q 2>/dev/null
+    else
+        ${HA_SBIN_DIR}/crm_attribute -N $node -l reboot --name "${INSTANCE_ATTR_NAME}-safe-to-bootstrap" -Q 2>/dev/null
+    fi
+}
+
 wait_for_sync()
 {
     local state=$(get_status_variable "wsrep_local_state")
@@ -386,13 +409,13 @@ master_exists()
         return 1
     fi
     # determine if a master instance is already up and is healthy
-    crm_mon --as-xml | grep "resource.*id=\"${OCF_RESOURCE_INSTANCE}\".*role=\"Master\".*active=\"true\".*orphaned=\"false\".*failed=\"false\"" > /dev/null 2>&1
+    crm_mon --as-xml | grep "resource.*id=\"${INSTANCE_ATTR_NAME}\".*role=\"Master\".*active=\"true\".*orphaned=\"false\".*failed=\"false\"" > /dev/null 2>&1
     return $?
 }
 
 clear_master_score()
 {
-    local node=$1
+    local node=$(ocf_attribute_target $1)
     if [ -z "$node" ]; then
         $CRM_MASTER -D
     else 
@@ -402,7 +425,7 @@ clear_master_score()
 
 set_master_score()
 {
-    local node=$1
+    local node=$(ocf_attribute_target $1)
 
     if [ -z "$node" ]; then
         $CRM_MASTER -v 100
@@ -415,6 +438,13 @@ promote_everyone()
 {
 
     for node in $(echo "$OCF_RESKEY_wsrep_cluster_address" | sed 's/gcomm:\/\///g' | tr -d ' ' | tr -s ',' ' '); do
+        local pcmk_node=$(galera_to_pcmk_name $node)
+        if [ -z "$pcmk_node" ]; then
+            ocf_log err "Could not determine pacemaker node from galera name <${node}>."
+            return
+        else
+            node=$pcmk_node
+        fi
 
         set_master_score $node
     done
@@ -451,17 +481,28 @@ pcmk_to_galera_name()
 detect_first_master()
 {
     local best_commit=0
-    local best_node="$NODENAME"
     local last_commit=0
     local missing_nodes=0
     local nodes=""
     local nodes_recovered=""
+    local all_nodes
+    local best_node_gcomm
+    local best_node
+    local safe_to_bootstrap
+
+    all_nodes=$(echo "$OCF_RESKEY_wsrep_cluster_address" | sed 's/gcomm:\/\///g' | tr -d ' ' | tr -s ',' ' ')
+    best_node_gcomm=$(echo "$all_nodes" | sed 's/^.* \(.*\)$/\1/')
+    best_node=$(galera_to_pcmk_name $best_node_gcomm)
+    if [ -z "$best_node" ]; then
+        ocf_log err "Could not determine initial best node from galera name <${best_node_gcomm}>."
+        return
+    fi
 
     # avoid selecting a recovered node as bootstrap if possible
-    for node in $(echo "$OCF_RESKEY_wsrep_cluster_address" | sed 's/gcomm:\/\///g' | tr -d ' ' | tr -s ',' ' '); do
+    for node in $all_nodes; do
         local pcmk_node=$(galera_to_pcmk_name $node)
         if [ -z "$pcmk_node" ]; then
-            ocf_log error "Could not determine pacemaker node from galera name <${node}>."
+            ocf_log err "Could not determine pacemaker node from galera name <${node}>."
             return
         else
             node=$pcmk_node
@@ -475,6 +516,19 @@ detect_first_master()
     done
 
     for node in $nodes_recovered $nodes; do
+        safe_to_bootstrap=$(get_safe_to_bootstrap $node)
+
+        if [ "$safe_to_bootstrap" = "1" ]; then
+            # Galera marked the node as safe to boostrap during shutdown. Let's just
+            # pick it as our bootstrap node.
+            ocf_log info "Node <${node}> is marked as safe to bootstrap."
+            best_node=$node
+
+            # We don't need to wait for the other nodes to report state in this case
+            missing_nodes=0
+            break
+        fi
+
         last_commit=$(get_last_commit $node)
 
         if [ -z "$last_commit" ]; then
@@ -490,7 +544,7 @@ detect_first_master()
 
         greater_than_equal_long "$last_commit" "$best_commit"
         if [ $? -eq 0 ]; then
-            best_node=$node
+            best_node=$(ocf_attribute_target $node)
             best_commit=$last_commit
         fi
 
@@ -505,6 +559,22 @@ detect_first_master()
     set_bootstrap_node $best_node
 }
 
+detect_safe_to_bootstrap()
+{
+    local safe_to_bootstrap=""
+
+    if [ -f ${OCF_RESKEY_datadir}/grastate.dat ]; then
+        ocf_log info "attempting to read safe_to_bootstrap flag from ${OCF_RESKEY_datadir}/grastate.dat"
+        safe_to_bootstrap=$(sed -n 's/^safe_to_bootstrap:\s*\(.*\)$/\1/p' < ${OCF_RESKEY_datadir}/grastate.dat)
+    fi
+
+    if [ "$safe_to_bootstrap" = "1" ] || [ "$safe_to_bootstrap" = "0" ]; then
+        set_safe_to_bootstrap $safe_to_bootstrap
+    else
+        clear_safe_to_bootstrap
+    fi
+}
+
 detect_last_commit()
 {
     local last_commit
@@ -516,10 +586,22 @@ detect_last_commit()
     local recovery_file_regex='s/.*WSREP\:.*position\s*recovery.*--log_error='\''\([^'\'']*\)'\''.*/\1/p'
     local recovered_position_regex='s/.*WSREP\:\s*[R|r]ecovered\s*position.*\:\(.*\)\s*$/\1/p'
 
+    # codership/galera#354
+    # Some ungraceful shutdowns can leave an empty gvwstate.dat on
+    # disk. This will prevent galera to join the cluster if it is
+    # configured to attempt PC recovery. Removing that file makes the
+    # node fall back to the normal, unoptimized joining process.
+    if [ -f ${OCF_RESKEY_datadir}/gvwstate.dat ] && \
+       [ ! -s ${OCF_RESKEY_datadir}/gvwstate.dat ]; then
+        ocf_log warn "empty ${OCF_RESKEY_datadir}/gvwstate.dat detected, removing it to prevent PC recovery failure at next restart"
+        rm -f ${OCF_RESKEY_datadir}/gvwstate.dat
+    fi
+
     ocf_log info "attempting to detect last commit version by reading ${OCF_RESKEY_datadir}/grastate.dat"
     last_commit="$(cat ${OCF_RESKEY_datadir}/grastate.dat | sed -n 's/^seqno.\s*\(.*\)\s*$/\1/p')"
     if [ -z "$last_commit" ] || [ "$last_commit" = "-1" ]; then
         local tmp=$(mktemp)
+        chown $OCF_RESKEY_user:$OCF_RESKEY_group $tmp
 
         # if we pass here because grastate.dat doesn't exist,
         # try not to bootstrap from this node if possible
@@ -578,7 +660,7 @@ galera_promote()
     local rc
     local extra_opts
     local bootstrap
-    
+    local safe_to_bootstrap
     master_exists
     if [ $? -eq 0 ]; then
         # join without bootstrapping
@@ -587,6 +669,11 @@ galera_promote()
         bootstrap=$(is_bootstrap)
 
         if ocf_is_true $bootstrap; then
+            # The best node for bootstrapping wasn't cleanly shut down. Allow
+            # bootstrapping anyway
+            if [ "$(get_safe_to_bootstrap)" = "0" ]; then
+                sed -ie 's/^\(safe_to_bootstrap:\) 0/\1 1/' ${OCF_RESKEY_datadir}/grastate.dat
+            fi
             ocf_log info "Node <${NODENAME}> is bootstrapping the cluster"
             extra_opts="--wsrep-cluster-address=gcomm://"
         else
@@ -603,12 +690,14 @@ galera_promote()
             clear_bootstrap_node
             ocf_log info "boostrap node already up, promoting the rest of the galera instances."
         fi
+        clear_safe_to_bootstrap
         clear_last_commit
         return $OCF_SUCCESS
     fi
 
-    # last commit is no longer relevant once promoted
+    # last commit/safe_to_bootstrap flag are no longer relevant once promoted
     clear_last_commit
+    clear_safe_to_bootstrap
 
     mysql_common_prepare_dirs
     mysql_common_start "$extra_opts"
@@ -669,6 +758,7 @@ galera_demote()
     clear_bootstrap_node
     clear_last_commit
     clear_no_grastate
+    clear_safe_to_bootstrap
 
     # Clear master score here rather than letting pacemaker do so once
     # demote finishes. This way a promote cannot take place right
@@ -678,6 +768,7 @@ galera_demote()
     clear_master_score
 
     # record last commit for next promotion
+    detect_safe_to_bootstrap
     detect_last_commit
     rc=$?
     return $rc
@@ -708,6 +799,7 @@ galera_start()
 
     mysql_common_prepare_dirs
 
+    detect_safe_to_bootstrap
     detect_last_commit
     rc=$?
     if [ $rc -ne $OCF_SUCCESS ]; then
@@ -797,6 +889,7 @@ galera_stop()
     mysql_common_stop
     rc=$1
 
+    clear_safe_to_bootstrap
     clear_last_commit
     clear_master_score
     clear_bootstrap_node
diff --git a/heartbeat/iSCSILogicalUnit b/heartbeat/iSCSILogicalUnit
index 0a07c5f..11bee9c 100755
--- a/heartbeat/iSCSILogicalUnit
+++ b/heartbeat/iSCSILogicalUnit
@@ -56,7 +56,7 @@ OCF_RESKEY_scsi_id_default="${OCF_RESOURCE_INSTANCE:0:16}"
 : ${OCF_RESKEY_scsi_id=${OCF_RESKEY_scsi_id_default}}
 # To have a reasonably unique default SCSI SN, use the first 8 bytes
 # of an MD5 hash of of $OCF_RESOURCE_INSTANCE
-sn=`echo -n "${OCF_RESOURCE_INSTANCE}" | openssl md5 | sed -e 's/(stdin)= //'`
+sn=`echo -n "${OCF_RESOURCE_INSTANCE}" | md5sum | sed -e 's/ .*//'`
 OCF_RESKEY_scsi_sn_default=${sn:0:8}
 : ${OCF_RESKEY_scsi_sn=${OCF_RESKEY_scsi_sn_default}}
 # set 0 as a default value for lio iblock device number
@@ -75,6 +75,8 @@ OCF_RESKEY_lio_iblock=${OCF_RESKEY_lio_iblock:-$OCF_RESKEY_lio_iblock_default}
 # OCF_RESKEY_tgt_bsopts
 # OCF_RESKEY_tgt_device_type
 
+# targetcli: iSCSITarget and iSCSILogicalUnit must use the same lockfile
+TARGETLOCKFILE=${HA_RSCTMP}/targetcli.lock
 #######################################################################
 
 meta_data() {
@@ -145,6 +147,33 @@ The default is a hash of the resource name, truncated to 8 bytes.
 <content type="string" default="${OCF_RESKEY_scsi_sn_default}"/>
 </parameter>
 
+<parameter name="emulate_tpu" required="0" unique="0">
+<longdesc lang="en">
+The SCSI UNMAP command to be configured for this Logical Unit.
+Setting this integer to 1 will enable TPU IOCTL emulation.
+</longdesc>
+<shortdesc lang="en">SCSI UNMAP (for TRIM / DISCARD)</shortdesc>
+<content type="integer" />
+</parameter>
+
+<parameter name="emulate_3pc" required="0" unique="0">
+<longdesc lang="en">
+The SCSI EXTENDED COPY command to be configured for this Logical Unit.
+Setting this integer to 1 will enable 3PC IOCTL emulation.
+</longdesc>
+<shortdesc lang="en">SCSI extended write</shortdesc>
+<content type="integer" />
+</parameter>
+
+<parameter name="emulate_caw" required="0" unique="0">
+<longdesc lang="en">
+The SCSI Compare and Write command to be configured for this Logical Unit.
+Setting this integer to 1 will enable CAW IOCTL emulation.
+</longdesc>
+<shortdesc lang="en">SCSI compare and write</shortdesc>
+<content type="integer" />
+</parameter>
+
 <parameter name="vendor_id" required="0" unique="0">
 <longdesc lang="en">
 The SCSI vendor ID to be configured for this Logical Unit.
@@ -371,6 +400,9 @@ iSCSILogicalUnit_start() {
 		fi
 		;;
 	lio-t)
+		ocf_take_lock $TARGETLOCKFILE
+		ocf_release_lock_on_exit $TARGETLOCKFILE
+		iblock_attrib_path="/sys/kernel/config/target/core/iblock_${OCF_RESKEY_lio_iblock}/${OCF_RESOURCE_INSTANCE}/attrib"
 		# For lio, we first have to create a target device, then
 		# add it to the Target Portal Group as an LU.
 		ocf_run targetcli /backstores/block create name=${OCF_RESOURCE_INSTANCE} dev=${OCF_RESKEY_path} || exit $OCF_ERR_GENERIC
@@ -379,12 +411,27 @@ iSCSILogicalUnit_start() {
 		fi
 		ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/luns create /backstores/block/${OCF_RESOURCE_INSTANCE} ${OCF_RESKEY_lun} || exit $OCF_ERR_GENERIC
 
+		if $(ip a | grep -q inet6); then
+			ocf_run -q targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/portals delete 0.0.0.0 3260
+			ocf_run -q targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/portals create ::0
+		fi
+
 		if [ -n "${OCF_RESKEY_allowed_initiators}" ]; then
 			for initiator in ${OCF_RESKEY_allowed_initiators}; do
 				ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls create ${initiator} add_mapped_luns=False || exit $OCF_ERR_GENERIC
 				ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls/${initiator} create ${OCF_RESKEY_lun} ${OCF_RESKEY_lun} || exit $OCF_ERR_GENERIC
 			done
 		fi
+
+		if [ -n "${OCF_RESKEY_emulate_tpu}" ]; then
+			echo ${OCF_RESKEY_emulate_tpu} > ${iblock_attrib_path}/emulate_tpu || exit $OCF_ERR_GENERIC
+		fi
+		if [ -n "${OCF_RESKEY_emulate_3pc}" ]; then
+			echo ${OCF_RESKEY_emulate_3pc} > ${iblock_attrib_path}/emulate_3pc || exit $OCF_ERR_GENERIC
+		fi
+		if [ -n "${OCF_RESKEY_emulate_caw}" ]; then
+			echo ${OCF_RESKEY_emulate_caw} > ${iblock_attrib_path}/emulate_caw || exit $OCF_ERR_GENERIC
+		fi
 		;;
 	esac
 
@@ -440,6 +487,8 @@ iSCSILogicalUnit_stop() {
 		fi
 		;;
 	lio-t)
+		ocf_take_lock $TARGETLOCKFILE
+		ocf_release_lock_on_exit $TARGETLOCKFILE
 		# "targetcli delete" will fail if the LUN is already
 		# gone. Log a warning and still push ahead.
 		ocf_run -warn targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/luns delete ${OCF_RESKEY_lun}
@@ -594,13 +643,13 @@ iSCSILogicalUnit_validate() {
 	iet)
 		# IET does not support setting the vendor and product ID
 		# (it always uses "IET" and "VIRTUAL-DISK")
-		unsupported_params="vendor_id product_id allowed_initiators lio_iblock tgt_bstype tgt_bsoflags tgt_bsopts tgt_device_type"
+		unsupported_params="vendor_id product_id allowed_initiators lio_iblock tgt_bstype tgt_bsoflags tgt_bsopts tgt_device_type emulate_tpu emulate_3pc emulate_caw"
 		;;
 	tgt)
-		unsupported_params="allowed_initiators lio_iblock"
+		unsupported_params="allowed_initiators lio_iblock emulate_tpu emulate_3pc emulate_caw"
 		;;
 	lio)
-		unsupported_params="scsi_id vendor_id product_id tgt_bstype tgt_bsoflags tgt_bsopts tgt_device_type"
+		unsupported_params="scsi_id vendor_id product_id tgt_bstype tgt_bsoflags tgt_bsopts tgt_device_type emulate_tpu emulate_3pc emulate_caw"
 		;;
 	lio-t)
 		unsupported_params="scsi_id vendor_id product_id tgt_bstype tgt_bsoflags tgt_bsopts tgt_device_type lio_iblock"
diff --git a/heartbeat/iSCSITarget b/heartbeat/iSCSITarget
index 08a7656..2f84220 100755
--- a/heartbeat/iSCSITarget
+++ b/heartbeat/iSCSITarget
@@ -50,6 +50,9 @@ OCF_RESKEY_portals_default="0.0.0.0:3260"
 
 # Lockfile, used for selecting a target ID
 LOCKFILE=${HA_RSCTMP}/iSCSITarget-${OCF_RESKEY_implementation}.lock
+
+# targetcli: iSCSITarget and iSCSILogicalUnit must use the same lockfile
+TARGETLOCKFILE=${HA_RSCTMP}/targetcli.lock
 #######################################################################
 
 meta_data() {
@@ -334,6 +337,8 @@ iSCSITarget_start() {
 		# number 1. In lio, creating a network portal
 		# automatically creates the corresponding target if it
 		# doesn't already exist.
+		ocf_take_lock $TARGETLOCKFILE
+		ocf_release_lock_on_exit $TARGETLOCKFILE
 		ocf_run targetcli /iscsi set global auto_add_default_portal=false || exit $OCF_ERR_GENERIC
 		ocf_run targetcli /iscsi create ${OCF_RESKEY_iqn} || exit $OCF_ERR_GENERIC
 		for portal in ${OCF_RESKEY_portals}; do
@@ -499,6 +504,8 @@ iSCSITarget_stop() {
 		ocf_run lio_node --deliqn ${OCF_RESKEY_iqn} || exit $OCF_ERR_GENERIC
 		;;
 	lio-t)
+		ocf_take_lock $TARGETLOCKFILE
+		ocf_release_lock_on_exit $TARGETLOCKFILE
 		ocf_run targetcli /iscsi delete ${OCF_RESKEY_iqn} || exit $OCF_ERR_GENERIC
 		;;
 	esac
diff --git a/heartbeat/kamailio b/heartbeat/kamailio
index 0968ac6..e05c3b6 100755
--- a/heartbeat/kamailio
+++ b/heartbeat/kamailio
@@ -36,8 +36,11 @@
 #  OCF_RESKEY_port
 #  OCF_RESKEY_proto
 #  OCF_RESKEY_sipsak
+#  OCF_RESKEY_kamctl
 #  OCF_RESKEY_kamctlrc
 #  OCF_RESKEY_kamuser
+#  OCF_RESKEY_kamgroup
+#  OCF_RESKEY_extra_options
 
 # Initialization:
 
@@ -54,8 +57,11 @@ RESKEY_monitoring_ip_default=127.0.0.1
 RESKEY_port_default=5060
 RESKEY_proto_default="udptcp"
 RESKEY_sipsak_default="/usr/bin/sipsak"
+RESKEY_kamctl_default="/usr/bin/kamctl"
 RESKEY_kamctlrc_default="/etc/kamailio/kamctlrc"
 RESKEY_kamuser_default=""
+RESKEY_kamgroup_default=""
+RESKEY_extra_options_default=""
 
 #######################################################################
 : ${OCF_RESKEY_binary=${RESKEY_binary_default}}
@@ -65,19 +71,22 @@ RESKEY_kamuser_default=""
 : ${OCF_RESKEY_port=${RESKEY_port_default}}
 : ${OCF_RESKEY_proto=${RESKEY_proto_default}}
 : ${OCF_RESKEY_sipsak=${RESKEY_sipsak_default}}
+: ${OCF_RESKEY_kamctl=${RESKEY_kamctl_default}}
 : ${OCF_RESKEY_kamctlrc=${RESKEY_kamctlrc_default}}
 : ${OCF_RESKEY_kamuser=${RESKEY_kamuser_default}}
+: ${OCF_RESKEY_kamgroup=${RESKEY_kamgroup_default}}
+: ${OCF_RESKEY_extra_options=${RESKEY_extra_options_default}}
 
 #######################################################################
 usage() {
-	cat <<END
+  cat <<END
 usage: $0 {start|stop|status|monitor|validate-all|meta-data}
 Expects to have a fully populated OCF RA-compliant environment set.
 END
 }
 
 meta_data() {
-	cat <<END
+  cat <<END
 <?xml version="1.0"?>
 <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
 <resource-agent name="kamailio">
@@ -126,7 +135,7 @@ Parameters for a third Kamailio instance:
 
   <parameter name="conffile" unique="0" required="0">
    <longdesc lang="en">
-    The kamailio configuration file name with full path. 
+    The kamailio configuration file name with full path.
     For example, "/etc/kamailio/kamailio.cfg" , which is the default value.
     Make sure to use unique names in case of having multiple instances.
    </longdesc>
@@ -188,9 +197,21 @@ Parameters for a third Kamailio instance:
    <content type="string" default="${RESKEY_port_default}" />
   </parameter>
 
+  <parameter name="extra_options" unique="0" required="0">
+   <longdesc lang="en">
+    Extra options to add to kamailio start.
+   </longdesc>
+   <shortdesc lang="en">extra_options</shortdesc>
+   <content type="string" default="${RESKEY_extra_options_default}" />
+  </parameter>
+
+
   <parameter name="proto" unique="0" required="0">
    <longdesc lang="en">
-    The protocol used for SIP proto  =  udp|tcp|udptcp. 
+    The protocol used for SIP proto  =  udp|tcp|udptcp|conf_udp|conf_tcp|conf_udptcp.
+    Using the options "conf_*" does not add any "-l" parameters to the kamailio command,
+    the "listen" parameters from kamailio.conf are used instead. The sipsak checks are
+    performed depending what protocol is defined after the underscore.
    </longdesc>
    <shortdesc lang="en">protocol</shortdesc>
    <content type="string" default="${RESKEY_proto_default}" />
@@ -201,10 +222,18 @@ Parameters for a third Kamailio instance:
     The installation path of the sipsak tool, which is used 
     for monitoring Kamailio via SIP OPTIONS polling. 
    </longdesc>
-   <shortdesc lang="en">protocol</shortdesc>
+   <shortdesc lang="en">sipsak path</shortdesc>
    <content type="string" default="${RESKEY_sipsak_default}" />
   </parameter>
 
+  <parameter name="kamctl" unique="0" required="0">
+   <longdesc lang="en">
+    The installation path of the "kamctl" control tool. 
+   </longdesc>
+   <shortdesc lang="en">kamctl path</shortdesc>
+   <content type="string" default="${RESKEY_kamctl_default}" />
+  </parameter>
+
   <parameter name="kamctlrc" unique="0" required="0">
    <longdesc lang="en">
     The location of the "kamctlrc" file for the Kamailio instance.
@@ -232,7 +261,7 @@ Parameters for a third Kamailio instance:
     configured per cluster node.
 
    </longdesc>
-   <shortdesc lang="en">protocol</shortdesc>
+   <shortdesc lang="en">kamctlrc path</shortdesc>
    <content type="string" default="${RESKEY_kamctlrc_default}" />
   </parameter>
 
@@ -245,6 +274,15 @@ Parameters for a third Kamailio instance:
    <shortdesc lang="en">kamailio user</shortdesc>
    <content type="string" default="${RESKEY_kamuser_default}" />
   </parameter>
+
+  <parameter name="kamgroup" unique="0" required="0">
+   <longdesc lang="en">
+    The group for kamailio process to run with.
+    Uses the current group, if not specified or empty.
+   </longdesc>
+   <shortdesc lang="en">kamailio group</shortdesc>
+   <content type="string" default="${RESKEY_kamgroup_default}" />
+  </parameter>
 </parameters>
 
  <actions>
@@ -321,11 +359,27 @@ kamailio_cmd()
               listen_param2="-l tcp:${OCF_RESKEY_listen_address}:${OCF_RESKEY_port} -l tcp:127.0.0.1:${OCF_RESKEY_port}"
               listen_param="${listen_param1} ${listen_param2}"
            ;;
+    conf_*)
+           # doing nothing, no listen_param set
+           ;;
     *)  listen_param="-T"
            ;;
     esac
 
-    kam_cmd="${OCF_RESKEY_binary} -P ${OCF_RESKEY_pidfile} -f ${OCF_RESKEY_conffile} $listen_param"
+    kam_cmd="${OCF_RESKEY_binary} -P ${OCF_RESKEY_pidfile} -f ${OCF_RESKEY_conffile}"
+
+    if [ -n "${listen_param}" ]; then
+        kam_cmd="${kam_cmd} ${listen_param}"
+    fi
+    if [ -n "${OCF_RESKEY_kamuser}" ]; then
+        kam_cmd="${kam_cmd} -u ${OCF_RESKEY_kamuser}"
+    fi
+    if [ -n "${OCF_RESKEY_kamgroup}" ]; then
+        kam_cmd="${kam_cmd} -g ${OCF_RESKEY_kamgroup}"
+    fi
+    if [ -n "${OCF_RESKEY_extra_options}" ]; then
+        kam_cmd="${kam_cmd} ${OCF_RESKEY_extra_options}"
+    fi
 }
 
 ###
@@ -364,7 +418,6 @@ kamailio_get_pid() {
         return 2
     fi
 
-
     # No PID file found!
     # Check if still a process exists even though we don't have the PID any longer:
     kamailio_cmd
@@ -414,7 +467,7 @@ kamailio_status() {
         # to be avoided.
         # In order to be on the safe side, we run this check therefore under "timeout" control:
         rc=1
-        timeout 3 kamctl monitor 1 |grep "Up since" ; rc=$?
+        timeout 3 ${OCF_RESKEY_kamctl} monitor 1 |grep "since" ; rc=$?
     fi
 
     if [ $rc -ne 0 ]; then
diff --git a/heartbeat/lvm-clvm.sh b/heartbeat/lvm-clvm.sh
new file mode 100644
index 0000000..9bc0f75
--- /dev/null
+++ b/heartbeat/lvm-clvm.sh
@@ -0,0 +1,86 @@
+# lvm-clvm.sh
+#
+# Description: LVM management with clvmd
+#
+#
+# Author:      Resource agents contributors
+#              Interface to LVM by Dejan Muhamedagic
+# Support:     users at clusterlabs.org
+# License:     GNU General Public License (GPL)
+# Copyright:   (C) 2017 Dejan Muhamedagic
+#
+
+##
+# Attempt to deactivate vg cluster wide and then start the vg exclusively
+##
+retry_exclusive_start()
+{
+	# Deactivate each LV in the group one by one cluster wide
+	set -- $(lvs -o name,attr --noheadings $OCF_RESKEY_volgrpname 2> /dev/null)
+	while [ $# -ge 2 ]; do
+		case $2 in
+		????ao*)
+			# open LVs cannot be deactivated.
+			return $OCF_ERR_GENERIC;;
+		*)
+			if ! lvchange -an $OCF_RESKEY_volgrpname/$1; then
+				ocf_exit_reason "Unable to perform required deactivation of $OCF_RESKEY_volgrpname/$1 before starting"
+				return $OCF_ERR_GENERIC
+			fi
+			;;
+		esac
+		shift 2
+	done
+
+	ocf_run vgchange $vgchange_activate_options $OCF_RESKEY_volgrpname
+}
+
+#
+# the interface to the LVM RA
+#
+
+lvm_init() {
+	vgchange_activate_options="-aey"
+	vgchange_deactivate_options="-an"
+}
+
+lvm_validate_all() {
+	if ! ps -C clvmd > /dev/null 2>&1; then
+		ocf_exit_reason "$OCF_RESKEY_volgrpname has the cluster attribute set, but 'clvmd' is not running"
+		exit $OCF_ERR_GENERIC
+	fi
+}
+
+lvm_status() {
+	return 0
+}
+
+lvm_pre_activate() {
+	return 0
+}
+
+lvm_post_activate() {
+	local rc=$1
+	if [ $rc -ne 0 ]; then
+		# Failure to exclusively activate cluster VG:
+		# This could be caused by a remotely active LV, Attempt
+		# to disable volume group cluster wide and try again.
+		# Allow for some settling
+		sleep 5
+		if ! retry_exclusive_start; then
+			return $OCF_ERR_GENERIC
+		fi
+	fi
+	return $rc
+}
+
+lvm_pre_deactivate() {
+	return 0
+}
+
+lvm_post_deactivate() {
+	local rc=$1
+	return $rc
+}
+
+# vim:tabstop=4:shiftwidth=4:textwidth=0:wrapmargin=0
diff --git a/heartbeat/lvm-plain.sh b/heartbeat/lvm-plain.sh
new file mode 100644
index 0000000..f533285
--- /dev/null
+++ b/heartbeat/lvm-plain.sh
@@ -0,0 +1,62 @@
+# lvm-plain.sh
+#
+# Description: LVM management with no VG protection
+#
+#
+# Author:      Dejan Muhamedagic
+# Support:     users at clusterlabs.org
+# License:     GNU General Public License (GPL)
+# Copyright:   (C) 2017 Dejan Muhamedagic
+#
+
+#
+# interface to the LVM RA
+#
+
+# apart from the standard vgchange options,
+# this is mostly a template
+# please copy and modify appropriately
+# when adding new VG protection mechanisms
+
+# lvm_init sets the vgchange options:
+#   vgchange_activate_options
+#   vgchange_deactivate_options
+# (for both activate and deactivate)
+
+lvm_init() {
+	vgchange_activate_options="-aly"
+	vgchange_deactivate_options="-aln"
+	# for clones (clustered volume groups), we'll also have to force
+	# monitoring, even if disabled in lvm.conf.
+	if ocf_is_clone; then
+		vgchange_activate_options="$vgchange_activate_options --monitor y"
+	fi
+}
+
+lvm_validate_all() {
+	: nothing to validate
+}
+
+lvm_status() {
+	return 0
+}
+
+lvm_pre_activate() {
+	return 0
+}
+
+lvm_post_activate() {
+	local rc=$1
+	return $rc
+}
+
+lvm_pre_deactivate() {
+	return 0
+}
+
+lvm_post_deactivate() {
+	local rc=$1
+	return $rc
+}
+
+# vim:tabstop=4:shiftwidth=4:textwidth=0:wrapmargin=0
diff --git a/heartbeat/lvm-tag.sh b/heartbeat/lvm-tag.sh
new file mode 100644
index 0000000..71f53b2
--- /dev/null
+++ b/heartbeat/lvm-tag.sh
@@ -0,0 +1,205 @@
+# lvm-tag.sh
+#
+# Description: LVM management with tags
+#
+#
+# Author:      David Vossel
+#              Interface to LVM by Dejan Muhamedagic
+# Support:     users at clusterlabs.org
+# License:     GNU General Public License (GPL)
+# Copyright:   (C) 2017 Dejan Muhamedagic
+#
+
+##
+# Verify tags setup
+##
+
+verify_tags_environment()
+{
+	##
+	# The volume_list must be initialized to something in order to
+	# guarantee our tag will be filtered on startup
+	##
+	if ! lvm dumpconfig activation/volume_list; then
+		ocf_log err  "LVM: Improper setup detected"
+		ocf_exit_reason "The volume_list filter must be initialized in lvm.conf for exclusive activation without clvmd"
+		return $OCF_ERR_GENERIC
+	fi
+
+	##
+	# Our tag must _NOT_ be in the volume_list.  This agent
+	# overrides the volume_list during activation using the
+	# special tag reserved for cluster activation
+	##
+	if lvm dumpconfig activation/volume_list | grep -e "\"@$OUR_TAG\"" -e "\"${OCF_RESKEY_volgrpname}\""; then
+		ocf_log err "LVM:  Improper setup detected"
+		ocf_exit_reason "The volume_list in lvm.conf must not contain the cluster tag, \"$OUR_TAG\", or volume group, $OCF_RESKEY_volgrpname"
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+check_initrd_warning()
+{
+	# First check to see if there is an initrd img we can safely
+	# compare timestamps against.  If not, don't even bother with
+	# this check.  This is known to work in rhel/fedora distros
+	ls "/boot/*$(uname -r)*.img" > /dev/null 2>&1
+	if [ $? -ne 0 ]; then
+		return
+	fi
+
+	##
+	# Now check to see if the initrd has been updated.
+	# If not, the machine could boot and activate the VG outside
+	# the control of pacemaker
+	##
+	if [ "$(find /boot -name *.img -newer /etc/lvm/lvm.conf)" = "" ]; then
+		ocf_log warn "LVM:  Improper setup detected"
+		ocf_log warn "* initrd image needs to be newer than lvm.conf"
+
+		# While dangerous if not done the first time, there are many
+		# cases where we don't simply want to fail here.  Instead,
+		# keep warning until the user remakes the initrd - or has
+		# it done for them by upgrading the kernel.
+		#
+		# initrd can be updated using this command.
+		# dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r)
+		#
+	fi
+}
+
+##
+# does this vg have our tag
+##
+check_tags()
+{
+	local owner=`vgs -o tags --noheadings $OCF_RESKEY_volgrpname | tr -d ' '`
+
+	if [ -z "$owner" ]; then
+		# No-one owns this VG yet
+		return 1
+	fi
+
+	if [ "$OUR_TAG" = "$owner" ]; then
+		# yep, this is ours
+		return 0
+	fi
+
+	# some other tag is set on this vg
+	return 2
+}
+
+strip_tags()
+{
+	local i
+
+	for i in `vgs --noheadings -o tags $OCF_RESKEY_volgrpname | sed s/","/" "/g`; do
+		ocf_log info "Stripping tag, $i"
+
+		# LVM version 2.02.98 allows changing tags if PARTIAL
+		vgchange --deltag $i $OCF_RESKEY_volgrpname
+	done
+
+	if [ ! -z `vgs -o tags --noheadings $OCF_RESKEY_volgrpname | tr -d ' '` ]; then
+		ocf_exit_reason "Failed to remove ownership tags from $OCF_RESKEY_volgrpname"
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+set_tags()
+{
+	check_tags
+	case $? in
+	0)
+		# we already own it.
+		return $OCF_SUCCESS
+		;;
+	2)
+		# other tags are set, strip them before setting
+		if ! strip_tags; then
+			return $OCF_ERR_GENERIC
+		fi
+		;;
+	*)
+		: ;;
+	esac
+
+	vgchange --addtag $OUR_TAG $OCF_RESKEY_volgrpname
+	if [ $? -ne 0 ]; then
+		ocf_exit_reason "Failed to add ownership tag to $OCF_RESKEY_volgrpname"
+		return $OCF_ERR_GENERIC
+	fi
+
+	ocf_log info "New tag \"$OUR_TAG\" added to $OCF_RESKEY_volgrpname"
+	return $OCF_SUCCESS
+}
+
+#
+# interface to LVM
+#
+
+lvm_init() {
+	OUR_TAG="pacemaker"
+	if [ -n "$OCF_RESKEY_tag" ]; then
+		OUR_TAG=$OCF_RESKEY_tag
+	fi
+	vgchange_activate_options="aly --config activation{volume_list=[\"@${OUR_TAG}\"]}"
+	vgchange_deactivate_options="-aln"
+}
+
+lvm_validate_all() {
+	if ! verify_tags_environment; then
+		exit $OCF_ERR_GENERIC
+	fi
+}
+
+lvm_status() {
+	local rc=0
+
+	# If vg is running, make sure the correct tag is present. Otherwise we
+	# can not guarantee exclusive activation.
+	if ! check_tags; then
+		ocf_exit_reason "WARNING: $OCF_RESKEY_volgrpname is active without the cluster tag, \"$OUR_TAG\""
+		rc=$OCF_ERR_GENERIC
+	fi
+
+	# make sure the environment for tags activation is still valid
+	if ! verify_tags_environment; then
+		rc=$OCF_ERR_GENERIC
+	fi
+	# let the user know if their initrd is older than lvm.conf.
+	check_initrd_warning
+
+	return $rc
+}
+
+lvm_pre_activate() {
+	if ! set_tags; then
+		return $OCF_ERR_GENERIC
+	fi
+	return 0
+}
+
+lvm_post_activate() {
+	local rc=$1
+	return $rc
+}
+
+lvm_pre_deactivate() {
+	return 0
+}
+
+lvm_post_deactivate() {
+	local rc=$1
+	if [ $rc -eq 0 ]; then
+		strip_tags
+		rc=$?
+	fi
+	return $rc
+}
+
+# vim:tabstop=4:shiftwidth=4:textwidth=0:wrapmargin=0
diff --git a/heartbeat/lvmlockd b/heartbeat/lvmlockd
new file mode 100755
index 0000000..645cf81
--- /dev/null
+++ b/heartbeat/lvmlockd
@@ -0,0 +1,352 @@
+#!/bin/sh
+#
+#
+#	lvmlockd OCF Resource Agent
+#
+# Copyright (c) 2017 SUSE LINUX, Eric Ren
+#			All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like.	Any license provided herein, whether implied or
+# otherwise, applies only to this software file.  Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+#######################################################################
+
+# Emit the OCF metadata XML (parameters and supported actions) on stdout.
+# The heredoc body is runtime output and must not be altered.
+meta_data() {
+	cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="lvmlockd">
+<version>1.0</version>
+
+<longdesc lang="en">
+This agent manages the lvmlockd daemon. "lvmlockd" is like "clvmd". Both
+are used by LVM commands to coordinate access to shared storage, but with
+different design and implementations. "lvmlockd" can use two lock managers:
+dlm and sanlock. This agent only supports "dlm + lvmlockd". If dlm (or corosync)
+are already being used by other cluster software, you are advised to select
+dlm, then configure "controld" resource agent for dlm and this agent for "lvmlockd".
+Otherwise, consider sanlock for "lvmlockd" if dlm/corosync is not required.
+For more information, refer to manpage lvmlockd.8.
+</longdesc>
+<shortdesc lang="en">This agent manages the lvmlockd daemon</shortdesc>
+
+<parameters>
+<parameter name="pidfile" unique="0">
+<longdesc lang="en">pid file</longdesc>
+<shortdesc lang="en">pid file</shortdesc>
+<content type="string" default="/run/lvmlockd.pid"/>
+</parameter>
+
+<parameter name="socket_path" unique="0">
+<longdesc lang="en">Set the socket path to listen on.</longdesc>
+<shortdesc lang="en">socket path</shortdesc>
+<content type="string" default="/run/lvm/lvmlockd.socket"/>
+</parameter>
+
+<parameter name="syslog_priority" unique="0">
+<longdesc lang="en">Write log messages from this level up to syslog.</longdesc>
+<shortdesc lang="en">syslog priority</shortdesc>
+<content type="string" default="warning"/>
+</parameter>
+
+<parameter name="adopt" unique="0">
+<longdesc lang="en">
+Adopt locks from a previous instance of lvmlockd.
+</longdesc>
+<shortdesc lang="en">Adopt locks from a previous instance of lvmlockd</shortdesc>
+<content type="integer" default="1"/>
+</parameter>
+</parameters>
+
+<actions>
+<action name="start"		timeout="90" />
+<action name="stop"		timeout="90" />
+<action name="monitor"		timeout="90" interval="30" depth="0" />
+<action name="meta-data"	timeout="10" />
+<action name="validate-all"	timeout="20" />
+</actions>
+</resource-agent>
+END
+}
+
+#######################################################################
+
+: ${OCF_RESKEY_pidfile:="/run/lvmlockd.pid"}
+
+LOCKD="lvmlockd"
+# 0.5s sleep each count
+TIMEOUT_COUNT=20
+
+usage() {
+	# One-line summary of the supported actions.
+	echo "usage: $0 {start|stop|monitor|validate-all|meta-data}"
+}
+
+get_pid()
+{
+	# Print the daemon PID recorded in the pidfile; fail when no pidfile exists.
+	[ -f "${OCF_RESKEY_pidfile}" ] && cat "${OCF_RESKEY_pidfile}"
+}
+
+# Check whether a process with the given PID is alive.
+# Uses /proc when available, falling back to a signal-0 probe.
+daemon_is_running()
+{
+	local pid=$1
+
+	# Guard against an empty argument: "[ -d /proc/ ]" tests /proc itself,
+	# which always exists, and would falsely report "running".
+	if [ -z "$pid" ] ; then
+		return 1
+	fi
+
+	# Use /proc if it exists there
+	if [ -d /proc ] && [ -d /proc/1 ] ; then
+		[ -d /proc/"$pid" ]
+	else
+		kill -s 0 "$pid" >/dev/null 2>&1
+	fi
+}
+
+silent_status()
+{
+	# Succeed only when a pidfile exists, holds a PID, and that PID is alive.
+	local pid
+
+	pid=$(get_pid) || return 1
+	[ -n "$pid" ] && daemon_is_running "$pid"
+}
+
+check_config()
+{
+	local out=""
+	local use_lvmlockd=""
+	local lock_type=""
+
+	# To use lvmlockd, ensure configure lvm.conf:
+	# locking_type = 1
+	# use_lvmlockd = 1
+	out=$(lvmconfig 'global/use_lvmlockd')
+	use_lvmlockd=$(echo "$out" | cut -d'=' -f2)
+
+	out=$(lvmconfig 'global/locking_type')
+	lock_type=$(echo "$out" | cut -d'=' -f2)
+
+	if [ "$use_lvmlockd" -ne 1 ] ; then
+		ocf_log info "lvmlockd is not enabled, please ensure \"use_lvmlockd=1\""
+	fi
+	if [ "$lock_type" -ne 1 ] ; then
+		ocf_log info "locking type is wrong, please ensure \"locking_type=1\""
+	fi
+
+	if [ "$use_lvmlockd" -ne 1 ] || [ "$lock_type" -ne 1 ] ; then
+		ocf_exit_reason "Improper configuration to use lvmlockd."
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	return $OCF_SUCCESS
+}
+
+# Ensure dlm_controld is running; this agent only supports dlm + lvmlockd.
+# Exits with OCF_ERR_CONFIGURED when DLM is absent, else returns OCF_SUCCESS.
+check_dlm_controld()
+{
+	local pid=""
+
+	# dlm daemon should have only one instance, but for safe...
+	pid=$(pgrep dlm_controld | head -n1)
+	# pgrep prints nothing when no process matches; check for that
+	# explicitly so an empty pid is not mistaken for a live daemon.
+	if [ -z "$pid" ] || ! daemon_is_running "$pid" ; then
+		ocf_exit_reason "DLM is not running. Is it configured?"
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	return $OCF_SUCCESS
+}
+
+# Start action: verify lvm.conf and DLM, then launch lvmlockd with the
+# configured pidfile, socket, syslog priority, adoption mode and the dlm
+# lock manager. Idempotent: returns success if the daemon already runs.
+lvmlockd_start() {
+	local extras=""
+
+	ocf_log info "checking config settings for ${LOCKD}..."
+	check_config
+
+	ocf_log info "checking if DLM is started first..."
+	check_dlm_controld
+
+	if silent_status ; then
+		ocf_log info "${LOCKD} already started (pid=$(get_pid))"
+		return $OCF_SUCCESS
+	fi
+
+	if [ ! -z "$OCF_RESKEY_socket_path" ] ; then
+		extras="$extras -s ${OCF_RESKEY_socket_path}"
+	fi
+	if [ ! -z "$OCF_RESKEY_syslog_priority" ] ; then
+		extras="$extras -S ${OCF_RESKEY_syslog_priority}"
+	fi
+	if [ ! -z "$OCF_RESKEY_adopt" ] ; then
+		extras="$extras -A ${OCF_RESKEY_adopt}"
+	else
+		# Inside lvmlockd daemon, this option defaults to 0. But, we
+		# want it defaults to 1 for resource agent. When RA monitor pulls
+		# this daemon up, we expect it to adopt locks from a previous
+		# instance of lvmlockd.
+		extras="$extras -A 1"
+	fi
+	# This client only support "dlm" lock manager
+	extras="$extras -g dlm"
+
+	ocf_log info "starting ${LOCKD}..."
+	ocf_run ${LOCKD} -p ${OCF_RESKEY_pidfile} $extras
+	rc=$?
+	if [ $rc -ne $OCF_SUCCESS ] ; then
+		ocf_exit_reason "Failed to start ${LOCKD}, exit code: $rc"
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+# Each shared VG has its own lockspace. Besides, lvm_global lockspace
+# is for global use, and it should be the last one to close. It should
+# be enough to only check on lvm_global.
+# Polls dlm_tool up to TIMEOUT_COUNT times (0.5s apart); exits the whole
+# agent with OCF_ERR_GENERIC if lvm_global never disappears.
+wait_lockspaces_close()
+{
+	local retries=0
+
+	ocf_log info "Waiting for all lockspaces to be closed"
+	while [ $retries -lt "$TIMEOUT_COUNT" ]
+	do
+		# lvm_global gone from dlm_tool's listing => all lockspaces closed
+		if ! dlm_tool ls lvm_global | grep -Eqs "^name[[:space:]]+lvm_global" ; then
+			return $OCF_SUCCESS
+		fi
+
+		sleep 0.5
+		retries=$((retries + 1))
+	done
+
+	ocf_exit_reason "Failed to close all lockspaces clearly"
+	exit $OCF_ERR_GENERIC
+}
+
+# Send SIGTERM to the given pid repeatedly (up to TIMEOUT_COUNT attempts,
+# 0.5s apart) until the process exits. Does not escalate to SIGKILL; the
+# caller re-checks the daemon state afterwards.
+kill_stop()
+{
+	local pid=$1
+	local retries=0
+
+	ocf_log info "Killing ${LOCKD} (pid=$pid)"
+	while
+		daemon_is_running $pid && [ $retries -lt "$TIMEOUT_COUNT" ]
+	do
+		if [ $retries -ne 0 ] ; then
+			# don't sleep on the first try
+			sleep 0.5
+		fi
+		kill -s TERM $pid >/dev/null 2>&1
+		retries=$((retries + 1))
+	done
+
+}
+
+# Stop action: close any remaining lockspaces via lvmlockctl, wait for
+# them to drain, then terminate the daemon. Idempotent when not running.
+lvmlockd_stop() {
+	local pid=""
+
+	if ! silent_status ; then
+		ocf_log info "${LOCKD} is not running"
+		return $OCF_SUCCESS
+	fi
+
+	if [ -n "$(dlm_tool ls)" ]; then
+		# We are going to stop lvmlockd, at this moment, we hope all shared VG have
+		# been deactivated, otherwise we are in trouble: the stop action will fail!
+		ocf_log info "stop the lockspaces of shared VG(s)..."
+		ocf_run lvmlockctl --stop-lockspaces
+		rc=$?
+		if [ $rc -ne $OCF_SUCCESS ] ; then
+			ocf_exit_reason "Failed to close lockspace, exit code: $rc"
+			return $OCF_ERR_GENERIC
+		fi
+	fi
+
+	# exits the agent itself on timeout
+	wait_lockspaces_close
+
+	pid=$(get_pid)
+	kill_stop $pid
+	if silent_status ; then
+		ocf_exit_reason "Failed to stop, ${LOCKD}[$pid] still running."
+		return $OCF_ERR_GENERIC
+	fi
+
+	return $OCF_SUCCESS
+}
+
+lvmlockd_monitor() {
+	# Monitor action: map daemon liveness to OCF status codes.
+	if ! silent_status ; then
+		ocf_log info "${LOCKD} not running"
+		return $OCF_NOT_RUNNING
+	fi
+
+	return $OCF_SUCCESS
+}
+
+lvmlockd_validate() {
+	check_binary ${LOCKD}
+	check_binary lvm
+	check_binary dlm_tool
+	check_binary pgrep
+	check_binary lvmlockctl
+
+	return $OCF_SUCCESS
+}
+
+
+# Make sure meta-data and usage always succeed
+case $__OCF_ACTION in
+meta-data)		meta_data
+			exit $OCF_SUCCESS
+			;;
+usage|help)		usage
+			exit $OCF_SUCCESS
+			;;
+esac
+
+# Anything other than meta-data and usage must pass validation
+lvmlockd_validate || exit $?
+
+# Translate each action into the appropriate function call
+case $__OCF_ACTION in
+start)			lvmlockd_start
+			;;
+stop)			lvmlockd_stop
+			;;
+monitor)		lvmlockd_monitor
+			;;
+validate-all)		lvmlockd_validate
+			;;
+*)			usage
+			exit $OCF_ERR_UNIMPLEMENTED
+			;;
+esac
+rc=$?
+
+# Log the action result at debug level and propagate it as our exit code.
+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
+exit $rc
diff --git a/heartbeat/minio b/heartbeat/minio
new file mode 100755
index 0000000..6d11956
--- /dev/null
+++ b/heartbeat/minio
@@ -0,0 +1,289 @@
+#!/bin/sh
+#
+# Resource script for Minio
+#
+# Description:  Manages Minio as an OCF resource in
+#               an Active-Passive High Availability setup.
+#
+# Author:       Ricardo Branco <tsmgeek at gmail.com> : Initial script for minio server
+# License:      GNU General Public License (GPL)
+#
+#
+#       usage: $0 {start|stop|status|monitor|validate-all|meta-data}
+#
+#       The "start" arg starts Minio.
+#
+#       The "stop" arg stops it.
+#
+# OCF parameters:
+#  OCF_RESKEY_binary
+#  OCF_RESKEY_conffile
+#  OCF_RESKEY_pidfile
+#  OCF_RESKEY_address
+#  OCF_RESKEY_volumnpaths
+#
+##########################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+# Defaults
+OCF_RESKEY_binary_default="/opt/minio/minio-server"
+OCF_RESKEY_confdir_default="/etc/minio"
+OCF_RESKEY_pidfile_default="/var/run/minio.pid"
+OCF_RESKEY_address_default=":9000"
+OCF_RESKEY_volumepaths_default="/home/shared"
+
+: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}}
+: ${OCF_RESKEY_confdir=${OCF_RESKEY_confdir_default}}
+: ${OCF_RESKEY_pidfile=${OCF_RESKEY_pidfile_default}}
+: ${OCF_RESKEY_address=${OCF_RESKEY_address_default}}
+: ${OCF_RESKEY_volumepaths=${OCF_RESKEY_volumepaths_default}}
+
+USAGE="Usage: $0 {start|stop|status|monitor|validate-all|meta-data}";
+
+##########################################################################
+
+usage() {
+        # Print the usage summary on stderr.
+        printf '%s\n' "$USAGE" >&2
+}
+
+# Emit the OCF metadata XML for the minio agent on stdout.
+# NB: this function exits the agent with OCF_SUCCESS rather than returning.
+meta_data() {
+        cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="minio">
+<version>1.0</version>
+<longdesc lang="en">
+This script manages Minio in an Active-Passive setup
+</longdesc>
+<shortdesc lang="en">OCF Resource Agent compliant Minio server script.</shortdesc>
+
+
+<parameters>
+
+<parameter name="binary">
+<longdesc lang="en">The Minio server binary</longdesc>
+<shortdesc lang="en">The Minio server binary</shortdesc>
+<content type="string" default="${OCF_RESKEY_binary_default}" />
+</parameter>
+
+<parameter name="confdir">
+<longdesc lang="en">
+The Minio configuration directory path.
+For example, "/etc/minio"
+</longdesc>
+<shortdesc lang="en">Configuration directory path</shortdesc>
+<content type="string" default="${OCF_RESKEY_confdir_default}" />
+</parameter>
+
+<parameter name="pidfile">
+<longdesc lang="en">The Minio PID file. The location of the PID file.</longdesc>
+<shortdesc lang="en">PID file</shortdesc>
+<content type="string" default="${OCF_RESKEY_pidfile_default}" />
+</parameter>
+
+<parameter name="address">
+<longdesc lang="en">Address to bind minio to.</longdesc>
+<shortdesc lang="en">Bind address</shortdesc>
+<content type="string" default="${OCF_RESKEY_address_default}" />
+</parameter>
+
+<parameter name="volumepaths">
+<longdesc lang="en">The storage volumes for minio to use.</longdesc>
+<shortdesc lang="en">Storage Volumes</shortdesc>
+<content type="string" default="${OCF_RESKEY_volumepaths_default}" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start"   timeout="20" />
+<action name="stop"    timeout="20" />
+<action name="monitor" depth="0"  timeout="20" interval="60" />
+<action name="validate-all"  timeout="2" />
+<action name="meta-data"  timeout="5" />
+</actions>
+
+</resource-agent>
+END
+        exit $OCF_SUCCESS
+}
+
+# True when a process with the given PID exists (signal-0 probe).
+isRunning()
+{
+        kill -0 "$1" > /dev/null 2>&1
+}
+
+# Check whether the minio server recorded in the pidfile is alive and
+# actually looks like a minio-server process.
+# Returns 0 when running, OCF_NOT_RUNNING otherwise. Sets the global PID.
+minio_status()
+{
+        if [ -f "$OCF_RESKEY_pidfile" ]
+        then
+        # Minio is probably running
+                PID=`head -n 1 $OCF_RESKEY_pidfile`
+                if [ ! -z "$PID" ] ; then
+                        # Run the ps check directly: the previous backtick
+                        # wrapping command-substituted the (empty) output and
+                        # executed it, relying on obscure shell semantics.
+                        isRunning "$PID" && ps -p "$PID" | grep minio-server > /dev/null 2>&1
+                        return $?
+                fi
+        fi
+
+        # Minio is not running
+        return $OCF_NOT_RUNNING;
+}
+
+# Start action: verify privileges, skip if already running, then launch the
+# minio server detached and record its PID in the pidfile.
+# Exits with an OCF status code rather than returning.
+minio_start()
+{
+        # make a few checks and start Minio
+        if ocf_is_root ; then : ; else
+                ocf_log err "You must be root"
+                exit $OCF_ERR_PERM
+        fi
+
+        # if Minio is running return success
+        if minio_status ; then
+                ocf_log info "Minio server is running already"
+                exit $OCF_SUCCESS
+        fi
+
+        # starting Minio: nohup in the background, then echo the background
+        # PID so it can be captured into the pidfile
+        cmd="su - root -c \"nohup ${OCF_RESKEY_binary} server --quiet --config-dir ${OCF_RESKEY_confdir} --address ${OCF_RESKEY_address} ${OCF_RESKEY_volumepaths} >/dev/null &\"'echo \$!' "
+
+        ocf_log debug "Starting minio: $cmd"
+
+        eval $cmd > ${OCF_RESKEY_pidfile}
+        # capture the launch status immediately; reading \$? after the
+        # [ ... ] test below would only ever see the test's own status (0)
+        rc=$?
+
+        if [ "$rc" -ne 0 ]; then
+                ocf_log err "Minio returned error: $rc"
+                exit $OCF_ERR_GENERIC
+        fi
+
+        exit $OCF_SUCCESS
+}
+
+
+# Stop action: SIGTERM the recorded PID, wait up to 30s for it to exit,
+# then escalate to SIGKILL and wait until it is gone.
+# Always exits with an OCF status code (success when already stopped).
+minio_stop()
+{
+        if minio_status ; then
+                PID=`head -n 1 $OCF_RESKEY_pidfile`
+                if [ ! -z "$PID" ]; then
+                        ocf_log info "Killing Minio PID $PID"
+                        kill $PID > /dev/null 2>&1
+                        if [ "$?" -eq 0 ]; then
+                                # graceful phase: up to 30 x 1s for SIGTERM to take effect
+                                TRIES=0
+                                while isRunning "$PID" && [ "$TRIES" -lt 30 ]
+                                do
+                                        sleep 1
+                                        ocf_log info "Minio PID $PID is still running"
+                                        TRIES=`expr $TRIES + 1`
+                                done
+                                isRunning "$PID"
+                                RET=$?
+                                if [ "$RET" -eq 0 ]; then
+                                        # still alive after the grace period: force it
+                                        ocf_log info "Killing Minio PID $PID with SIGKILL"
+                                        kill -9 $PID > /dev/null 2>&1
+                                        while isRunning "$PID"
+                                        do
+                                                sleep 1
+                                                ocf_log info "Minio PID $PID is still running"
+                                        done
+                                fi
+                        else
+                                # kill(1) itself failed (e.g. no such process / no permission)
+                                ocf_log err "Killing Minio PID $PID FAILED"
+                                exit $OCF_ERR_GENERIC
+                        fi
+                fi
+        fi
+
+        exit $OCF_SUCCESS
+}
+
+minio_monitor()
+{
+        minio_status
+        RET=$?
+
+        if [ "$RET" -eq 0 ]; then
+                PID=`head -n 1 $OCF_RESKEY_pidfile`
+                ocf_log debug "Minio monitor on PID $PID succeeded"
+                return $OCF_SUCCESS
+        else
+                ocf_log debug "Minio monitor on PID $PID failed"
+                return $OCF_NOT_RUNNING
+        fi
+}
+
+# Validate configuration: the server binary must be executable and the
+# configuration directory must exist. Exits with the matching OCF error
+# code on failure; falls through (returns) when everything checks out.
+minio_validate_all()
+{
+
+        # check that the minio binary exists
+        if [ ! -x "$OCF_RESKEY_binary" ]; then
+                ocf_log err "Minio server binary $OCF_RESKEY_binary does not exist"
+                exit $OCF_ERR_INSTALLED
+        fi
+
+        # check that the Minio config directory exists
+        if [ ! -d "$OCF_RESKEY_confdir" ]; then
+                ocf_log err "Minio config dir $OCF_RESKEY_confdir does not exist"
+                exit $OCF_ERR_CONFIGURED
+        fi
+
+}
+
+#
+# Main
+#
+
+# Exactly one action argument is required.
+if [ $# -ne 1 ]
+then
+  usage
+  exit $OCF_ERR_ARGS
+fi
+
+# Dispatch the requested action; start validates the configuration first.
+case $1 in
+    start)
+        minio_validate_all
+        minio_start
+        ;;
+
+    stop)
+        minio_stop
+        ;;
+
+    status)
+        if minio_status; then
+            ocf_log info "Minio is running"
+            exit $OCF_SUCCESS
+        else
+            ocf_log info "Minio is stopped"
+            exit $OCF_NOT_RUNNING
+        fi
+        ;;
+
+    monitor)
+        minio_monitor
+        ;;
+
+    validate-all)
+        minio_validate_all
+        exit $OCF_SUCCESS
+        ;;
+
+    meta-data|metadata|meta_data)
+        meta_data
+        ;;
+
+    usage)
+        usage
+        exit $OCF_SUCCESS
+        ;;
+
+    *)
+        usage
+        ocf_log err "$0 was called with unsupported args: $*"
+        exit $OCF_ERR_UNIMPLEMENTED
+        ;;
+esac
+rc=$?
+ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
+exit $rc
diff --git a/heartbeat/mysql b/heartbeat/mysql
index e76213b..1b24248 100755
--- a/heartbeat/mysql
+++ b/heartbeat/mysql
@@ -324,7 +324,7 @@ get_read_only() {
     local read_only_state
 
     read_only_state=`$MYSQL $MYSQL_OPTIONS_REPL \
-        -e "SHOW VARIABLES" | grep -w read_only | awk '{print $2}'`
+        --skip-column-names -e "SHOW VARIABLES LIKE 'read_only'" | awk '{print $2}'`
 
     if [ "$read_only_state" = "ON" ]; then
         return 0
@@ -719,13 +719,22 @@ mysql_monitor() {
     fi
  
     mysql_common_status $status_loglevel
-
     rc=$?
 
     # TODO: check max connections error
 
     # If status returned an error, return that immediately
     if [ $rc -ne $OCF_SUCCESS ]; then
+        if ocf_is_ms ; then
+            # This is a master slave setup but monitored host returned some errors.
+            # Immediately remove it from the pool of possible masters by erasing its master-mysql key
+            # When new mysql master election is started and node got no or negative master-mysql attribute the following is logged
+            #   nodename.com pengine: debug: master_color: mysql:0 master score: -1
+            # If there are NO nodes with positive vaule election of mysql master will fail with
+            #   nodename.com pengine: info: master_color: ms_mysql: Promoted 0 instances of a possible 1 to master
+            $CRM_MASTER -D
+        fi
+
         return $rc
     fi
 
@@ -742,13 +751,20 @@ mysql_monitor() {
         rc=$?
 
         if [ $rc -ne 0 ]; then
+            # We are master/slave and test failed. Delete master score for this node as it is considered unhealthy because of this particular failed check.
+            ocf_is_ms && $CRM_MASTER -D
             ocf_exit_reason "Failed to select from $test_table";
             return $OCF_ERR_GENERIC;
         fi
+    else
+        # In case no exnteded tests are enabled and we are in master/slave mode _always_ set the master score to 1 if we reached this point
+        ocf_is_ms && $CRM_MASTER -v 1
     fi
 
     if ocf_is_ms && ! get_read_only; then
         ocf_log debug "MySQL monitor succeeded (master)";
+        # Always set master score for the master
+        $CRM_MASTER -v 2
         return $OCF_RUNNING_MASTER
     else
         ocf_log debug "MySQL monitor succeeded";
diff --git a/heartbeat/named b/heartbeat/named
index 2118e0c..4856cdc 100755
--- a/heartbeat/named
+++ b/heartbeat/named
@@ -25,6 +25,7 @@ OCF_RESKEY_named_pidfile_default="/var/run/named/named.pid"
 OCF_RESKEY_named_rootdir_default=""
 OCF_RESKEY_named_options_default=""
 OCF_RESKEY_named_keytab_file_default=""
+OCF_RESKEY_rndc_options_default=""
 OCF_RESKEY_monitor_request_default="localhost"
 OCF_RESKEY_monitor_response_default="127.0.0.1"
 OCF_RESKEY_monitor_ip_default="127.0.0.1"
@@ -38,6 +39,7 @@ OCF_RESKEY_monitor_ip_default="127.0.0.1"
 : ${OCF_RESKEY_named_rootdir=${OCF_RESKEY_named_rootdir_default}}
 : ${OCF_RESKEY_named_options=${OCF_RESKEY_named_options_default}}
 : ${OCF_RESKEY_named_keytab_file=${OCF_RESKEY_named_keytab_file_default}}
+: ${OCF_RESKEY_rndc_options=${OCF_RESKEY_rndc_options_default}}
 : ${OCF_RESKEY_monitor_request=${OCF_RESKEY_monitor_request_default}}
 : ${OCF_RESKEY_monitor_response=${OCF_RESKEY_monitor_response_default}}
 : ${OCF_RESKEY_monitor_ip=${OCF_RESKEY_monitor_ip_default}}
@@ -144,6 +146,14 @@ named service keytab file (for GSS-TSIG).
 <content type="string" default="${OCF_RESKEY_named_keytab_file_default}" />
 </parameter>
 
+<parameter name="rndc_options" unique="0" required="0">
+<longdesc lang="en">
+Options for rndc process if any.
+</longdesc>
+<shortdesc lang="en">rndc_options</shortdesc>
+<content type="string" default="${OCF_RESKEY_rndc_options_default}" />
+</parameter>
+
 <parameter name="monitor_request" unique="0" required="0">
 <longdesc lang="en">
 Request that shall be sent to named for monitoring. Usually an A record in DNS.
@@ -326,7 +336,7 @@ named_monitor() {
 #
 
 named_reload() {
-    $OCF_RESKEY_rndc reload >/dev/null || return $OCF_ERR_GENERIC
+    $OCF_RESKEY_rndc $OCF_RESKEY_rndc_options reload >/dev/null || return $OCF_ERR_GENERIC
     
     return $OCF_SUCCESS
 }
@@ -396,7 +406,7 @@ named_stop () {
     
     named_status || return $OCF_SUCCESS
     
-    $OCF_RESKEY_rndc stop >/dev/null
+    $OCF_RESKEY_rndc $OCF_RESKEY_rndc_options stop >/dev/null
     if [ $? -ne 0 ]; then
         ocf_log info "rndc stop failed. Killing named."
         kill `cat ${OCF_RESKEY_named_pidfile}`
diff --git a/heartbeat/nfsserver b/heartbeat/nfsserver
index bac5bbb..9ce465c 100755
--- a/heartbeat/nfsserver
+++ b/heartbeat/nfsserver
@@ -759,10 +759,10 @@ nfsserver_stop ()
 	case $EXEC_MODE in
             [23]) ocf_log info "Stop: threads"
 		  tfn="/proc/fs/nfsd/threads"
-		  if [ -f "$tfn" ] && [ "$(cat $tfn)" -gt "0" ]; then
-			ocf_exit_reason "NFS server failed to stop: /proc/fs/nfsd/threads"
-			return $OCF_ERR_GENERIC
-		  fi
+		  while [ -f "$tfn" ] && [ "$(cat $tfn)" -gt "0" ]; do
+			ocf_log err "NFS server failed to stop: /proc/fs/nfsd/threads"
+			sleep 1
+		  done
 
 		  nfs_exec stop rpc-statd > /dev/null 2>&1
 		  ocf_log info "Stop: rpc-statd"
diff --git a/heartbeat/nginx b/heartbeat/nginx
index eed6665..ec9f702 100755
--- a/heartbeat/nginx
+++ b/heartbeat/nginx
@@ -420,7 +420,7 @@ start_nginx() {
   then
     : Configuration file $CONFIGFILE looks OK
   else
-    return $OCF_ERR_CONFIGURED
+    return $OCF_ERR_INSTALLED
   fi
   NGINX_VERSION=`$NGINXD -v 2>&1`
   ocf_log info "Starting $NGINXD - $NGINX_VERSION"
diff --git a/heartbeat/ocf-directories.in b/heartbeat/ocf-directories.in
index 8d70776..d8df035 100644
--- a/heartbeat/ocf-directories.in
+++ b/heartbeat/ocf-directories.in
@@ -13,7 +13,7 @@ exec_prefix=@exec_prefix@
 : ${HA_FIFO:=@localstatedir@/lib/heartbeat/fifo}
 : ${HA_BIN:=@libexecdir@/heartbeat}
 : ${HA_SBIN_DIR:=@sbindir@}
-: ${HA_DATEFMT:="%Y/%m/%d_%T "}
+: ${HA_DATEFMT:="%b %d %T "}
 : ${HA_DEBUGLOG:=/dev/null}
 : ${HA_RESOURCEDIR:=$HA_DIR/resource.d}
 : ${HA_DOCDIR:=@datadir@/doc/heartbeat}
diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in
index 7d856a8..90620cf 100644
--- a/heartbeat/ocf-shellfuncs.in
+++ b/heartbeat/ocf-shellfuncs.in
@@ -22,7 +22,7 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 # 
 
-# Build version: 150fb85f2a442f53157fb8063089817e6ee05b00
+# Build version: 4a3326643bc4b45520c525c0094ac98a9746da6a
 
 # TODO: Some of this should probably split out into a generic OCF
 # library for shell scripts, but for the time being, we'll just use it
@@ -72,10 +72,11 @@ ocf_is_root() {
 }
 
 ocf_maybe_random() {
-	local rnd="$RANDOM"
-	# Something sane-ish in case a shell doesn't support $RANDOM
-	[ -n "$rnd" ] || rnd=$$
-	echo $rnd
+	if test -c /dev/urandom; then
+		od -An -N4 -tu4 /dev/urandom | tr -d '[:space:]'
+	else
+		awk -v pid=$$ 'BEGIN{srand(pid); print rand()}' | sed 's/^.*[.]//'
+	fi
 }
 
 # Portability comments:
@@ -231,7 +232,7 @@ __ha_log() {
 	  [ -n "$HA_LOGFILE" ]
 	then
 	  : appending to $HA_LOGFILE
-	  echo "$HA_LOGTAG:	"`hadate`"${*}" >> $HA_LOGFILE
+	  echo `hadate`" $HA_LOGTAG:    ${*}" >> $HA_LOGFILE
 	fi
 	if
 	  [ -z "$HA_LOGFACILITY" -a -z "$HA_LOGFILE" ] && ! [ "$ignore_stderr" = "true" ]
@@ -433,20 +434,20 @@ ocf_run() {
 
 	output=`"$@" 2>&1`
 	rc=$?
-	output=`echo $output`
+	[ -n "$output" ] && output="$(echo "$output" | tr -s ' \t\r\n' ' ')"
 	if [ $rc -eq 0 ]; then 
 	    if [ "$verbose" -a ! -z "$output" ]; then
 		ocf_log info "$output"
 	    fi
-	    return $OCF_SUCCESS
 	else
 	    if [ ! -z "$output" ]; then
 		ocf_log $loglevel "$output"
 	    else
 		ocf_log $loglevel "command failed: $*"
 	    fi
-	    return $rc
 	fi
+
+	return $rc
 }
 
 ocf_pidfile_status() {
@@ -465,24 +466,85 @@ ocf_pidfile_status() {
     return 1
 }
 
-ocf_take_lock() {
-    local lockfile=$1
-    local rnd=$(ocf_maybe_random)
+# mkdir(1) based locking
+# first the directory is created with the name given as $1
+# then a file named "pid" is created within that directory with
+# the process PID
+# stale locks are handled carefully, the inode of a directory
+# needs to match before and after test if the process is running
+# empty directories are also handled appropriately
+# we relax (sleep) occasionally to allow for other processes to
+# finish managing the lock in case they are in the middle of the
+# business
+
+relax() { sleep 0.5; }
+# Print the stale-lock marker for lock directory $1 and succeed only when
+# the lock looks stale: the recorded pid when its process is dead, or the
+# directory inode when the directory is empty. Prints nothing and fails
+# when the lock is live or gone; returns 2 on a missing argument.
+# The inode is re-checked after relax() so a lock that was replaced
+# mid-check is not misreported as stale.
+ocf_get_stale_pid() {
+	local piddir pid dir_inode
+
+	piddir="$1"
+	[ -z "$piddir" ] && return 2
+	dir_inode="`ls -di $piddir 2>/dev/null`"
+	[ -z "$dir_inode" ] && return 1
+	pid=`cat $piddir/pid 2>/dev/null`
+	if [ -z "$pid" ]; then
+		# empty directory?
+		relax
+		if [ "$dir_inode" = "`ls -di $piddir 2>/dev/null`" ]; then
+			echo $dir_inode
+		else
+			return 1
+		fi
+	elif kill -0 $pid >/dev/null 2>&1; then
+		# lock holder is still alive -> not stale
+		return 1
+	elif relax && [ -e "$piddir/pid" ] && [ "$dir_inode" = "`ls -di $piddir 2>/dev/null`" ]; then
+		echo $pid
+	else
+		return 1
+	fi
+}
+
+# There is a race when the following two functions to manage the
+# lock file (mk and rm) are invoked in parallel by different
+# instances. It is up to the caller to reduce probability of that
+# taking place (see ocf_take_lock() below).
 
-    sleep 0.$rnd
-    while 
-	ocf_pidfile_status $lockfile
-    do
-	ocf_log info "Sleeping until $lockfile is released..."
-	sleep 0.$rnd
-    done
-    echo $$ > $lockfile
+ocf_mk_pid() {
+	mkdir $1 2>/dev/null && echo $$ > $1/pid
+}
+ocf_rm_pid() {
+	rm -f $1/pid
+	rmdir $1 2>/dev/null
 }
 
+# Testing and subsequently removing a stale lock (containing the
+# process pid) is inherently difficult to do in such a way as to
+# prevent a race between creating a pid file and removing it and
+# its directory. We reduce the probability of that happening by
+# checking if the stale lock persists over a random period of
+# time.
+
+# Acquire the mkdir(1)-based lock directory $1, removing a stale lock only
+# if it persists across a random delay, then spinning until the atomic
+# mkdir succeeds.
+ocf_take_lock() {
+	local lockdir=$1
+	local rnd
+	local stale_pid
+
+	# we don't want it too short, so strip leading zeros
+	rnd=$(ocf_maybe_random | sed 's/^0*//')
+	stale_pid=`ocf_get_stale_pid $lockdir`
+	if [ -n "$stale_pid" ]; then
+		sleep 0.$rnd
+		# remove "stale pid" only if it persists
+		[ "$stale_pid" = "`ocf_get_stale_pid $lockdir`" ] &&
+			ocf_rm_pid $lockdir
+	fi
+	# mkdir is atomic: whoever creates the directory owns the lock
+	while ! ocf_mk_pid $lockdir; do
+		ocf_log info "Sleeping until $lockdir is released..."
+		sleep 0.$rnd
+	done
+}
 
 ocf_release_lock_on_exit() {
-    local lockfile=$1
-    trap "rm -f $lockfile" EXIT
+	trap "ocf_rm_pid $1" EXIT
 }
 
 # returns true if the CRM is currently running a probe. A probe is
@@ -606,6 +668,22 @@ dirname()
 	return 0
 }
 
+# usage: systemd_drop_in <name> <After|Before> <dependency.service>
+# Write a systemd drop-in ($1.conf) ordering resource-agents-deps.target
+# $2 (After/Before) $3, then reload systemd.
+systemd_drop_in()
+{
+	if [ $# -ne 3 ]; then
+		ocf_log err "Incorrect number of arguments [$#] for systemd_drop_in."
+		# bail out instead of writing a malformed drop-in
+		return $OCF_ERR_GENERIC
+	fi
+
+	systemdrundir="/run/systemd/system/resource-agents-deps.target.d"
+	# -p: the drop-in directory may already exist from a previous call
+	mkdir -p "$systemdrundir"
+	cat > "$systemdrundir/$1.conf" <<EOF
+[Unit]
+$2=$3
+EOF
+	systemctl daemon-reload
+}
+
 #
 # pseudo_resource status tracking function...
 #
@@ -911,6 +989,44 @@ ocf_stop_trace() {
 	set +x
 }
 
+# Helper functions to map from nodename/bundle-name and physical hostname
+# list_index_for_word "node0 node1 node2 node3 node4 node5" node4 --> 5
+# list_word_at_index "NA host1 host2 host3 host4 host5" 3      --> host2
+
+# list_index_for_word "node1 node2 node3 node4 node5" node7 --> ""
+# list_word_at_index "host1 host2 host3 host4 host5" 8      --> ""
+
+# attribute_target node1                                    --> host1
+# Print the 1-based index(es) of entries in the space-separated list $1
+# matching $2 (awk regex match); prints nothing when no entry matches.
+list_index_for_word() {
+	echo $1 | tr ' ' '\n' | awk -v x="$2" '$0~x {print NR}'
+}
+
+# Print the word at 1-based index $2 of the space-separated list $1;
+# prints nothing when the index is out of range.
+list_word_at_index() {
+	echo $1 | tr ' ' '\n' | awk -v n="$2" 'n == NR'
+}
+
+# Map a node/bundle name to the corresponding physical host name.
+# With no argument: print this resource's attribute target -- the physical
+# host when container_attribute_target=host is in effect, otherwise
+# CRM_meta_on_node. With an argument: translate it through the parallel
+# notify_all_uname/notify_all_hosts lists, falling back to the argument
+# itself when no usable ("NA"-free) mapping exists.
+ocf_attribute_target() {
+	if [ x$1 = x ]; then
+		if [ x$OCF_RESKEY_CRM_meta_container_attribute_target = xhost -a x$OCF_RESKEY_CRM_meta_physical_host != x ]; then
+			echo $OCF_RESKEY_CRM_meta_physical_host
+		else
+			echo $OCF_RESKEY_CRM_meta_on_node
+		fi
+		return
+	elif [ x"$OCF_RESKEY_CRM_meta_notify_all_uname" != x ]; then
+		index=$(list_index_for_word "$OCF_RESKEY_CRM_meta_notify_all_uname" $1)
+		mapping=""
+		if [ x$index != x ]; then
+			mapping=$(list_word_at_index "$OCF_RESKEY_CRM_meta_notify_all_hosts" $index)
+		fi
+		# "NA" marks an entry with no physical-host mapping
+		if [ x$mapping != x -a x$mapping != xNA ]; then
+			echo $mapping
+			return
+		fi
+	fi
+	echo $1
+}
+
 __ocf_set_defaults "$@"
 
 : ${OCF_TRACE_RA:=$OCF_RESKEY_trace_ra}
diff --git a/heartbeat/oraasm b/heartbeat/oraasm
new file mode 100755
index 0000000..22b88ea
--- /dev/null
+++ b/heartbeat/oraasm
@@ -0,0 +1,179 @@
+#!/bin/sh
+#
+#  License:      GNU General Public License (GPL)
+#  (c) 2017 O. Albrigtsen
+#           and Linux-HA contributors
+#
+# -----------------------------------------------------------------------------
+#      O C F    R E S O U R C E    S C R I P T   S P E C I F I C A T I O N
+# -----------------------------------------------------------------------------
+#
+# NAME
+#       oraasm : OCF resource agent script for Oracle ASM
+#
+
+# Initialization:
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+# Defaults
+OCF_RESKEY_user_default="grid"
+
+: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}}
+
+
+# Print a short usage/help text for this agent.
+oraasm_usage() {
+	cat <<END
+    usage: $0 (start|stop|validate-all|meta-data|help|usage|monitor)
+    $0 manages a Oracle ASM Disk Group as an OCF HA resource.
+    The 'start' operation starts the instance.
+    The 'stop' operation stops the instance.
+    The 'status' operation reports whether the instance is running
+    The 'monitor' operation reports whether the instance seems to be working
+    The 'validate-all' operation reports whether the parameters are valid
+END
+}
+
+# Emit the OCF meta-data XML describing this agent's parameters and actions.
+oraasm_meta_data() {
+	cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="oraasm">
+<version>0.75</version>
+
+<longdesc lang="en">OCF Resource script for Oracle ASM. It uses the ohasd init-script to manage a Oracle ASM Disk Group as a HA resource.</longdesc>
+<shortdesc lang="en">Oracle ASM resource agent</shortdesc>
+
+<parameters>
+
+<parameter name="user">
+    <longdesc lang="en">Oracle Grid user</longdesc>
+    <shortdesc lang="en">Oracle Grid user</shortdesc>
+    <content type="string" default="${OCF_RESKEY_user_default}" />
+</parameter>
+
+<parameter name="diskgroup" required="1">
+    <longdesc lang="en">
+The name of the Oracle Disk Group.
+If not specified, then the Disk Group along with its home should be listed in /etc/oratab.
+    </longdesc>
+    <shortdesc lang="en">Oracle Disk Group</shortdesc>
+    <content type="string" default="" />
+</parameter>
+
+<parameter name="home" unique="0">
+<longdesc lang="en">The Oracle Grid home directory</longdesc>
+<shortdesc lang="en">home</shortdesc>
+<content type="string" default="" />
+</parameter>
+
+</parameters>
+
+<actions>
+<action name="start" timeout="60" />
+<action name="stop" timeout="60" />
+<action name="status" timeout="30" />
+<action name="monitor" depth="0" timeout="30" interval="10" />
+<action name="validate-all" timeout="5" />
+<action name="meta-data" timeout="5" />
+</actions>
+</resource-agent>
+END
+}
+
+# List the actions implemented by this agent (consumed by ocf_rarun).
+oraasm_methods() {
+	cat <<-!
+	start
+	stop
+	status
+	monitor
+	validate-all
+	methods
+	meta-data
+	usage
+	!
+}
+
+# Resolve the grid home and prepare a small environment file.
+oraasm_getconfig() {
+	# If "home" was not given, look it up in /etc/oratab by disk group
+	# (assumes entries of the form "+DGNAME:/path/to/home:..." -- TODO confirm).
+	[ x = "x$OCF_RESKEY_home" ] &&
+		OCF_RESKEY_home=`awk -F: "/^+$OCF_RESKEY_diskgroup:/"'{print $2}' /etc/oratab`
+	PATH="$OCF_RESKEY_home/bin:$PATH"
+
+	# Environment file sourced by the "su - $OCF_RESKEY_user" shell in
+	# oraasm_monitor so the grid user's PATH includes crsctl; removed
+	# again when the agent exits.
+	ORA_ENVF=`mktemp`
+	cat << EOF > $ORA_ENVF
+PATH="$OCF_RESKEY_home/bin:$PATH"
+EOF
+	chmod 644 $ORA_ENVF
+	trap "rm -f $ORA_ENVF" EXIT
+}
+
+# Start ohasd and wait until the monitor reports the stack as up.
+oraasm_start() {
+	# if resource is already running, no need to continue code after this.
+	if oraasm_monitor; then
+		ocf_log info "Oracle ASM is already running"
+		return $OCF_SUCCESS
+	fi
+
+	ocf_run -q /etc/init.d/ohasd start
+
+	# Poll until up; the CRM start timeout bounds how long this may spin.
+	while ! oraasm_monitor; do
+		sleep 1
+	done
+
+	return $OCF_SUCCESS
+}
+
+# Stop ohasd and wait until the monitor confirms the stack is down.
+oraasm_stop() {
+	# Nothing to do when the stack is already down.
+	if ! oraasm_monitor; then
+		ocf_log info "Oracle ASM is already stopped"
+		return $OCF_SUCCESS
+	fi
+
+	ocf_run -q /etc/init.d/ohasd stop
+
+	# Poll until down; the CRM stop timeout bounds how long this may spin.
+	while oraasm_monitor; do
+		sleep 1
+	done
+
+	return $OCF_SUCCESS
+}
+
+# Check the HA stack by running "crsctl check has" as the grid user,
+# with PATH taken from the env file written by oraasm_getconfig.
+# Message CRS-4638 is treated as "online" (presumably "Oracle High
+# Availability Services is online" -- TODO confirm against crsctl docs).
+oraasm_monitor() {
+	su - $OCF_RESKEY_user -c ". $ORA_ENVF; crsctl check has | grep -q \"CRS-4638\""
+	case "$?" in
+		0)
+			rc=$OCF_SUCCESS
+			;;
+		1)
+			# grep found no CRS-4638 line: stack not up
+			rc=$OCF_NOT_RUNNING
+			ocf_log info "Oracle ASM is not running"
+			;;
+		*)
+			rc=$OCF_ERR_GENERIC
+			;;
+	esac
+	return $rc
+}
+
+# Alias for monitor. Propagate the monitor's exit status directly: the
+# previous rc=$(oraasm_monitor) form captured stdout, not the return
+# code, and only worked by accident when the output was empty.
+oraasm_status() {
+	oraasm_monitor
+	return $?
+}
+
+oraasm_validate_all() {
+	if [ x = "x$OCF_RESKEY_home" ]; then
+		ocf_exit_reason "home not set"
+		return $OCF_ERR_CONFIGURED
+	fi
+}
+
+
+OCF_REQUIRED_PARAMS="user diskgroup"
+OCF_REQUIRED_BINARIES="/etc/init.d/ohasd crsctl"
+ocf_rarun $*
+
+# vim:tabstop=4:shiftwidth=4:textwidth=0:wrapmargin=0
diff --git a/heartbeat/ovsmonitor b/heartbeat/ovsmonitor
new file mode 100755
index 0000000..000854b
--- /dev/null
+++ b/heartbeat/ovsmonitor
@@ -0,0 +1,450 @@
+#!/bin/sh
+#
+#	   OCF Resource Agent compliant script.
+#	   Monitor the vitality of a local OpenVSwitch bond.
+#
+# 	Based on the work by Alexander Krauth.
+#
+#	Transfered from ethmonitor into ovsmonitor by Mathieu Grzybek.
+#
+# Copyright (c) 2017 Robert Euhus, Alexander Krauth, Lars Marowsky-Bré
+#		Mathieu Grzybek
+#					All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like.  Any license provided herein, whether implied or
+# otherwise, applies only to this software file.  Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+#	 OCF parameters are as below
+#
+#	OCF_RESKEY_bond
+#	OCF_RESKEY_bridge
+#	OCF_RESKEY_multiplicator
+#	OCF_RESKEY_name
+#	OCF_RESKEY_repeat_count
+#	OCF_RESKEY_repeat_interval
+#	OCF_RESKEY_pktcnt_timeout
+#
+#######################################################################
+# Initialization:
+
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+#######################################################################
+
+meta_data() {
+	cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="ovsmonitor">
+<version>0.1</version>
+
+<longdesc lang="en">
+Monitor the vitality of a local ovs bond.
+
+You may set up this RA as a clone resource to monitor the network bonds on different nodes, with the same bond name.
+This is not related to the IP address or the network on which a bond is configured.
+You may use this RA to move resources away from a node, which has a faulty bond or prevent moving resources to such a node.
+This gives you independend control of the resources, without involving cluster intercommunication. But it requires your nodes to have more than one network bond.
+
+The resource configuration requires a monitor operation, because the monitor does the main part of the work.
+In addition to the resource configuration, you need to configure some location constraints, based on a CIB attribute value.
+The name of the attribute value is configured in the 'name' option of this RA.
+
+Example constraint configuration using crmsh
+location loc_connected_node my_resource_grp \
+        rule $id="rule_loc_connected_node" -INF: ovsmonitor-bond-public eq 0
+
+Example constraint configuration using pcs. Only allow 'my_resource' to run on nodes where eth0 ethernet device is available.
+pcs constraint location my_resource rule score=-INFINITY ovsmonitor-bond-public ne 1
+
+The ethmonitor works in 3 different modes to test the bond vitality.
+1. call ovs-appctl to see if at least one of the bonding's link status is up (if link is down -> error)
+2. call ovs-ofctl and watch the RX counter (if packages come around in a certain time -> success)
+3. return error
+</longdesc>
+<shortdesc lang="en">Monitors ovs bonding bonds</shortdesc>
+
+<parameters>
+<parameter name="bond" unique="1" required="1">
+<longdesc lang="en">
+The name of the network bond which should be monitored (e.g. bond-public).
+</longdesc>
+<shortdesc lang="en">Bond bond name</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="bridge" unique="1" required="1">
+<longdesc lang="en">
+The name of the ovs bridge that contains the bridge.
+</longdesc>
+<shortdesc lang="en">ovs bridge</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="name" unique="1">
+<longdesc lang="en">
+The name of the CIB attribute to set. This is the name to be used in the constraints. Defaults to "ethmonitor-'bond_name'".
+</longdesc>
+<shortdesc lang="en">Attribute name</shortdesc>
+<content type="string" default=""/>
+</parameter>
+
+<parameter name="multiplier" unique="0" >
+<longdesc lang="en">
+Multiplier for the value of the CIB attriobute specified in parameter name.
+</longdesc>
+<shortdesc lang="en">Multiplier for result variable</shortdesc>
+<content type="integer" default="1"/>
+</parameter>
+
+<parameter name="repeat_count">
+<longdesc lang="en">
+Specify how often the bond will be monitored, before the status is set to failed. You need to set the timeout of the monitoring operation to at least repeat_count * repeat_interval
+</longdesc>
+<shortdesc lang="en">Monitor repeat count</shortdesc>
+<content type="integer" default="5"/>
+</parameter>
+
+<parameter name="repeat_interval">
+<longdesc lang="en">
+Specify how long to wait in seconds between the repeat_counts.
+</longdesc>
+<shortdesc lang="en">Monitor repeat interval in seconds</shortdesc>
+<content type="integer" default="10"/>
+</parameter>
+
+<parameter name="pktcnt_timeout">
+<longdesc lang="en">
+Timeout for the RX packet counter. Stop listening for packet counter changes after the given number of seconds.
+</longdesc>
+<shortdesc lang="en">packet counter timeout</shortdesc>
+<content type="integer" default="5"/>
+</parameter>
+
+<parameter name="link_status_only">
+<longdesc lang="en">
+Only report success based on link status. Do not perform RX counter related connectivity tests.
+</longdesc>
+<shortdesc lang="en">link status check only</shortdesc>
+<content type="boolean" default="false" />
+</parameter>
+
+</parameters>
+<actions>
+<action name="start" timeout="60s" />
+<action name="stop" timeout="20s" />
+<action name="status" depth="0" timeout="60s" interval="10s" />
+<action name="monitor" depth="0" timeout="60s" interval="10s" />
+<action name="meta-data" timeout="5s" />
+<action name="validate-all" timeout="20s" />
+</actions>
+</resource-agent>
+END
+
+	exit $OCF_SUCCESS
+}
+
+#
+# Return true, if the bond exists
+#
+is_bond() {
+	# A zero exit from "ovs-appctl bond/show" means OVS knows this bond.
+	ovs-appctl bond/show $OCF_RESKEY_bond 1>/dev/null 2>&1
+}
+
+#
+# Return true, if the bridge exists
+#
+is_bridge() {
+	# Use the dedicated ovs-vsctl sub-command rather than grepping the
+	# free-form "ovs-vsctl show" output, where an unanchored pattern
+	# could also match substrings of other bridge names.
+	ovs-vsctl br-exists "$OCF_RESKEY_bridge"
+}
+
+
+if_init() {
+	local rc
+
+	if [ X"$OCF_RESKEY_bond" = "X" ]; then
+		ocf_exit_reason "Bond name (the bond parameter) is mandatory"
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	if [ X"$OCF_RESKEY_bridge" = "X" ]; then
+		ocf_exit_reason "Bridge name (the bridge parameter) is mandatory"
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	BOND="$OCF_RESKEY_bond"
+	BRIDGE="$OCF_RESKEY_bridge"
+
+	if is_bond
+	then
+		if ! is_bridge
+		then
+			ocf_exit_reason "Bridge $OCF_RESKEY_bond does not exist"
+	 		exit $OCF_ERR_CONFIGURED;
+		fi
+	else
+		ocf_exit_reason "Bond $OCF_RESKEY_bond does not exist"
+		exit $OCF_ERR_CONFIGURED;
+	fi
+
+	: ${OCF_RESKEY_multiplier:="1"}
+	if ! ocf_is_decimal "$OCF_RESKEY_multiplier"; then
+		ocf_exit_reason "Invalid OCF_RESKEY_multiplier [$OCF_RESKEY_multiplier]"
+		exit $OCF_ERR_CONFIGURED
+	fi
+
+	ATTRNAME=${OCF_RESKEY_name:-"ovsmonitor-$BOND"}
+
+	REP_COUNT=${OCF_RESKEY_repeat_count:-5}
+	if ! ocf_is_decimal "$REP_COUNT" -o [ $REP_COUNT -lt 1 ]; then
+		ocf_exit_reason "Invalid OCF_RESKEY_repeat_count [$REP_COUNT]"
+		exit $OCF_ERR_CONFIGURED
+	fi
+	REP_INTERVAL_S=${OCF_RESKEY_repeat_interval:-10}
+	if ! ocf_is_decimal "$REP_INTERVAL_S"; then
+		ocf_exit_reason "Invalid OCF_RESKEY_repeat_interval [$REP_INTERVAL_S]"
+		exit $OCF_ERR_CONFIGURED
+	fi
+	: ${OCF_RESKEY_pktcnt_timeout:="5"}
+	if ! ocf_is_decimal "$OCF_RESKEY_pktcnt_timeout"; then
+		ocf_exit_reason "Invalid OCF_RESKEY_pktcnt_timeout [$OCF_RESKEY_pktcnt_timeout]"
+		exit $OCF_ERR_CONFIGURED
+	fi
+	return $OCF_SUCCESS
+}
+
+# get the link status on $BOND
+# asks ovs-appctl for the bond's slaves and returns the number of
+# "enabled" (up) slave links; 0 means the whole bond is down
+get_link_status () {
+	#$IP2UTIL -o link show up dev "$BOND" | grep -v 'NO-CARRIER' | grep -c "$BOND"
+	ovs-appctl bond/show "$BOND"|awk -F: '/^slave/ {print $2}'|grep -c enabled
+}
+
+# returns the number of received rx packets on $BOND
+# Sums the rx packet counters of all the bond's slave ports as reported
+# by "ovs-ofctl dump-ports" on $BRIDGE.
+get_rx_packets () {
+	ocf_log debug "bond $BOND - bridge $BRIDGE"
+	#$IP2UTIL -o -s link show dev "$BOND" \
+	#	| sed 's/.* RX: [^0-9]*[0-9]* *\([0-9]*\) .*/\1/'
+	local ovs_port
+
+	for ovs_port in $(ovs-appctl bond/show $BOND|awk '/^slave/ {gsub(":","");print $2}') ; do
+		ovs-ofctl dump-ports $BRIDGE $ovs_port
+	done \
+		| awk -F, 'BEGIN{total=0} /rx/ {gsub(".*pkts=","");total=total+int($1)} END{print total}'
+}
+
+# Watch for packet counter changes for at most OCF_RESKEY_pktcnt_timeout
+# seconds (polled in 0.1s steps). Returns 0 immediately once any packets
+# were received, otherwise 1 after the timeout.
+watch_pkt_counter () {
+	local RX_PACKETS_NEW
+	local RX_PACKETS_OLD
+	RX_PACKETS_OLD="`get_rx_packets`"
+	for n in `seq $(( $OCF_RESKEY_pktcnt_timeout * 10 ))`; do
+		sleep 0.1
+		RX_PACKETS_NEW="`get_rx_packets`"
+		ocf_log debug "RX_PACKETS_OLD: $RX_PACKETS_OLD	RX_PACKETS_NEW: $RX_PACKETS_NEW"
+		if [ "$RX_PACKETS_OLD" -ne "$RX_PACKETS_NEW" ]; then
+			ocf_log debug "we received some packets."
+			return 0
+		fi
+	done
+	return 1
+}
+
+#
+# Check the health of the bond:
+#   1. link status of the bond's slaves (no enabled slave -> not running)
+#   2. unless link_status_only is set, watch the RX packet counter
+# Returns OCF_SUCCESS or OCF_NOT_RUNNING. (The promiscuous-mode check
+# inherited from ethmonitor is not implemented here - see the commented
+# block below.)
+#
+if_check () {
+	# always check link status first
+	link_status="`get_link_status`"
+	ocf_log debug "link_status: $link_status (up > 0, down = 0)"
+
+	if [ $link_status -eq 0 ]; then
+		ocf_log notice "link_status: DOWN"
+		return $OCF_NOT_RUNNING
+	fi
+
+	# if using link_status_only, skip RX count related test
+	if ocf_is_true "$OCF_RESKEY_link_status_only"; then
+		return $OCF_SUCCESS
+	fi
+
+	# watch for packet counter changes
+	ocf_log debug "watch for packet counter changes"
+	watch_pkt_counter
+	if [ $? -eq 0 ]; then
+		return $OCF_SUCCESS
+	else
+		ocf_log debug "No packets received during packet watch timeout"
+	fi
+
+	# watch for packet counter changes in promiscios mode
+#	ocf_log debug "watch for packet counter changes in promiscios mode"
+	# be sure switch off promiscios mode in any case
+	# TODO: check first, wether promisc is already on and leave it untouched.
+#	trap "$IP2UTIL link set dev $BOND promisc off; exit" INT TERM EXIT
+#		$IP2UTIL link set dev $BOND promisc on
+#		watch_pkt_counter && return $OCF_SUCCESS
+#		$IP2UTIL link set dev $BOND promisc off
+#	trap - INT TERM EXIT
+
+	# looks like it's not working (for whatever reason)
+	return $OCF_NOT_RUNNING
+}
+
+#######################################################################
+
+# Print brief usage text for help/unknown actions.
+if_usage() {
+	cat <<END
+usage: $0 {start|stop|status|monitor|validate-all|meta-data}
+
+Expects to have a fully populated OCF RA-compliant environment set.
+END
+}
+
+set_cib_value() {
+	local score=`expr $1 \* $OCF_RESKEY_multiplier`
+	attrd_updater -n $ATTRNAME -v $score -q
+	local rc=$?
+	case $rc in
+		0) ocf_log debug "attrd_updater: Updated $ATTRNAME = $score" ;;
+		*) ocf_log warn "attrd_updater: Could not update $ATTRNAME = $score: rc=$rc";;
+	esac
+	return $rc
+}
+
+# Monitor the bond, retrying up to REP_COUNT times at REP_INTERVAL_S
+# spacing, then publish the result (1/0) to the CIB. Exits (not returns)
+# with the status of the attribute update: a down bond is reported via
+# the attribute, not as a resource failure.
+if_monitor() {
+	ha_pseudo_resource $OCF_RESOURCE_INSTANCE monitor
+	local pseudo_status=$?
+	if [ $pseudo_status -ne $OCF_SUCCESS ]; then
+		exit $pseudo_status
+	fi
+
+	local mon_rc=$OCF_NOT_RUNNING
+	local attr_rc=$OCF_NOT_RUNNING
+	local runs=0
+	local start_time
+	local end_time
+	local sleep_time
+	while [ $mon_rc -ne $OCF_SUCCESS -a $REP_COUNT -gt 0 ]
+	do
+		start_time=`date +%s%N`
+		if_check
+		mon_rc=$?
+		REP_COUNT=$(( $REP_COUNT - 1 ))
+		if [ $mon_rc -ne $OCF_SUCCESS -a $REP_COUNT -gt 0 ]; then
+			ocf_log warn "Monitoring of $OCF_RESOURCE_INSTANCE failed, $REP_COUNT retries left."
+			# sleep only for what is left of the interval after the
+			# time if_check itself consumed (computed via bc in ns)
+			end_time=`date +%s%N`
+			sleep_time=`echo "scale=9; ( $start_time + ( $REP_INTERVAL_S * 1000000000 ) - $end_time ) / 1000000000" | bc -q 2> /dev/null`
+			sleep $sleep_time 2> /dev/null
+			runs=$(($runs + 1))
+		fi
+
+		if [ $mon_rc -eq $OCF_SUCCESS -a $runs -ne 0 ]; then
+			ocf_log info "Monitoring of $OCF_RESOURCE_INSTANCE recovered from error"
+		fi
+	done
+
+	ocf_log debug "Monitoring return code: $mon_rc"
+	if [ $mon_rc -eq $OCF_SUCCESS ]; then
+		set_cib_value 1
+		attr_rc=$?
+	else
+		ocf_log err "Monitoring of $OCF_RESOURCE_INSTANCE failed."
+		set_cib_value 0
+		attr_rc=$?
+	fi
+
+	## The resource should not fail, if the bond is down. It should fail, if the update of the CIB variable has errors.
+	## To react on the bond failure you must use constraints based on the CIB variable value, not on the resource itself.
+	exit $attr_rc
+}
+
+# Remove the CIB attribute and tear down the pseudo resource state.
+if_stop()
+{
+	attrd_updater -D -n $ATTRNAME
+	ha_pseudo_resource $OCF_RESOURCE_INSTANCE stop
+}
+
+# Register the pseudo resource, then run one monitor cycle right away so
+# the CIB attribute is populated during start.
+if_start()
+{
+	local rc
+	ha_pseudo_resource "$OCF_RESOURCE_INSTANCE" start
+	rc=$?
+	if [ $rc -ne $OCF_SUCCESS ]; then
+		ocf_exit_reason "Failure to create ovsmonitor state file"
+		return $rc
+	fi
+
+	# perform the first monitor during the start operation
+	if_monitor
+	return $?
+}
+
+
+# Verify the required Open vSwitch binaries exist, then validate and
+# initialize the configuration (if_init exits on error).
+if_validate() {
+	check_binary ovs-vsctl
+	check_binary ovs-appctl
+	check_binary ovs-ofctl
+	if_init
+}
+
+# Actions that must work without a validated configuration.
+case $__OCF_ACTION in
+meta-data)	meta_data
+		;;
+usage|help)	if_usage
+		exit $OCF_SUCCESS
+		;;
+esac
+
+# Everything below needs binaries and parameters checked; if_validate
+# exits itself on any error.
+if_validate
+
+case $__OCF_ACTION in
+start)		if_start
+		exit $?
+		;;
+stop)		if_stop
+		exit $?
+		;;
+monitor|status)	if_monitor
+		exit $?
+		;;
+# if_validate already ran just above, so $? here is its (success) status.
+validate-all)	exit $?
+		;;
+*)		if_usage
+		exit $OCF_ERR_UNIMPLEMENTED
+		;;
+esac
diff --git a/heartbeat/pgsql b/heartbeat/pgsql
index 15b06df..07d0507 100755
--- a/heartbeat/pgsql
+++ b/heartbeat/pgsql
@@ -299,7 +299,7 @@ It requires master_ip restore_command parameters.
 <parameter name="node_list" unique="0" required="0">
 <longdesc lang="en">
 All node names. Please separate each node name with a space.
-This is required for replication.
+This is optional for replication. Defaults to all nodes in the cluster
 </longdesc>
 <shortdesc lang="en">node list</shortdesc>
 <content type="string" default="${OCF_RESKEY_node_list_default}" />
@@ -377,8 +377,7 @@ This is optional for replication.
 Set this option when using replication slots.
 Can only use lower case letters, numbers and underscore for replication_slot_name.
 
-When the master node has 1 slave node,one replication slot would be created with the name "replication_slot_name".
-When the master node has 2 or more slave nodes,the replication slots would be created for each node, with the name adding the node name as postfix.
+The replication slots would be created for each node, with the name adding the node name as postfix.
 For example, replication_slot_name is "sample" and 2 slaves which are "node1" and "node2" connect to
 their slots, the slots names are "sample_node1" and "sample_node2".
 If the node name contains a upper case letter, hyphen and dot, those characters will be converted to a lower case letter or an underscore.
@@ -408,6 +407,10 @@ This is optional for replication.
 <longdesc lang="en">
 Number of checks of xlog on monitor before promote.
 This is optional for replication.
+
+Note: For backward compatibility, the terms are unified with PostgreSQL 9.
+      If you are using PostgreSQL 10 or later, replace "xlog" with "wal".
+      Likewise, replacing "location" with "lsn".
 </longdesc>
 <shortdesc lang="en">xlog check count</shortdesc>
 <content type="integer" default="${OCF_RESKEY_xlog_check_count_default}" />
@@ -594,7 +597,7 @@ pgsql_real_start() {
     pgctl_options="$pgctl_options -o '$postgres_options'"
 
     # Invoke pg_ctl
-    runasowner "unset PGUSER; unset PGPASSWORD; $OCF_RESKEY_pgctl $pgctl_options start"
+    runasowner "unset PGUSER; unset PGPASSWORD; $OCF_RESKEY_pgctl $pgctl_options -W start"
 
     if [ $? -eq 0 ]; then
         # Probably started.....
@@ -615,12 +618,11 @@ pgsql_real_start() {
         ocf_log debug "PostgreSQL still hasn't started yet. Waiting..."
     done
 
-    # create replication slot on the master and slave nodes.
-    # creating slot on the slave node is in preparation for failover.
+    # delete replication slots on all nodes. On master node will be created during promotion.
     if use_replication_slot; then
-        create_replication_slot
+        delete_replication_slots
         if [ $? -eq $OCF_ERR_GENERIC ]; then
-            ocf_exit_reason "PostgreSQL can't create replication_slot."
+            ocf_exit_reason "PostgreSQL can't clean up replication_slot."
             return $OCF_ERR_GENERIC
         fi
     fi
@@ -687,6 +689,15 @@ pgsql_promote() {
     touch $PGSQL_LOCK
     show_master_baseline
 
+    # create replication slots on master before promotion
+    if use_replication_slot; then
+        create_replication_slots
+        if [ $? -eq $OCF_ERR_GENERIC ]; then
+            ocf_exit_reason "PostgreSQL can't create replication_slot."
+            return $OCF_ERR_GENERIC
+        fi
+    fi
+
     if ocf_is_true ${OCF_RESKEY_restart_on_promote}; then
         ocf_log info "Restarting PostgreSQL instead of promote."
         #stop : this function returns $OCF_SUCCESS only.
@@ -702,7 +713,7 @@ pgsql_promote() {
             return $OCF_ERR_GENERIC
         fi
     else
-        runasowner "$OCF_RESKEY_pgctl -D $OCF_RESKEY_pgdata promote"
+        runasowner "$OCF_RESKEY_pgctl -D $OCF_RESKEY_pgdata -W promote"
         if [ $? -eq 0 ]; then
             ocf_log info "PostgreSQL promote command sent."
         else
@@ -1063,8 +1074,13 @@ pgsql_pre_promote() {
             cmp_location=`printf "$master_baseline\n$my_master_baseline\n" |\
                           sort | head -1`
             if [ "$cmp_location" != "$my_master_baseline" ]; then
+                # We used to set the failcount to INF for the resource here in
+                # order to move the master to the other node. However, setting
+                # the failcount should be done only by the CRM and so this use
+                # got deprecated in pacemaker version 1.1.17. Now we do the
+                # "ban resource from the node".
                 ocf_exit_reason "My data is newer than new master's one. New master's location : $master_baseline"
-                exec_with_retry 0 $CRM_FAILCOUNT -r $OCF_RESOURCE_INSTANCE -U $NODENAME -v INFINITY
+                exec_with_retry 0 $CRM_RESOURCE -B -r $OCF_RESOURCE_INSTANCE -N $NODENAME -Q
                 return $OCF_ERR_GENERIC
             fi
         fi
@@ -1321,12 +1337,9 @@ create_replication_slot_name() {
         number_of_nodes=`echo $NODE_LIST | wc -w`
     fi
 
-    # If the number of nodes 2 or less, Master node has 1 or less Slave node.
-    # The Master node should have 1 slot for the Slave, which is named "$OCF_RES_KEY_replication_slot_name".
-    if [ $number_of_nodes -le 2 ]; then
-        replication_slot_name_list="$OCF_RESKEY_replication_slot_name"
+    if [ $number_of_nodes -le 0 ]; then
+        replication_slot_name_list=""
 
-    # If the number of nodes 3 or more, the Master has some Slave nodes.
     # The Master node should have some slots equal to the number of Slaves, and
     # the Slave nodes connect to their dedicated slot on the Master.
     # To ensuring that the slots name are each unique, add postfix to $OCF_RESKEY_replication_slot.
@@ -1348,7 +1361,34 @@ create_replication_slot_name() {
     echo $replication_slot_name_list
 }
 
-create_replication_slot() {
+# Drop the replication slot named $1 on the local server.
+# NOTE: fills the (deliberately non-local) variable $output with the SQL
+# output/error text for the caller, and returns exec_sql's status.
+delete_replication_slot(){
+    DELETE_REPLICATION_SLOT_sql="SELECT pg_drop_replication_slot('$1');"
+    output=`exec_sql "$DELETE_REPLICATION_SLOT_sql"`
+    return $?
+}
+
+# Drop every replication slot this resource manages (one per node, names
+# from create_replication_slot_name). Returns $OCF_ERR_GENERIC on the
+# first failure, otherwise falls through successfully.
+delete_replication_slots() {
+    local replication_slot_name_list
+    local replication_slot_name
+
+    replication_slot_name_list=`create_replication_slot_name`
+    ocf_log debug "replication slot names are $replication_slot_name_list."
+
+    for replication_slot_name in $replication_slot_name_list
+    do
+        # only drop slots that actually exist ("1" from check_replication_slot)
+        if [ `check_replication_slot $replication_slot_name` = "1" ]; then
+            delete_replication_slot $replication_slot_name
+            if [ $? -eq 0 ]; then
+                ocf_log info "PostgreSQL delete the replication slot($replication_slot_name)."
+            else
+                # $output was set by delete_replication_slot
+                ocf_exit_reason "$output"
+                return $OCF_ERR_GENERIC
+            fi
+        fi
+    done
+}
+
+create_replication_slots() {
     local replication_slot_name
     local replication_slot_name_list
     local output
@@ -1363,11 +1403,8 @@ create_replication_slot() {
     do
         # If the same name slot is already exists, initialize(delete and create) the slot.
         if [ `check_replication_slot $replication_slot_name` = "1" ]; then
-            DELETE_REPLICATION_SLOT_sql="SELECT pg_drop_replication_slot('$replication_slot_name');"
-            output=`exec_sql "$DELETE_REPLICATION_SLOT_sql"`
-            rc=$?
-
-            if [ $rc -eq 0 ]; then
+            delete_replication_slot $replication_slot_name
+            if [ $? -eq 0 ]; then
                 ocf_log info "PostgreSQL delete the replication slot($replication_slot_name)."
             else
                 ocf_exit_reason "$output"
@@ -1400,6 +1437,7 @@ check_replication_slot(){
     echo "$output"
 }
 
+# On postgreSQL 10 or later, "location" means "lsn".
 get_my_location() {
     local rc
     local output
@@ -1438,6 +1476,7 @@ get_my_location() {
     return 0
 }
 
+# On postgreSQL 10 or later, "xlog_location" means "wal_lsn".
 show_xlog_location() {
     local location
 
@@ -1445,6 +1484,7 @@ show_xlog_location() {
     exec_with_retry 0 $CRM_ATTR_REBOOT -N "$NODENAME" -n "$PGSQL_XLOG_LOC_NAME" -v "$location"
 }
 
+# On postgreSQL 10 or later, "xlog_location" means "wal_lsn".
 delete_xlog_location() {
     exec_with_retry 5 $CRM_ATTR_REBOOT -N "$NODENAME" -n "$PGSQL_XLOG_LOC_NAME" -D
 }
@@ -1474,7 +1514,7 @@ set_async_mode_all() {
 }
 
 set_async_mode() {
-    cat $REP_MODE_CONF |  grep -q -e "[,' ]$1[,' ]"
+    cat $REP_MODE_CONF |  grep -q -E "(\"$1\")|([,' ]$1[,' ])"
     if [ $? -eq 0 ]; then
         ocf_log info "Setup $1 into async mode."
         runasowner -q err "echo \"synchronous_standby_names = ''\" > \"$REP_MODE_CONF\""
@@ -1513,7 +1553,6 @@ reload_conf() {
 }
 
 user_recovery_conf() {
-    local number_of_nodes
     local nodename_tmp
 
     # put archive_cleanup_command and recovery_end_command only when defined by user
@@ -1525,13 +1564,8 @@ user_recovery_conf() {
     fi
 
     if use_replication_slot; then
-        number_of_nodes=`echo $NODE_LIST | wc -w`
-        if [ $number_of_nodes -le 2 ]; then
-            echo "primary_slot_name = '${OCF_RESKEY_replication_slot_name}'"
-        else
-            nodename_tmp=`echo "$NODENAME" | tr 'A-Z.-' 'a-z__'`
-            echo "primary_slot_name = '${OCF_RESKEY_replication_slot_name}_$nodename_tmp'"
-        fi
+        nodename_tmp=`echo "$NODENAME" | tr 'A-Z.-' 'a-z__'`
+        echo "primary_slot_name = '${OCF_RESKEY_replication_slot_name}_$nodename_tmp'"
     fi
 }
 
@@ -1835,6 +1869,36 @@ pgsql_validate_all() {
     fi
 
     if is_replication; then
+        REP_MODE_CONF=${OCF_RESKEY_tmpdir}/rep_mode.conf
+        PGSQL_LOCK=${OCF_RESKEY_tmpdir}/PGSQL.lock
+        XLOG_NOTE_FILE=${OCF_RESKEY_tmpdir}/xlog_note
+
+        CRM_MASTER="${HA_SBIN_DIR}/crm_master -l reboot"
+        CRM_ATTR_REBOOT="${HA_SBIN_DIR}/crm_attribute -l reboot"
+        CRM_ATTR_FOREVER="${HA_SBIN_DIR}/crm_attribute -l forever"
+        CRM_RESOURCE="${HA_SBIN_DIR}/crm_resource"
+
+        CAN_NOT_PROMOTE="-INFINITY"
+        CAN_PROMOTE="100"
+        PROMOTE_ME="1000"
+
+        CHECK_MS_SQL="select pg_is_in_recovery()"
+        ocf_version_cmp "$version" "10"
+        if [ $? -eq 1 ] || [ $? -eq 2 ]; then
+            CHECK_XLOG_LOC_SQL="select pg_last_wal_replay_lsn(),pg_last_wal_receive_lsn()"
+        else
+            CHECK_XLOG_LOC_SQL="select pg_last_xlog_replay_location(),pg_last_xlog_receive_location()"
+        fi
+        CHECK_REPLICATION_STATE_SQL="select application_name,upper(state),upper(sync_state) from pg_stat_replication"
+
+        PGSQL_STATUS_ATTR="${RESOURCE_NAME}-status"
+        PGSQL_DATA_STATUS_ATTR="${RESOURCE_NAME}-data-status"
+        PGSQL_XLOG_LOC_NAME="${RESOURCE_NAME}-xlog-loc"
+        PGSQL_MASTER_BASELINE="${RESOURCE_NAME}-master-baseline"
+
+        NODE_LIST=`echo $OCF_RESKEY_node_list | tr '[A-Z]' '[a-z]'`
+        RE_CONTROL_SLAVE="false"
+
         if ! ocf_is_ms; then
             ocf_exit_reason "Replication(rep_mode=async or sync) requires Master/Slave configuration."
             return $OCF_ERR_CONFIGURED
@@ -1961,7 +2025,6 @@ then
     exit $OCF_ERR_GENERIC
 fi
 
-
 PIDFILE=${OCF_RESKEY_pgdata}/postmaster.pid
 BACKUPLABEL=${OCF_RESKEY_pgdata}/backup_label
 RESOURCE_NAME=`echo $OCF_RESOURCE_INSTANCE | cut -d ":" -f 1`
@@ -1969,33 +2032,6 @@ PGSQL_WAL_RECEIVER_STATUS_ATTR="${RESOURCE_NAME}-receiver-status"
 RECOVERY_CONF=${OCF_RESKEY_pgdata}/recovery.conf
 NODENAME=$(ocf_local_nodename | tr '[A-Z]' '[a-z]')
 
-if is_replication; then
-    REP_MODE_CONF=${OCF_RESKEY_tmpdir}/rep_mode.conf
-    PGSQL_LOCK=${OCF_RESKEY_tmpdir}/PGSQL.lock
-    XLOG_NOTE_FILE=${OCF_RESKEY_tmpdir}/xlog_note
-
-    CRM_MASTER="${HA_SBIN_DIR}/crm_master -l reboot"
-    CRM_ATTR_REBOOT="${HA_SBIN_DIR}/crm_attribute -l reboot"
-    CRM_ATTR_FOREVER="${HA_SBIN_DIR}/crm_attribute -l forever"
-    CRM_FAILCOUNT="${HA_SBIN_DIR}/crm_failcount"
-
-    CAN_NOT_PROMOTE="-INFINITY"
-    CAN_PROMOTE="100"
-    PROMOTE_ME="1000"
-
-    CHECK_MS_SQL="select pg_is_in_recovery()"
-    CHECK_XLOG_LOC_SQL="select pg_last_xlog_replay_location(),pg_last_xlog_receive_location()"
-    CHECK_REPLICATION_STATE_SQL="select application_name,upper(state),upper(sync_state) from pg_stat_replication"
-
-    PGSQL_STATUS_ATTR="${RESOURCE_NAME}-status"
-    PGSQL_DATA_STATUS_ATTR="${RESOURCE_NAME}-data-status"
-    PGSQL_XLOG_LOC_NAME="${RESOURCE_NAME}-xlog-loc"
-    PGSQL_MASTER_BASELINE="${RESOURCE_NAME}-master-baseline"
-
-    NODE_LIST=`echo $OCF_RESKEY_node_list | tr '[A-Z]' '[a-z]'`
-    RE_CONTROL_SLAVE="false"
-fi
-
 case "$1" in
     methods)    pgsql_methods
                 exit $?;;
diff --git a/heartbeat/portblock b/heartbeat/portblock
index 776ad17..a518f49 100755
--- a/heartbeat/portblock
+++ b/heartbeat/portblock
@@ -253,7 +253,7 @@ save_tcp_connections()
 		netstat -tn |awk -F '[:[:space:]]+' '
 			$8 == "ESTABLISHED" && $4 == "'$OCF_RESKEY_ip'" \
 			{printf "%s:%s\t%s:%s\n", $4,$5, $6,$7}' |
-			dd of="$statefile".new conv=fsync && 
+			dd of="$statefile".new conv=fsync status=none &&
 			mv "$statefile".new "$statefile"
 	else
 		netstat -tn |awk -F '[:[:space:]]+' '
diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster
index 74378be..ff57934 100755
--- a/heartbeat/rabbitmq-cluster
+++ b/heartbeat/rabbitmq-cluster
@@ -37,7 +37,7 @@ RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia"
 RMQ_PID_DIR="/var/run/rabbitmq"
 RMQ_PID_FILE="/var/run/rabbitmq/rmq.pid"
 RMQ_LOG_DIR="/var/log/rabbitmq"
-NODENAME=$(ocf_local_nodename)
+NODENAME=$(ocf_attribute_target)
 
 # this attr represents the current active local rmq node name.
 # when rmq stops or the node is fenced, this attr disappears
@@ -56,7 +56,9 @@ meta_data() {
 <version>1.0</version>
 
 <longdesc lang="en">
-Starts cloned rabbitmq cluster instance
+Starts cloned rabbitmq cluster instance. NB: note that this RA
+cannot be spawned across a mix of pacemaker and pacemaker-remote nodes.
+Only on pacemaker *or* pacemaker-remote nodes exclusively.
 </longdesc>
 <shortdesc lang="en">rabbitmq clustered</shortdesc>
 
@@ -100,7 +102,7 @@ rmq_wipe_data()
 rmq_local_node()
 {
 
-	local node_name=$(rabbitmqctl status 2>&1 | sed -n -e "s/^.*[S|s]tatus of node \(.*\)\s.*$/\1/p" | tr -d "'")
+	local node_name=$($RMQ_CTL status 2>&1 | sed -n -e "s/^.*[S|s]tatus of node \(.*\)\s.*$/\1/p" | tr -d "'")
 
 	if [ -z "$node_name" ]; then
 		node_name=$(cat /etc/rabbitmq/rabbitmq-env.conf 2>/dev/null | grep "\s*RABBITMQ_NODENAME=" | awk -F= '{print $2}')
@@ -111,7 +113,25 @@ rmq_local_node()
 
 rmq_join_list()
 {
-    cibadmin -Q --xpath "//node_state[@crmd='online']//nvpair[@name='$RMQ_CRM_ATTR_COOKIE']" | grep "$RMQ_CRM_ATTR_COOKIE" | sed -n -e "s/^.*value=.\(.*\)\".*$/\1/p"
+	local join_list=$(cibadmin -Q --xpath "//node_state[@crmd='online']//nvpair[@name='$RMQ_CRM_ATTR_COOKIE']" | grep "$RMQ_CRM_ATTR_COOKIE" | sed -n -e "s/^.*value=.\(.*\)\".*$/\1/p")
+	# If join_list is empty we want to check if there are any remote nodes
+	# where rabbitmq is allowed to run (i.e. nodes without the crmd=online selector)
+	if [ -z "$join_list" ]; then
+		# Get all the nodes written in the ATTR_COOKIE no matter if
+		# they are online or not. This will be one line per node like
+		# rabbit at overcloud-rabbit-0
+		# rabbit at overcloud-rabbit-1
+		# ...
+		local remote_join_list=$(cibadmin -Q --xpath "//node_state//nvpair[@name='$RMQ_CRM_ATTR_COOKIE']" | grep "$RMQ_CRM_ATTR_COOKIE" | sed -n -e "s/^.*value=.\(.*\)\".*$/\1/p")
+		# The following expression prepares a filter like '-e overcloud-rabbit-0 -e overcloud-rabbit-1 -e ...'
+		local filter=$(crm_mon -r --as-xml | xmllint --format --xpath "//nodes//node[@online='true' and @standby='false']/@name" - | xargs -n1 echo | awk -F= '{print "-e "$2}')
+		# export the intersection which gives us only the nodes that
+		# a) wrote their name in the cib attrd
+		# b) run on nodes where pacemaker_remote is enabled
+		join_list="$(echo $remote_join_list | grep $filter)"
+	fi
+
+	echo $join_list
 }
 
 rmq_write_nodename()
@@ -290,7 +310,7 @@ rmq_forget_cluster_node_remotely() {
 
 	ocf_log info "Forgetting $node_to_forget via nodes [ $(echo $running_cluster_nodes | tr '\n' ' ') ]."
 	for running_cluster_node in $running_cluster_nodes; do
-		rabbitmqctl -n $running_cluster_node forget_cluster_node $node_to_forget
+		$RMQ_CTL -n $running_cluster_node forget_cluster_node $node_to_forget
 		if [ $? = 0 ]; then
 			ocf_log info "Succeeded forgetting $node_to_forget via $running_cluster_node."
 			return
@@ -320,7 +340,7 @@ rmq_notify() {
 
 	# forget each stopped rmq instance in the provided pcmk node in the list.
 	for node in $(echo "$node_list"); do
-		local rmq_node="$(${HA_SBIN_DIR}/crm_attribute -N $node -l forever --query --name $RMQ_CRM_ATTR_COOKIE_LAST_KNOWN -q)"
+		local rmq_node="$(${HA_SBIN_DIR}/crm_attribute -N $(ocf_attribute_target $node) -l forever --query --name $RMQ_CRM_ATTR_COOKIE_LAST_KNOWN -q)"
 		if [ -z "$rmq_node" ]; then
 			ocf_log warn "Unable to map pcmk node $node to a known rmq node."
 			continue	
@@ -368,83 +388,92 @@ rmq_start() {
 		return $OCF_ERR_GENERIC
 	fi
 
-	# Restore users and users' permissions (if any)
+	# Restore users, user permissions, and policies (if any)
 	BaseDataDir=`dirname $RMQ_DATA_DIR`
-	if [ -f $BaseDataDir/users.erl ] ; then
-		rabbitmqctl eval "
-			%% Run only if Mnesia is ready.
-			lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso
-			begin
-				[WildPattern] = ets:select(mnesia_gvar, [ { {{rabbit_user, wild_pattern}, '\\\$1'}, [], ['\\\$1'] } ]),
-
-				%% Read users first
-				{ok, [Users]} = file:consult(\"$BaseDataDir/users.erl\"),
-
-				Upgrade = fun
-					({internal_user, A, B, C}) -> {internal_user, A, B, C, rabbit_password_hashing_md5};
-					({internal_user, A, B, C, D}) -> {internal_user, A, B, C, D}
-				end,
+	$RMQ_CTL eval "
+		%% Run only if Mnesia is ready.
+		lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso
+		begin
+			Restore = fun(Table, PostprocessFun, Filename) ->
+				case file:consult(Filename) of
+					{error, _} ->
+						ok;
+					{ok, [Result]} ->
+						lists:foreach(fun(X) -> mnesia:dirty_write(Table, PostprocessFun(X)) end, Result),
+						file:delete(Filename)
+				end
+			end,
 
-				Downgrade = fun
-					({internal_user, A, B, C}) -> {internal_user, A, B, C};
-					({internal_user, A, B, C, rabbit_password_hashing_md5}) -> {internal_user, A, B, C};
-					%% Incompatible scheme, so we will loose user's password ('B' value) during conversion.
-					%% Unfortunately, this case will require manual intervention - user have to run:
-					%%    rabbitmqctl change_password <A> <somenewpassword>
-					({internal_user, A, B, C, _}) -> {internal_user, A, B, C}
-				end,
+			%% Restore users
 
-				case WildPattern of
-					%% Version < 3.6.0
-					{internal_user,'_','_','_'} ->
-						lists:foreach(fun(X) -> mnesia:dirty_write(rabbit_user, Downgrade(X)) end, Users);
-					%% Version >= 3.6.0
-					{internal_user,'_','_','_','_'} ->
-						lists:foreach(fun(X) -> mnesia:dirty_write(rabbit_user, Upgrade(X)) end, Users)
-				end,
+			Upgrade = fun
+				({internal_user, A, B, C}) -> {internal_user, A, B, C, rabbit_password_hashing_md5};
+				({internal_user, A, B, C, D}) -> {internal_user, A, B, C, D}
+			end,
 
-				ok = file:delete(\"$BaseDataDir/users.erl\")
-			end.
-		"
-	fi
-	if [ -f $BaseDataDir/users_perms.erl ] ; then
-		rabbitmqctl eval "
-			%% Run only if Mnesia is ready.
-			lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso
-			begin
-				{ok, [UsersPerms]} = file:consult(\"$BaseDataDir/users_perms.erl\"),
-				lists:foreach(fun(X) -> mnesia:dirty_write(rabbit_user_permission, X) end, UsersPerms),
-
-				ok = file:delete(\"$BaseDataDir/users_perms.erl\")
-			end.
-		"
-	fi
+			Downgrade = fun
+				({internal_user, A, B, C}) -> {internal_user, A, B, C};
+				({internal_user, A, B, C, rabbit_password_hashing_md5}) -> {internal_user, A, B, C};
+				%% Incompatible scheme, so we will lose user's password ('B' value) during conversion.
+				%% Unfortunately, this case will require manual intervention - user have to run:
+				%%    rabbitmqctl change_password <A> <somenewpassword>
+				({internal_user, A, B, C, _}) -> {internal_user, A, B, C}
+			end,
+
+			%% Check db scheme first
+			[WildPattern] = ets:select(mnesia_gvar, [ { {{rabbit_user, wild_pattern}, '\\\$1'}, [], ['\\\$1'] } ]),
+			case WildPattern of
+				%% Version < 3.6.0
+				{internal_user,'_','_','_'} ->
+					Restore(rabbit_user, Downgrade, \"$BaseDataDir/users.erl\");
+				%% Version >= 3.6.0
+				{internal_user,'_','_','_','_'} ->
+					Restore(rabbit_user, Upgrade, \"$BaseDataDir/users.erl\")
+			end,
+
+			NoOp = fun(X) -> X end,
 
+			%% Restore user permissions
+			Restore(rabbit_user_permission, NoOp, \"$BaseDataDir/users_perms.erl\"),
+
+			%% Restore policies
+			Restore(rabbit_runtime_parameters, NoOp, \"$BaseDataDir/policies.erl\")
+		end.
+	"
 	return $OCF_SUCCESS
 }
 
 rmq_stop() {
-	# Backup users and users' permissions
+	# Backup users, user permissions, and policies
 	BaseDataDir=`dirname $RMQ_DATA_DIR`
-	rabbitmqctl eval "
+	$RMQ_CTL eval "
 		%% Run only if Mnesia is still available.
 		lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso
 		begin
-			[WildPattern] = ets:select(mnesia_gvar, [ { {{rabbit_user, wild_pattern}, '\\\$1'}, [], ['\\\$1'] } ]),
+			Backup = fun(Table, SelectPattern, Filter, Filename) ->
+				Result = case catch mnesia:dirty_select(Table, [{SelectPattern, [Filter], ['\\\$_']}]) of
+					{'EXIT', _} -> [];
+					Any -> Any
+				end,
+				Result /= [] andalso file:write_file(Filename, io_lib:fwrite(\"~p.~n\", [Result]))
+			end,
 
-			Users = case WildPattern of
+			%% Backup users
+			%% Check db scheme first
+			[WildPattern] = ets:select(mnesia_gvar, [ { {{rabbit_user, wild_pattern}, '\\\$1'}, [], ['\\\$1'] } ]),
+			UsersSelectPattern = case WildPattern of
 				%% Version < 3.6.0
-				{internal_user,'_','_','_'} ->
-					mnesia:dirty_select(rabbit_user, [{ {internal_user, '\\\$1', '_', '_'}, [{'/=', '\\\$1', <<\"guest\">>}], ['\\\$_'] } ]);
+				{internal_user,'_','_','_'} -> {internal_user, '\\\$1', '_', '_'};
 				%% Version >= 3.6.0
-				{internal_user,'_','_','_','_'} ->
-					mnesia:dirty_select(rabbit_user, [{ {internal_user, '\\\$1', '_', '_', '_'}, [{'/=', '\\\$1', <<\"guest\">>}], ['\\\$_'] } ])
+				{internal_user,'_','_','_','_'} -> {internal_user, '\\\$1', '_', '_', '_'}
 			end,
+			Backup(rabbit_user, UsersSelectPattern, {'/=', '\\\$1', <<\"guest\">>}, \"$BaseDataDir/users.erl\"),
 
-			Users /= [] andalso file:write_file(\"$BaseDataDir/users.erl\", io_lib:fwrite(\"~p.~n\", [Users])),
+			%% Backup user permissions
+			Backup(rabbit_user_permission, {'\\\$1', {'\\\$2', '\\\$3','\\\$4'}, '\\\$5'}, {'/=', '\\\$3', <<\"guest\">>}, \"$BaseDataDir/users_perms.erl\"),
 
-			UsersPerms = mnesia:dirty_select(rabbit_user_permission, [{{'\\\$1', {'\\\$2', '\\\$3','\\\$4'}, '\\\$5'}, [{'/=', '\\\$3', <<\"guest\">>}], ['\\\$_']}]),
-			UsersPerms /= [] andalso file:write_file(\"$BaseDataDir/users_perms.erl\", io_lib:fwrite(\"~p.~n\", [UsersPerms]))
+			%% Backup policies
+			Backup(rabbit_runtime_parameters, {runtime_parameters, {'_', '\\\$1', '_'}, '_'}, {'==', '\\\$1', <<\"policy\">>}, \"$BaseDataDir/policies.erl\")
 		end.
 	"
 
diff --git a/heartbeat/redis b/heartbeat/redis
index d08e57a..bc97f14 100755
--- a/heartbeat/redis
+++ b/heartbeat/redis
@@ -33,6 +33,9 @@ REDIS_REPLICATION_PORT="$OCF_RESKEY_port"
 if ! [ -f $REDIS_CHECK_DUMP ]; then
 	REDIS_CHECK_DUMP="$(which redis-check-dump 2>/dev/null)"
 fi
+if [ -z "$REDIS_CHECK_DUMP" ]; then
+	REDIS_CHECK_DUMP="$(which redis-check-rdb 2>/dev/null)"
+fi
 
 if [ -f "$REDIS_CONFIG" ]; then
 	REDIS_DUMP_DIR="$(cat $REDIS_CONFIG | grep "^\s*dir\s" | awk '{ print $2 }' 2>/dev/null)"
@@ -185,7 +188,8 @@ function last_known_master()
 }
 
 function crm_master_reboot() {
-	"${HA_SBIN_DIR}/crm_master" -l reboot "$@"
+	local node=$(ocf_attribute_target)
+	"${HA_SBIN_DIR}/crm_master" -N $node -l reboot "$@"
 }
 
 function calculate_score()
@@ -369,6 +373,11 @@ function start() {
 		fi
 	done
 
+	while ! [ -s "$REDIS_PIDFILE" ]; do
+		ocf_log debug "start: Waiting for pid file '$REDIS_PIDFILE' to appear"
+		sleep 1
+	done
+
 	ocf_is_ms && demote # pacemaker expects resources to start in slave mode
 
 	monitor
@@ -537,7 +546,7 @@ function validate() {
 	fi
 }
 
-NODENAME=$(ocf_local_nodename)
+NODENAME=$(ocf_attribute_target)
 if [ -f "$REDIS_CONFIG" ]; then
 	clientpasswd="$(cat $REDIS_CONFIG | sed -n -e  's/^\s*requirepass\s*\(.*\)\s*$/\1/p' | tail -n 1)"
 fi
diff --git a/heartbeat/docker b/heartbeat/rkt
similarity index 62%
copy from heartbeat/docker
copy to heartbeat/rkt
index 47f099e..666f885 100755
--- a/heartbeat/docker
+++ b/heartbeat/rkt
@@ -1,10 +1,10 @@
 #!/bin/sh
 #
-# The docker HA resource agent creates and launches a docker container
-# based off a supplied docker image. Containers managed by this agent
-# are both created and removed upon the agent's start and stop actions.
+# The rkt HA resource agent creates and launches a container based off
+# a supplied image. Containers managed by this agent are both created
+# and removed upon the agent's start and stop actions.
 #
-# Copyright (c) 2014 David Vossel <davidvossel at gmail.com>
+# Copyright (c) 2017 Valentin Vidic <Valentin.Vidic at CARNet.hr>
 #                    All Rights Reserved.
 #
 # This program is free software; you can redistribute it and/or modify
@@ -24,7 +24,7 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write the Free Software Foundation,
-# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 #
 
 #######################################################################
@@ -40,22 +40,22 @@ meta_data()
 	cat <<END
 <?xml version="1.0"?>
 <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
-<resource-agent name="docker">
+<resource-agent name="rkt">
 <version>1.0</version>
 
 <longdesc lang="en">
-The docker HA resource agent creates and launches a docker container
-based off a supplied docker image. Containers managed by this agent
+The rkt HA resource agent creates and launches a container
+based off a supplied image. Containers managed by this agent
 are both created and removed upon the agent's start and stop actions.
 </longdesc>
-<shortdesc lang="en">Docker container resource agent.</shortdesc>
+<shortdesc lang="en">rkt container resource agent.</shortdesc>
 
 <parameters>
 <parameter name="image" required="1" unique="0">
 <longdesc lang="en">
-The docker image to base this container off of.
+The image to base this container off of.
 </longdesc>
-<shortdesc lang="en">docker image</shortdesc>
+<shortdesc lang="en">image</shortdesc>
 <content type="string"/>
 </parameter>
 
@@ -64,13 +64,13 @@ The docker image to base this container off of.
 The name to give the created container. By default this will
 be that resource's instance name.
 </longdesc>
-<shortdesc lang="en">docker container name</shortdesc>
+<shortdesc lang="en">container name</shortdesc>
 <content type="string"/>
 </parameter>
 
 <parameter name="allow_pull" unique="0">
 <longdesc lang="en">
-Allow the image to be pulled from the configured docker registry when
+Allow the image to be pulled from the configured registry when
 the image does not exist locally. NOTE, this can drastically increase
 the time required to start the container if the image repository is
 pulled over the network.
@@ -81,12 +81,10 @@ pulled over the network.
 
 <parameter name="run_opts" required="0" unique="0">
 <longdesc lang="en">
-Add options to be appended to the 'docker run' command which is used
+Add options to be appended to the 'rkt run' command which is used
 when creating the container during the start action. This option allows
 users to do things such as setting a custom entry point and injecting
-environment variables into the newly created container. Note the '-d'
-option is supplied regardless of this value to force containers to run
-in the background.
+environment variables into the newly created container.
 
 NOTE: Do not explicitly specify the --name argument in the run_opts. This
 agent will set --name using either the resource's instance or the name
@@ -99,25 +97,31 @@ provided in the 'name' argument of this agent.
 
 <parameter name="run_cmd" required="0" unique="0">
 <longdesc lang="en">
-Specifiy a command to launch within the container once
+Specify a command to launch within the container once
 it has initialized.
 </longdesc>
 <shortdesc lang="en">run command</shortdesc>
 <content type="string"/>
 </parameter>
 
+<parameter name="mount_points" required="0" unique="0">
+<longdesc lang="en">
+A comma separated list of directories that the container is expecting to use.
+The agent will ensure they exist by running 'mkdir -p'
+</longdesc>
+<shortdesc lang="en">Required mount points</shortdesc>
+<content type="string"/>
+</parameter>
+
 <parameter name="monitor_cmd" required="0" unique="0">
 <longdesc lang="en">
-Specifiy the full path of a command to launch within the container to check
+Specify the full path of a command to launch within the container to check
 the health of the container. This command must return 0 to indicate that
 the container is healthy. A non-zero return code will indicate that the
 container has failed and should be recovered.
 
-If 'docker exec' is supported, it is used to execute the command. If not,
-nsenter is used.
-
 Note: Using this method for monitoring processes inside a container
-is not recommended, as containerd tries to track processes running
+is not recommended, as rkt tries to track processes running
 inside the container and does not deal well with many short-lived
 processes being spawned. Ensure that your container monitors its
 own processes and terminates on fatal error rather than invoking
@@ -136,16 +140,6 @@ shutdown
 <content type="boolean"/>
 </parameter>
 
-<parameter name="reuse" required="0" unique="0">
-<longdesc lang="en">
-Allow the container to be reused after stopping the container. By default
-containers are removed after stop. With the reuse option containers
-will persist after the container stops.
-</longdesc>
-<shortdesc lang="en">reuse container</shortdesc>
-<content type="boolean"/>
-</parameter>
-
 </parameters>
 
 <actions>
@@ -162,7 +156,7 @@ END
 #######################################################################
 REQUIRE_IMAGE_PULL=0
 
-docker_usage()
+rkt_usage()
 {
 	cat <<END
 usage: $0 {start|stop|monitor|validate-all|meta-data}
@@ -176,22 +170,19 @@ monitor_cmd_exec()
 {
 	local rc=$OCF_SUCCESS
 	local out
+	local uuid
 
 	if [ -z "$OCF_RESKEY_monitor_cmd" ]; then
 		return $rc
 	fi
 
-	if docker exec --help >/dev/null 2>&1; then
-		out=$(docker exec ${CONTAINER} $OCF_RESKEY_monitor_cmd 2>&1)
-		rc=$?
-	else
-		out=$(echo "$OCF_RESKEY_monitor_cmd" | nsenter --target $(docker inspect --format {{.State.Pid}} ${CONTAINER}) --mount --uts --ipc --net --pid 2>&1)
-		rc=$?
-	fi
+	uuid=$(container_uuid)
+	out=$(rkt enter $uuid $OCF_RESKEY_monitor_cmd 2>&1)
+	rc=$?
 
 	if [ $rc -eq 127 ]; then
 		ocf_log err "monitor cmd failed (rc=$rc), output: $out"
-		ocf_exit_reason "monitor_cmd, ${OCF_RESKEY_monitor_cmd} , not found within container."
+		ocf_exit_reason "monitor_cmd, ${OCF_RESKEY_monitor_cmd}, not found within container."
 		# there is no recovering from this, exit immediately
 		exit $OCF_ERR_ARGS
 	elif [ $rc -ne 0 ]; then
@@ -206,26 +197,35 @@ monitor_cmd_exec()
 
 container_exists()
 {
-	docker inspect --format {{.State.Running}} $CONTAINER | egrep '(true|false)' >/dev/null 2>&1
+	rkt list --no-legend | awk -v C=${CONTAINER} '$2 == C {exit 0} ENDFILE {exit 1}'
+}
+
+container_uuid()
+{
+	rkt list --no-legend --full | awk -v C=${CONTAINER} '$2 == C {print $1; exit}'
+}
+
+container_state()
+{
+	rkt list --no-legend | awk -v C=${CONTAINER} '$2 == C {print $4; exit}'
 }
 
 remove_container()
 {
-	if ocf_is_true "$OCF_RESKEY_reuse"; then
-		# never remove the container if we have reuse enabled.
-		return 0
-	fi
+	local uuid
 
 	container_exists
 	if [ $? -ne 0 ]; then
 		# don't attempt to remove a container that doesn't exist
 		return 0
 	fi
+
+	uuid=$(container_uuid)
 	ocf_log notice "Cleaning up inactive container, ${CONTAINER}."
-	ocf_run docker rm $CONTAINER
+	ocf_run rkt rm $uuid
 }
 
-docker_simple_status()
+rkt_simple_status()
 {
 	local val
 
@@ -234,26 +234,22 @@ docker_simple_status()
 		return $OCF_NOT_RUNNING
 	fi
 
-	# retrieve the 'Running' attribute for the container
-	val=$(docker inspect --format {{.State.Running}} $CONTAINER 2>/dev/null)
-	if [ $? -ne 0 ]; then
-		#not running as a result of container not being found
-		return $OCF_NOT_RUNNING
-	fi
-
-	if ocf_is_true "$val"; then
+	# retrieve the 'STATE' attribute for the container
+	val=$(container_state)
+	if [ "$val" = "running" ]; then
 		# container exists and is running
 		return $OCF_SUCCESS
 	fi
 
+	ocf_log debug "container ${CONTAINER} state is $val"
 	return $OCF_NOT_RUNNING
 }
 
-docker_monitor()
+rkt_monitor()
 {
 	local rc=0
 
-	docker_simple_status
+	rkt_simple_status
 	rc=$?
 
 	if [ $rc -ne 0 ]; then
@@ -263,56 +259,70 @@ docker_monitor()
 	monitor_cmd_exec
 }
 
-docker_start()
+rkt_create_mounts() {
+	oldIFS="$IFS"
+	IFS=","
+	for directory in $OCF_RESKEY_mount_points; do
+		mkdir -p "$directory"
+	done
+	IFS="$oldIFS"
+}
+
+rkt_start()
 {
-	local run_opts="-d --name=${CONTAINER}"
+	rkt_create_mounts
+	local run_opts="--name=${CONTAINER}"
+
 	# check to see if the container has already started
-	docker_simple_status
+	rkt_simple_status
 	if [ $? -eq $OCF_SUCCESS ]; then
 		return $OCF_SUCCESS
 	fi
 
+	if [ -n "$OCF_RESKEY_run_cmd" ]; then
+		run_opts="$run_opts --exec=$OCF_RESKEY_run_cmd"
+	fi
+
 	if [ -n "$OCF_RESKEY_run_opts" ]; then
 		run_opts="$run_opts $OCF_RESKEY_run_opts"
 	fi
 
 	if [ $REQUIRE_IMAGE_PULL -eq 1 ]; then
 		ocf_log notice "Beginning pull of image, ${OCF_RESKEY_image}"
-		docker pull "${OCF_RESKEY_image}"
+		rkt fetch "${OCF_RESKEY_image}"
 		if [ $? -ne 0 ]; then
 			ocf_exit_reason "failed to pull image ${OCF_RESKEY_image}"
 			return $OCF_ERR_GENERIC
 		fi
 	fi
 
-	if ocf_is_true "$OCF_RESKEY_reuse" && container_exists; then
-		ocf_log info "starting existing container $CONTAINER."
-		ocf_run docker start $CONTAINER
-	else
-		# make sure any previous container matching our container name is cleaned up first.
-		# we already know at this point it wouldn't be running
-		remove_container
-		ocf_log info "running container $CONTAINER for the first time"
-		ocf_run docker run $run_opts $OCF_RESKEY_image $OCF_RESKEY_run_cmd
-	fi
+	# make sure any previous container matching our container name is cleaned up first.
+	# we already know at this point it wouldn't be running
+	remove_container
+	ocf_log info "Starting container, ${CONTAINER}."
+	ocf_run systemd-run --slice=machine rkt run $OCF_RESKEY_image $run_opts
 
 	if [ $? -ne 0 ]; then
-		ocf_exit_reason "docker failed to launch container"
+		ocf_exit_reason "Failed to launch container"
 		return $OCF_ERR_GENERIC
 	fi
 
+	while ! container_exists || [ "$(container_state)" == "preparing" ] ; do
+		ocf_log debug "waiting for container to start"
+		sleep 1
+	done
 
 	# wait for monitor to pass before declaring that the container is started
 	while true; do
-		docker_simple_status
+		rkt_simple_status
 		if [ $? -ne $OCF_SUCCESS ]; then
-			ocf_exit_reason "Newly created docker container exited after start"
+			ocf_exit_reason "Newly created container exited after start"
 			return $OCF_ERR_GENERIC
 		fi
 
 		monitor_cmd_exec
 		if [ $? -eq $OCF_SUCCESS ]; then
-			ocf_log notice "Container $CONTAINER  started successfully"
+			ocf_log notice "Container ${CONTAINER} started successfully as $(container_uuid)"
 			return $OCF_SUCCESS
 		fi
 
@@ -321,27 +331,31 @@ docker_start()
 	done
 }
 
-docker_stop()
+rkt_stop()
 {
 	local timeout=60
-	docker_simple_status
-	if [ $? -eq  $OCF_NOT_RUNNING ]; then
+	local uuid
+
+	rkt_simple_status
+	if [ $? -eq $OCF_NOT_RUNNING ]; then
 		remove_container
 		return $OCF_SUCCESS
 	fi
 
 	if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
-		timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000) -10 ))
+		timeout=$(( $OCF_RESKEY_CRM_meta_timeout/1000 - 10 ))
 		if [ $timeout -lt 10 ]; then
 			timeout=10
 		fi
 	fi
 
+	uuid=$(container_uuid)
 	if ocf_is_true "$OCF_RESKEY_force_kill"; then
-		ocf_run docker kill $CONTAINER
+		ocf_log info "Killing container, ${CONTAINER}."
+		ocf_run rkt stop --force $uuid
 	else
-		ocf_log debug "waiting $timeout second[s] before killing container"
-		ocf_run docker stop -t=$timeout $CONTAINER
+		ocf_log info "Stopping container, ${CONTAINER}."
+		ocf_run rkt stop $uuid
 	fi
 
 	if [ $? -ne 0 ]; then
@@ -349,6 +363,23 @@ docker_stop()
 		return $OCF_ERR_GENERIC
 	fi
 
+	while [ $timeout -gt 0 ]; do
+		rkt_simple_status
+		if [ $? -eq $OCF_NOT_RUNNING ]; then
+			break
+		fi
+
+		ocf_log debug "waiting for container to stop"
+		timeout=$(( $timeout - 1 ))
+		sleep 1
+	done
+
+	rkt_simple_status
+	if [ $? -eq $OCF_SUCCESS ]; then
+		ocf_exit_reason "Failed to stop container, ${CONTAINER}."
+		return $OCF_ERR_GENERIC
+	fi
+
 	remove_container
 	if [ $? -ne 0 ]; then
 		ocf_exit_reason "Failed to remove stopped container, ${CONTAINER}, based on image, ${OCF_RESKEY_image}."
@@ -360,24 +391,7 @@ docker_stop()
 
 image_exists()
 {
-	# assume that OCF_RESKEY_name have been validated
-	local IMAGE_NAME="$(echo ${OCF_RESKEY_image} | awk -F':' '{print $1}')"
-
-	# if no tag was specified, use default "latest"
-	local COLON_FOUND=0
-	local IMAGE_TAG="latest"
-
-	COLON_FOUND="$(echo "${OCF_RESKEY_image}" | grep -o ':' | grep -c .)"
-
-	if [ ${COLON_FOUND} -ne 0 ]; then
-		IMAGE_TAG="$(echo ${OCF_RESKEY_image} | awk -F':' '{print $NF}')"
-	fi
-
-	# IMAGE_NAME might be following formats:
-	# - image
-	# - repository/image
-	# - docker.io/image (some distro will display "docker.io/" as prefix)
-	docker images | awk '{print $1 ":" $2}' | egrep -q -s "^(docker.io\/)?${IMAGE_NAME}:${IMAGE_TAG}\$"
+	rkt image list --no-legend | awk -v I=${OCF_RESKEY_image} '$2 == I {exit 0} ENDFILE {exit 1}'
 	if [ $? -eq 0 ]; then
 		# image found
 		return 0
@@ -388,24 +402,24 @@ image_exists()
 		ocf_log notice "Image (${OCF_RESKEY_image}) does not exist locally but will be pulled during start"
 		return 0
 	fi
+
 	# image not found.
 	return 1
 }
 
-docker_validate()
+rkt_validate()
 {
-	check_binary docker
+	check_binary rkt
+	check_binary systemd-run
+
 	if [ -z "$OCF_RESKEY_image" ]; then
 		ocf_exit_reason "'image' option is required"
 		exit $OCF_ERR_CONFIGURED
 	fi
 
-	if [ -n "$OCF_RESKEY_monitor_cmd" ]; then
-		docker exec --help >/dev/null 2>&1
-		if [ ! $? ]; then
-			ocf_log info "checking for nsenter, which is required when 'monitor_cmd' is specified"
-			check_binary nsenter
-		fi
+	if echo ${CONTAINER} | grep -q [^a-z0-9-]; then
+		ocf_exit_reason "'name' must contain only lower case alphanumeric characters and -"
+		exit $OCF_ERR_CONFIGURED
 	fi
 
 	image_exists
@@ -434,37 +448,29 @@ if ocf_is_true "$OCF_RESKEY_CRM_meta_globally_unique"; then
 			exit $OCF_ERR_CONFIGURED
 		fi
 	fi
-	: ${OCF_RESKEY_name=`echo ${OCF_RESOURCE_INSTANCE} | tr ':' '-'`} 
-else 
+	: ${OCF_RESKEY_name=`echo ${OCF_RESOURCE_INSTANCE} | tr ':' '-'`}
+else
 	: ${OCF_RESKEY_name=${OCF_RESOURCE_INSTANCE}}
 fi
 
-if [ -n "$OCF_RESKEY_container" ]; then
-	# we'll keep the container attribute around for a bit in order not to break
-	# any existing deployments. The 'name' attribute is prefered now though.
-	CONTAINER=$OCF_RESKEY_container
-	ocf_log warn "The 'container' attribute is depreciated"
-else
-	CONTAINER=$OCF_RESKEY_name
-fi
+CONTAINER=$OCF_RESKEY_name
 
 case $__OCF_ACTION in
 meta-data) meta_data
 		exit $OCF_SUCCESS;;
 start)
-	docker_validate
-	docker_start;;
-stop)		docker_stop;;
-monitor)	docker_monitor;;
-validate-all)	docker_validate;;
-usage|help)	docker_usage
+	rkt_validate
+	rkt_start;;
+stop)		rkt_stop;;
+monitor)	rkt_monitor;;
+validate-all)	rkt_validate;;
+usage|help)	rkt_usage
 		exit $OCF_SUCCESS
 		;;
-*)		docker_usage
+*)		rkt_usage
 		exit $OCF_ERR_UNIMPLEMENTED
 		;;
 esac
 rc=$?
 ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
 exit $rc
-
diff --git a/heartbeat/sapdb.sh b/heartbeat/sapdb.sh
index 836250a..66e9854 100755
--- a/heartbeat/sapdb.sh
+++ b/heartbeat/sapdb.sh
@@ -360,7 +360,7 @@ then
          ;;
     SYB) export OCF_RESKEY_MONITOR_SERVICES="Server"
          ;;
-    HDB) export OCF_RESKEY_MONITOR_SERVICES="hdbindexserver"
+    HDB) export OCF_RESKEY_MONITOR_SERVICES="hdbindexserver|hdbnameserver"
          ;;
   esac
 fi
diff --git a/heartbeat/sg_persist b/heartbeat/sg_persist
index 69866e6..7800474 100755
--- a/heartbeat/sg_persist
+++ b/heartbeat/sg_persist
@@ -415,10 +415,10 @@ sg_persist_action_stop() {
 
 sg_persist_action_monitor() {
 
-    ACT_MASTER_SCORE=`$MASTER_SCORE_ATTRIBUTE --query --quiet 2>&1`
+    ACT_MASTER_SCORE=`$MASTER_SCORE_ATTRIBUTE --query --quiet 2>/dev/null`
     ocf_log debug "$RESOURCE monitor: ACT_MASTER_SCORE=$ACT_MASTER_SCORE"
     
-    ACT_PENDING=`$PENDING_ATTRIBUTE --query --quiet 2>&1`
+    ACT_PENDING=`$PENDING_ATTRIBUTE --query --quiet 2>/dev/null`
     ocf_log debug "$RESOURCE monitor: ACT_PENDING=$ACT_PENDING"
 
     sg_persist_parse_act_pending
diff --git a/heartbeat/varnish b/heartbeat/varnish
index 84672c7..36ea4a6 100755
--- a/heartbeat/varnish
+++ b/heartbeat/varnish
@@ -25,7 +25,11 @@
 #   OCF_RESKEY_backend_type
 #   OCF_RESKEY_backend_size
 #   OCF_RESKEY_backend_file
-#   OCF_RESKEY_worker_threads
+#   OCF_RESKEY_thread_pools
+#   OCF_RESKEY_thread_pool_min
+#   OCF_RESKEY_thread_pool_max
+#   OCF_RESKEY_thread_pool_timeout
+#   OCF_RESKEY_secret
 #
 #######################################################################
 # Initialization:
@@ -49,9 +53,13 @@ OCF_RESKEY_varnish_group_default=varnish
 OCF_RESKEY_backend_type_default=malloc
 OCF_RESKEY_backend_size_default=1G
 OCF_RESKEY_backend_file_default=/var/lib/varnish/${OCF_RESKEY_name}.bin
-OCF_RESKEY_worker_threads_default=100,3000,120
+OCF_RESKEY_thread_pools_default=2
+OCF_RESKEY_thread_pool_min_default=100
+OCF_RESKEY_thread_pool_max_default=3000
+OCF_RESKEY_thread_pool_timeout_default=120
 OCF_RESKEY_maxfiles_default=131072
 OCF_RESKEY_max_locked_memory_default=82000
+OCF_RESKEY_secret_default=/etc/varnish/secret
 
 : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}}
 : ${OCF_RESKEY_client_binary=${OCF_RESKEY_client_binary_default}}
@@ -63,10 +71,13 @@ OCF_RESKEY_max_locked_memory_default=82000
 : ${OCF_RESKEY_backend_type=${OCF_RESKEY_backend_type_default}}
 : ${OCF_RESKEY_backend_size=${OCF_RESKEY_backend_size_default}}
 : ${OCF_RESKEY_backend_file=${OCF_RESKEY_backend_file_default}}
-: ${OCF_RESKEY_worker_threads=${OCF_RESKEY_worker_threads_default}}
+: ${OCF_RESKEY_thread_pools=${OCF_RESKEY_thread_pools_default}}
+: ${OCF_RESKEY_thread_pool_min=${OCF_RESKEY_thread_pool_min_default}}
+: ${OCF_RESKEY_thread_pool_max=${OCF_RESKEY_thread_pool_max_default}}
+: ${OCF_RESKEY_thread_pool_timeout=${OCF_RESKEY_thread_pool_timeout_default}}
 : ${OCF_RESKEY_maxfiles=${OCF_RESKEY_maxfiles_default}}
 : ${OCF_RESKEY_max_locked_memory=${OCF_RESKEY_max_locked_memory_default}}
-
+: ${OCF_RESKEY_secret=${OCF_RESKEY_secret_default}}
 
 meta_data() {
 	cat <<END
@@ -184,16 +195,41 @@ For example /var/lib/varnish/mybackend.bin
 <content type="string" default="${OCF_RESKEY_backend_file_default}" />
 </parameter>
 
-<parameter name="worker_threads">
+<parameter name="threads_pools">
+<longdesc lang="en">
+Number of worker thread pools.
+Each pool has the minimum, maximum and timeout values configured in the
+thread_pool_min, thread_pool_max and thread_pool_timeout parameters
+</longdesc>
+<shortdesc lang="en">Worker thread pools</shortdesc>
+<content type="string" default="${OCF_RESKEY_thread_pools_default}" />
+</parameter>
+
+<parameter name="thread_pool_min">
+<longdesc lang="en">
+Start  at  least  min but no more than max worker 
+threads with the specified idle timeout in each pool.
+</longdesc>
+<shortdesc lang="en">Minimum worker threads</shortdesc>
+<content type="string" default="${OCF_RESKEY_thread_pool_min_default}" />
+</parameter>
+
+<parameter name="thread_pool_max">
 <longdesc lang="en">
 Start  at  least  min but no more than max worker 
-threads with the specified idle timeout.
+threads with the specified idle timeout in each pool.
+</longdesc>
+<shortdesc lang="en">Maximum worker threads</shortdesc>
+<content type="string" default="${OCF_RESKEY_thread_pool_max_default}" />
+</parameter>
 
-Syntax: min[,max[,timeout]]
-For example: 100,3000,120
+<parameter name="thread_pool_timeout">
+<longdesc lang="en">
+Start  at  least  min but no more than max worker 
+threads with the specified idle timeout in each pool.
 </longdesc>
-<shortdesc lang="en">Worker threads</shortdesc>
-<content type="string" default="${OCF_RESKEY_worker_threads_default}" />
+<shortdesc lang="en">Worker threads timeout</shortdesc>
+<content type="string" default="${OCF_RESKEY_thread_pool_timeout_default}" />
 </parameter>
 
 <parameter name="client_binary">
@@ -221,6 +257,14 @@ Locked shared memory limit (for ulimit -l)
 <content type="string" default="${OCF_RESKEY_max_locked_memory_default}" />
 </parameter>
 
+<parameter name="secret">
+<longdesc lang="en">
+Path to a file containing a secret used for authorizing access to the management port.
+</longdesc>
+<shortdesc lang="en">Path of the secret file</shortdesc>
+<content type="string" default="${OCF_RESKEY_secret_default}" />
+</parameter>
+
 </parameters>
 
 <actions>
@@ -264,7 +308,7 @@ varnish_status() {
             ocf_log info "Varnish is running"
             # check if the child process is started and varnish is
             # reporting child status as ok
-            ocf_run $OCF_RESKEY_client_binary -T $OCF_RESKEY_mgmt_address status
+            ocf_run $OCF_RESKEY_client_binary -T $OCF_RESKEY_mgmt_address -S $OCF_RESKEY_secret status
             v_rc=$?
             if [ "$v_rc" -eq 0 ]; then
                 ocf_log info "Varnish child reported running"
@@ -334,8 +378,12 @@ varnish_start() {
         -t $OCF_RESKEY_ttl \
         -u $OCF_RESKEY_varnish_user \
         -g $OCF_RESKEY_varnish_group \
-        -w $OCF_RESKEY_worker_threads \
+        -p thread_pools=$OCF_RESKEY_thread_pools \
+        -p thread_pool_min=$OCF_RESKEY_thread_pool_min \
+        -p thread_pool_max=$OCF_RESKEY_thread_pool_max \
+        -p thread_pool_timeout=$OCF_RESKEY_thread_pool_timeout \
         -s $OCF_RESKEY_backend_type,$backend_options \
+        -S $OCF_RESKEY_secret \
         -n $OCF_RESKEY_name
     rc=$?
     if [ $rc -ne 0 ]; then
diff --git a/resource-agents.spec.in b/resource-agents.spec.in
index d87364d..bb90fd3 100644
--- a/resource-agents.spec.in
+++ b/resource-agents.spec.in
@@ -42,7 +42,7 @@ Version:	@version@
 Release:	@specver@%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}
 License:	GPLv2+ and LGPLv2+
 URL:		https://github.com/ClusterLabs/resource-agents
-%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
+%if 0%{?fedora} || 0%{?centos_ver} || 0%{?rhel}
 Group:		System Environment/Base
 %else
 Group:		Productivity/Clustering/HA
@@ -60,7 +60,7 @@ BuildRequires: perl python-devel
 BuildRequires: libxslt glib2-devel
 BuildRequires: which
 
-%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
+%if 0%{?fedora} || 0%{?centos_ver} || 0%{?rhel}
 BuildRequires: cluster-glue-libs-devel
 BuildRequires: docbook-style-xsl docbook-dtds
 %if 0%{?rhel} == 0
@@ -80,21 +80,47 @@ BuildRequires:  libxslt docbook_4 docbook-xsl-stylesheets
 
 ## Runtime deps
 # system tools shared by several agents
-Requires: /bin/bash /bin/grep /bin/sed /bin/gawk
-Requires: /bin/ps /usr/bin/pkill /bin/hostname /bin/netstat
+%if 0%{?centos_ver} > 6 || 0%{?rhel} > 6
+Requires: /usr/bin/bash /usr/bin/gawk
+Requires: /usr/bin/ps
+Requires: /usr/sbin/fuser /usr/bin/mount
+%else
+Requires: /bin/bash /bin/gawk
+Requires: /bin/ps
 Requires: /sbin/fuser /bin/mount
+%endif
+Requires: /bin/grep /bin/sed
+Requires: /usr/bin/pkill /bin/hostname /bin/netstat
 
 # Filesystem / fs.sh / netfs.sh
+%if 0%{?centos_ver} > 6 || 0%{?rhel} > 6
+Requires: /usr/sbin/fsck
+Requires: /usr/sbin/fsck.ext2 /usr/sbin/fsck.ext3 /usr/sbin/fsck.ext4
+Requires: /usr/sbin/fsck.xfs
+Requires: /usr/sbin/mount.cifs
+%else
 Requires: /sbin/fsck
 Requires: /sbin/fsck.ext2 /sbin/fsck.ext3 /sbin/fsck.ext4
+%if 0%{?rhel} == 0
 Requires: /sbin/fsck.xfs
-Requires: /sbin/mount.nfs /sbin/mount.nfs4 /sbin/mount.cifs
+%endif
+Requires: /sbin/mount.cifs
+%endif
+Requires: /sbin/mount.nfs /sbin/mount.nfs4
 
 # IPaddr2
+%if 0%{?centos_ver} > 6 || 0%{?rhel} > 6
+Requires: /usr/sbin/ip
+%else
 Requires: /sbin/ip
+%endif
 
 # LVM / lvm.sh
+%if 0%{?centos_ver} > 6 || 0%{?rhel} > 6
+Requires: /usr/sbin/lvm
+%else
 Requires: /sbin/lvm
+%endif
 
 # nfsserver / netfs.sh
 Requires: /usr/sbin/rpc.nfsd /sbin/rpc.statd /usr/sbin/rpc.mountd
@@ -119,14 +145,14 @@ service managers.
 %package -n ldirectord
 License:	GPLv2+
 Summary:	A Monitoring Daemon for Maintaining High Availability Resources
-%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
+%if 0%{?fedora} || 0%{?centos_ver} || 0%{?rhel}
 Group:		System Environment/Daemons
 %else
 Group:		Productivity/Clustering/HA
 %endif
 Obsoletes:	heartbeat-ldirectord <= %{version}
 Provides:	heartbeat-ldirectord = %{version}
-%if 0%{?fedora} > 18 || 0%{?centos_version} > 6 || 0%{?rhel} > 6
+%if 0%{?fedora} > 18 || 0%{?centos_ver} > 6 || 0%{?rhel} > 6
 BuildRequires: perl-podlators
 %endif
 Requires:       %{SSLeay} perl-libwww-perl perl-MailTools
@@ -154,7 +180,7 @@ See 'ldirectord -h' and linux-ha/doc/ldirectord for more information.
 %endif
 
 %prep
-%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos_version} == 0 && 0%{?rhel} == 0
+%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos_ver} == 0 && 0%{?rhel} == 0
 %{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. Please install the correct build packages or define the required macros manually.}
 exit 1
 %endif
@@ -165,7 +191,7 @@ if [ ! -f configure ]; then
 	./autogen.sh
 fi
 
-%if 0%{?fedora} >= 11 || 0%{?centos_version} > 5 || 0%{?rhel} > 5
+%if 0%{?fedora} >= 11 || 0%{?centos_ver} > 5 || 0%{?rhel} > 5
 CFLAGS="$(echo '%{optflags}')"
 %global conf_opt_fatal "--enable-fatal-warnings=no"
 %else
@@ -191,6 +217,9 @@ export CFLAGS
 %if %{defined _unitdir}
     --with-systemdsystemunitdir=%{_unitdir} \
 %endif
+%if %{defined _tmpfilesdir}
+    --with-systemdtmpfilesdir=%{_tmpfilesdir} \
+%endif
 	--with-pkg-name=%{name} \
 	--with-ras-set=%{rasset}
 
@@ -253,6 +282,13 @@ rm -rf %{buildroot}
 /usr/lib/ocf/resource.d/redhat
 %endif
 
+%if %{defined _unitdir}
+%{_unitdir}/resource-agents-deps.target
+%endif
+%if %{defined _tmpfilesdir}
+%{_tmpfilesdir}/%{name}.conf
+%endif
+
 %dir %{_datadir}/%{name}
 %dir %{_datadir}/%{name}/ocft
 %{_datadir}/%{name}/ocft/configs
diff --git a/rgmanager/src/resources/Makefile.am b/rgmanager/src/resources/Makefile.am
index de88c69..30b3be9 100644
--- a/rgmanager/src/resources/Makefile.am
+++ b/rgmanager/src/resources/Makefile.am
@@ -68,7 +68,7 @@ rngdir			= ${CLUSTERDATA}/relaxng
 rng_DATA		= $(DTD) $(XSL) $(RESRNG)
 
 $(TARGET):
-	cat $@.in | sed \
+	cat $(abs_srcdir)/$@.in | sed \
 		-e 's#@''LOGDIR@#${LOGDIR}#g' \
 	> $@.out
 	chmod +x $@.out
diff --git a/systemd/Makefile.am b/systemd/Makefile.am
new file mode 100644
index 0000000..f783e0f
--- /dev/null
+++ b/systemd/Makefile.am
@@ -0,0 +1,25 @@
+#
+# Copyright (C) 2017 Oyvind Albrigtsen
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+
+MAINTAINERCLEANFILES    = Makefile.in
+
+if HAVE_SYSTEMD
+dist_systemdsystemunit_DATA = resource-agents-deps.target
+
+dist_systemdtmpfiles_DATA = resource-agents.conf
+endif
diff --git a/systemd/resource-agents-deps.target b/systemd/resource-agents-deps.target
new file mode 100644
index 0000000..1c4fdff
--- /dev/null
+++ b/systemd/resource-agents-deps.target
@@ -0,0 +1,2 @@
+[Unit]
+Description=resource-agents dependencies
diff --git a/systemd/resource-agents.conf b/systemd/resource-agents.conf
new file mode 100644
index 0000000..1cb129c
--- /dev/null
+++ b/systemd/resource-agents.conf
@@ -0,0 +1 @@
+d /var/run/resource-agents/ 1755 root root
diff --git a/tools/send_arp.libnet.c b/tools/send_arp.libnet.c
index 12fe7f1..7fdfb06 100644
--- a/tools/send_arp.libnet.c
+++ b/tools/send_arp.libnet.c
@@ -69,7 +69,7 @@ static char print_usage[]={
 "  where:\n"
 "    repeatinterval-ms: timing, in milliseconds of sending arp packets\n"
 "      For each ARP announcement requested, a pair of ARP packets is sent,\n"
-"      an ARP request, and an ARP reply. This is becuse some systems\n"
+"      an ARP request, and an ARP reply. This is because some systems\n"
 "      ignore one or the other, and this combination gives the greatest\n"
 "      chance of success.\n"
 "\n"
@@ -81,7 +81,7 @@ static char print_usage[]={
 "\n"
 "    pidfile: pid file to use\n"
 "\n"
-"    device: netowrk interace to use\n"
+"    device: network interface to use\n"
 "\n"
 "    src_ip_addr: source ip address\n"
 "\n"
diff --git a/tools/send_arp.linux.c b/tools/send_arp.linux.c
index 477100a..2aa9b5d 100644
--- a/tools/send_arp.linux.c
+++ b/tools/send_arp.linux.c
@@ -134,6 +134,43 @@ static socklen_t sll_len(size_t halen)
 
 #define SLL_LEN(hln)		sll_len(hln)
 
+#if 1 /* hb_mode: always print hb_mode usage in this binary */
+static char print_usage[]={
+"send_arp: sends out custom ARP packet.\n"
+"  usage: send_arp [-i repeatinterval-ms] [-r repeatcount] [-p pidfile] \\\n"
+"              device src_ip_addr src_hw_addr broadcast_ip_addr netmask\n"
+"\n"
+"  where:\n"
+"    repeatinterval-ms: ignored\n"
+"\n"
+"    repeatcount: how many ARP packets to send.\n"
+"\n"
+"    pidfile: pid file to use\n"
+"\n"
+"    device: network interface to use\n"
+"\n"
+"    src_ip_addr: source ip address\n"
+"\n"
+"    src_hw_addr: only \"auto\" is supported.\n"
+"                 If other specified, it will exit without sending any ARP packets.\n"
+"\n"
+"    broadcast_ip_addr: ignored\n"
+"\n"
+"    netmask: ignored\n"
+"\n"
+"  Notes: Other options of iputils-arping may be accepted but it's not\n"
+"         intended to be supported in this binary.\n"
+"\n"
+};
+
+void usage(void)
+{
+	fprintf(stderr, "%s\n", print_usage);
+	exit(2);
+}
+
+#else /* hb_mode */
+
 void usage(void)
 {
 	fprintf(stderr,
@@ -157,6 +194,7 @@ void usage(void)
 		);
 	exit(2);
 }
+#endif /* hb_mode */
 
 static void set_signal(int signo, void (*handler)(void))
 {
@@ -1113,6 +1151,11 @@ main(int argc, char **argv)
 	    unsolicited = 1;
 	    device.name = argv[optind];
 	    target = argv[optind+1];
+            if (strcmp(argv[optind+2], "auto")) {
+		fprintf(stderr, "send_arp.linux: Gratuitous ARPs are not sent in the Cluster IP configuration\n");
+                /* return success to suppress an error log by the RA */
+		exit(0);
+            }
 
 	} else {
 	    argc -= optind;

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/resource-agents.git



More information about the Debian-HA-Commits mailing list