[Pkg-ceph-commits] [ceph] 01/03: Imported Upstream version 10.2.0

James Downing Page jamespage at moszumanska.debian.org
Fri Apr 22 10:23:37 UTC 2016


This is an automated email from the git hooks/post-receive script.

jamespage pushed a commit to branch ubuntu-xenial
in repository ceph.

commit cef0bf71fabffccb9c400f3445c10d9143525b74
Author: James Page <james.page at ubuntu.com>
Date:   Thu Apr 21 19:53:13 2016 +0100

    Imported Upstream version 10.2.0
---
 AUTHORS                                            |   9 +-
 ChangeLog                                          | 193 +++++++++++++++++++-
 ceph.spec                                          |   3 +-
 ceph.spec.in                                       |   1 +
 configure                                          |  20 +-
 configure.ac                                       |   2 +-
 doc/man/8/ceph-deploy.rst                          |   2 +-
 man/ceph-authtool.8                                |   2 +-
 man/ceph-clsinfo.8                                 |   2 +-
 man/ceph-conf.8                                    |   2 +-
 man/ceph-create-keys.8                             |   2 +-
 man/ceph-debugpack.8                               |   2 +-
 man/ceph-dencoder.8                                |   2 +-
 man/ceph-deploy.8                                  |   4 +-
 man/ceph-detect-init.8                             |   2 +-
 man/ceph-disk.8                                    |   2 +-
 man/ceph-fuse.8                                    |   2 +-
 man/ceph-mds.8                                     |   2 +-
 man/ceph-mon.8                                     |   2 +-
 man/ceph-osd.8                                     |   2 +-
 man/ceph-post-file.8                               |   2 +-
 man/ceph-rbdnamer.8                                |   2 +-
 man/ceph-rest-api.8                                |   2 +-
 man/ceph-run.8                                     |   2 +-
 man/ceph-syn.8                                     |   2 +-
 man/ceph.8                                         |   2 +-
 man/cephfs.8                                       |   2 +-
 man/crushtool.8                                    |   2 +-
 man/librados-config.8                              |   2 +-
 man/monmaptool.8                                   |   2 +-
 man/mount.ceph.8                                   |   2 +-
 man/osdmaptool.8                                   |   2 +-
 man/rados.8                                        |   2 +-
 man/radosgw-admin.8                                |   2 +-
 man/radosgw.8                                      |   2 +-
 man/rbd-fuse.8                                     |   2 +-
 man/rbd-mirror.8                                   |   2 +-
 man/rbd-nbd.8                                      |   2 +-
 man/rbd-replay-many.8                              |   2 +-
 man/rbd-replay-prep.8                              |   2 +-
 man/rbd-replay.8                                   |   2 +-
 man/rbd.8                                          |   2 +-
 man/rbdmap.8                                       |   2 +-
 .../erasure-code/encode-decode-non-regression.sh   |   2 +-
 src/.git_version                                   |   4 +-
 src/Makefile.am                                    |   6 +
 src/Makefile.in                                    |   8 +
 src/common/WeightedPriorityQueue.h                 | 178 +++++++++++++-----
 src/common/WorkQueue.h                             |   5 +
 src/common/buffer.cc                               |  15 +-
 src/common/cohort_lru.h                            |   2 +
 src/common/config_opts.h                           |   4 +
 src/include/buffer.h                               |   2 +
 src/include/rados/buffer.h                         |   2 +
 src/init-ceph.in                                   |  10 +
 src/journal/JournalPlayer.cc                       |  15 +-
 src/journal/JournalPlayer.h                        |  10 +-
 src/journal/ObjectPlayer.cc                        |  32 +++-
 src/librbd/AioImageRequestWQ.cc                    |  81 ++++++---
 src/librbd/AioImageRequestWQ.h                     |   3 +
 src/librbd/ExclusiveLock.cc                        |  87 ++++++---
 src/librbd/ExclusiveLock.h                         |   5 +-
 src/librbd/ImageWatcher.cc                         |  20 +-
 src/librbd/Operations.cc                           |  24 ++-
 src/librbd/exclusive_lock/ReleaseRequest.cc        |  44 ++---
 src/librbd/exclusive_lock/ReleaseRequest.h         |  10 +-
 src/librbd/image/CloseRequest.cc                   |  21 ++-
 src/librbd/image/RefreshRequest.cc                 |  60 +++++-
 src/librbd/image/RefreshRequest.h                  |   8 +
 src/librbd/internal.cc                             |  15 ++
 src/mds/FSMap.cc                                   |   6 +-
 src/msg/async/AsyncConnection.cc                   |  32 ++--
 src/msg/async/AsyncConnection.h                    |   9 +-
 src/msg/simple/DispatchQueue.cc                    |   9 +
 src/msg/xio/XioConnection.cc                       |  62 +++----
 src/msg/xio/XioConnection.h                        |   4 +-
 src/msg/xio/XioMessenger.cc                        |   6 +-
 src/msg/xio/XioPortal.h                            |   6 +-
 src/os/bluestore/BlueStore.cc                      |  35 ++--
 src/os/filestore/LFNIndex.cc                       |   4 +-
 src/osd/OSD.cc                                     |  38 +++-
 src/osd/ReplicatedPG.cc                            |   8 +-
 src/osd/ReplicatedPG.h                             |   2 +-
 src/osd/Watch.cc                                   |  14 +-
 src/osd/Watch.h                                    |   5 +-
 src/pybind/Makefile.am                             |   3 +
 src/pybind/cephfs/setup.py                         |   1 -
 src/pybind/rados/rados.pyx                         |  12 +-
 src/pybind/rados/setup.py                          |   1 -
 src/pybind/rbd/setup.py                            |   1 -
 src/rgw/rgw_acl_s3.cc                              |   2 +-
 src/rgw/rgw_admin.cc                               |  67 +++++--
 src/rgw/rgw_common.h                               |   4 +
 src/rgw/rgw_coroutine.cc                           |  18 ++
 src/rgw/rgw_coroutine.h                            |  15 +-
 src/rgw/rgw_cors_s3.cc                             |   2 +-
 src/rgw/rgw_cr_rados.cc                            |   6 +-
 src/rgw/rgw_cr_rados.h                             |  31 ++--
 src/rgw/rgw_cr_rest.h                              |  14 ++
 src/rgw/rgw_file.cc                                | 201 ++++++++++++++++-----
 src/rgw/rgw_file.h                                 | 126 +++++++++++--
 src/rgw/rgw_op.cc                                  |  59 +++---
 src/rgw/rgw_op.h                                   |  27 ++-
 src/rgw/rgw_rados.cc                               |  48 ++---
 src/rgw/rgw_rados.h                                |  17 +-
 src/rgw/rgw_realm_reloader.cc                      |   4 +
 src/rgw/rgw_realm_watcher.cc                       |   7 +-
 src/rgw/rgw_rest_conn.cc                           |   6 +-
 src/rgw/rgw_rest_log.cc                            |  22 ++-
 src/rgw/rgw_rest_s3.cc                             |  67 ++++---
 src/rgw/rgw_sync.h                                 |   2 -
 src/rgw/rgw_user.h                                 |   2 +
 src/test/bufferlist.cc                             |  18 ++
 src/test/centos-6/ceph.spec.in                     |   1 +
 src/test/centos-7/ceph.spec.in                     |   1 +
 src/test/ceph_objectstore_tool.py                  |  91 +++++-----
 src/test/cephtool-test-mds.sh                      |   6 +-
 src/test/cephtool-test-mon.sh                      |   6 +-
 src/test/cephtool-test-osd.sh                      |   6 +-
 src/test/cephtool-test-rados.sh                    |   4 +-
 src/test/common/test_async_compressor.cc           |   4 +
 src/test/common/test_blkdev.cc                     |  18 +-
 src/test/common/test_weighted_priority_queue.cc    |  38 ++++
 src/test/compressor/test_compression_plugin.cc     |  16 +-
 .../compressor/test_compression_plugin_snappy.cc   |  11 +-
 .../compressor/test_compression_plugin_zlib.cc     |  12 +-
 src/test/compressor/test_compression_snappy.cc     |   4 +-
 src/test/compressor/test_compression_zlib.cc       |   9 +-
 src/test/encoding/check-generated.sh               |  36 ++--
 src/test/encoding/readable.sh                      |  10 +-
 src/test/erasure-code/TestErasureCode.cc           |   5 +-
 src/test/erasure-code/TestErasureCodeExample.cc    |   5 +-
 src/test/erasure-code/TestErasureCodeIsa.cc        |   5 +-
 src/test/erasure-code/TestErasureCodeJerasure.cc   |   5 +-
 src/test/erasure-code/TestErasureCodeLrc.cc        |  13 +-
 src/test/erasure-code/TestErasureCodePlugin.cc     |   8 +-
 src/test/erasure-code/TestErasureCodePluginIsa.cc  |   5 +-
 .../erasure-code/TestErasureCodePluginJerasure.cc  |   5 +-
 src/test/erasure-code/TestErasureCodePluginLrc.cc  |   5 +-
 src/test/erasure-code/TestErasureCodePluginShec.cc |   5 +-
 src/test/erasure-code/TestErasureCodeShec.cc       |   5 +-
 src/test/erasure-code/TestErasureCodeShec_all.cc   |   5 +-
 .../erasure-code/TestErasureCodeShec_arguments.cc  |   5 +-
 .../erasure-code/TestErasureCodeShec_thread.cc     |   5 +-
 src/test/erasure-code/ceph_erasure_code.cc         |   4 +-
 .../ceph_erasure_code_non_regression.cc            |   5 +-
 src/test/erasure-code/test-erasure-code.sh         |  82 ++++-----
 src/test/erasure-code/test-erasure-eio.sh          |  26 +--
 src/test/fedora-21/ceph.spec.in                    |   1 +
 src/test/librados_test_stub/TestClassHandler.cc    |   8 +-
 src/test/libradosstriper/rados-striper.sh          |   2 +-
 .../exclusive_lock/test_mock_ReleaseRequest.cc     |   5 +-
 src/test/librbd/image/test_mock_RefreshRequest.cc  |  14 ++
 src/test/librbd/journal/test_Replay.cc             |  27 ++-
 src/test/librgw_file_nfsns.cc                      |  12 +-
 src/test/mon/misc.sh                               |   2 +-
 src/test/mon/mkfs.sh                               |  32 ++--
 src/test/mon/mon-created-time.sh                   |   8 +-
 src/test/mon/mon-handle-forward.sh                 |  18 +-
 src/test/mon/mon-ping.sh                           |   4 +-
 src/test/mon/mon-scrub.sh                          |   4 +-
 src/test/mon/osd-crush.sh                          | 130 ++++++-------
 src/test/mon/osd-erasure-code-profile.sh           |  92 +++++-----
 src/test/mon/osd-pool-create.sh                    | 144 +++++++--------
 src/test/opensuse-13.2/ceph.spec.in                |   1 +
 src/test/osd/TestRados.cc                          |   5 +-
 src/test/osd/osd-bench.sh                          |  18 +-
 src/test/osd/osd-config.sh                         |  26 +--
 src/test/osd/osd-copy-from.sh                      |  20 +-
 src/test/osd/osd-markdown.sh                       |  15 +-
 src/test/osd/osd-reactivate.sh                     |   2 +-
 src/test/osd/osd-reuse-id.sh                       |   2 +-
 src/test/osd/osd-scrub-repair.sh                   |   2 +-
 src/test/osd/osd-scrub-snaps.sh                    |   2 +-
 src/test/pybind/test_ceph_argparse.py              |   3 +-
 src/test/rbd_mirror/image_replay.cc                |   5 +-
 src/test/test-ceph-helpers.sh                      |   2 +-
 src/test/test_objectstore_memstore.sh              |   2 +-
 src/test/test_pidfile.sh                           |   2 +-
 src/tools/ceph-monstore-update-crush.sh            |  16 +-
 src/tools/rbd/Utils.cc                             |  58 +++++-
 src/tools/rbd/Utils.h                              |  12 +-
 src/tools/rbd/action/BenchWrite.cc                 |   4 +-
 src/tools/rbd/action/Children.cc                   |   2 +-
 src/tools/rbd/action/Clone.cc                      |   4 +-
 src/tools/rbd/action/Copy.cc                       |   5 +-
 src/tools/rbd/action/Create.cc                     |   2 +-
 src/tools/rbd/action/Diff.cc                       |   3 +-
 src/tools/rbd/action/DiskUsage.cc                  |   2 +-
 src/tools/rbd/action/Export.cc                     |   3 +-
 src/tools/rbd/action/ExportDiff.cc                 |   3 +-
 src/tools/rbd/action/Feature.cc                    |   2 +-
 src/tools/rbd/action/Flatten.cc                    |   2 +-
 src/tools/rbd/action/ImageMeta.cc                  |   8 +-
 src/tools/rbd/action/Import.cc                     |   6 +-
 src/tools/rbd/action/ImportDiff.cc                 |   2 +-
 src/tools/rbd/action/Info.cc                       |   3 +-
 src/tools/rbd/action/Kernel.cc                     |   5 +-
 src/tools/rbd/action/Lock.cc                       |   6 +-
 src/tools/rbd/action/MirrorImage.cc                |   8 +-
 src/tools/rbd/action/Nbd.cc                        |   3 +-
 src/tools/rbd/action/ObjectMap.cc                  |   3 +-
 src/tools/rbd/action/Remove.cc                     |   2 +-
 src/tools/rbd/action/Rename.cc                     |   4 +-
 src/tools/rbd/action/Resize.cc                     |   2 +-
 src/tools/rbd/action/Snap.cc                       |  20 +-
 src/tools/rbd/action/Status.cc                     |   2 +-
 src/tools/rbd/action/Watch.cc                      |   2 +-
 src/unittest_bufferlist.sh                         |   2 +-
 src/vstart.sh                                      |  10 +
 systemd/ceph-mds@.service                          |   3 +
 systemd/ceph-mon@.service                          |   3 +
 systemd/ceph-osd@.service                          |   3 +
 systemd/ceph-radosgw@.service                      |   3 +
 214 files changed, 2264 insertions(+), 1035 deletions(-)

diff --git a/AUTHORS b/AUTHORS
index eba7075..880a127 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -2,7 +2,6 @@ Aaron Bassett <abassett at gmail.com>
 Abhishek Dixit <dixitabhi at gmail.com>
 Abhishek Lekshmanan <abhishek.lekshmanan at ril.com>
 Abhishek Lekshmanan <abhishek at suse.com>
-Abhishek Lekshmanan <alekshmanan at suse.com>
 Abhishek Varshney <abhishek.varshney at flipkart.com>
 Accela Zhao <accelazh at gmail.com>
 Adam C. Emerson <aemerson at linuxbox.com>
@@ -152,6 +151,7 @@ Erik Logtenberg <erik at logtenberg.eu>
 Erwan Velu <erwan at redhat.com>
 Erwin, Brock A <Brock.Erwin at pnl.gov>
 Esteban Molina-Estolano <eestolan at lanl.gov>
+Etienne Menguy <etienne.menguy at corp.ovh.com>
 Evan Felix <evan.felix at pnnl.gov>
 Evgeniy Firsov <evgeniy.firsov at sandisk.com>
 Fabio Alessandro Locati <fabiolocati at gmail.com>
@@ -229,7 +229,7 @@ Jean-Rémi Deveaux <jeanremi.deveaux at gmail.com>
 Jeff Epstein <jepst79 at gmail.com>
 Jeffrey Lu <lzhng2000 at aliyun.com>
 Jeff Weber <jweber at cofront.net>
-Jenkins Build Slave User <jenkins-build at trusty-small-unique--a7f82f5f-8832-433e-a632-928924f47e04.localdomain>
+Jenkins Build Slave User <jenkins-build at trusty-small-unique--68a2c286-dc75-4669-822d-28cd109dc3c5.localdomain>
 Jenkins <jenkins at ceph.com>
 Jens-Christian Fischer <jens-christian.fischer at switch.ch>
 Jeremy Qian <vanpire110 at 163.com>
@@ -315,6 +315,7 @@ Luis Pabón <lpabon at redhat.com>
 Luis Periquito <luis.periquito at ocado.com>
 Lukasz Jagiello <lukasz at wikia-inc.com>
 Luo Kexue <luo.kexue at zte.com.cn>
+Luo Runbing <runsisi at zte.com.cn>
 Lu Shi <shi.lu at h3c.com>
 Ma Jianpeng <jianpeng.ma at intel.com>
 Marcel Lauhoff <lauhoff at uni-mainz.de>
@@ -410,6 +411,7 @@ Robin H. Johnson <robin.johnson at dreamhost.com>
 Robin Tang <robintang974 at gmail.com>
 Rohan Mars <code at rohanmars.com>
 Roi Dayan <roid at mellanox.com>
+Roland Mechler <rmechler at cisco.com>
 Roman Haritonov <reclosedev at gmail.com>
 Ron Allred <rallred at itrefined.com>
 Rongze Zhu <zrzhit at gmail.com>
@@ -419,7 +421,6 @@ Ross Turk <rturk at redhat.com>
 Ruben Kerkhof <ruben at rubenkerkhof.com>
 Ruifeng Yang <yangruifeng.09209 at h3c.com>
 runsisi <runsisi at hust.edu.cn>
-runsisi <runsisi at zte.com.cn>
 Rust Shen <rustinpeace at 163.com>
 Rutger ter Borg <rutger at terborg.net>
 Sage Weil <sage at inktank.com>
@@ -518,7 +519,7 @@ Weijun Duan <duanweijun at h3c.com>
 Wei Luo <luowei at yahoo-inc.com>
 Wei Luo <weilluo at tencent.com>
 Wei Qian <weiq at dtdream.com>
-weiqiaomiao <wei.qiaomiao at zte.com.cn>
+Wei Qiaomiao <wei.qiaomiao at zte.com.cn>
 Wenjun Huang <wenjunhuang at tencent.com>
 Wesley Spikes <wesley.spikes at dreamhost.com>
 Wido den Hollander <wido at 42on.com>
diff --git a/ChangeLog b/ChangeLog
index 55473c6..7bdd64d 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,15 +1,199 @@
-4a2a6f7 (HEAD, tag: v10.1.2, origin/jewel) 10.1.2
+3a9fba2 (HEAD, tag: v10.2.0, origin/jewel) 10.2.0
+f86f73f rgw: drop rest op reference in error path
+88369e2 doc: rgw multisite, add pools section & minor cosmetic improvements
+7ca01d4 rgw_admin: improve period update errors
+558863a test: set a default $CEPH_ROOT env variable
+ea2c703 rgw: clean async rest ops in calling cr
+2a15418 rgw: drop async cr reference on simple cr completion
+0a361d5 rgw: RGWRealmWatcher::watch_restart() also unwatches watch
+fe0f4bb rgw: stop cr manager in destructor
+f0e3b61 osd: fix noisy debug line
+61acd9d doc/release-notes.rst: clarify ceph UID/GID assignment on SUSE
+b74a9dd doc/release-notes.rst: minor grammar and style corrections
+b5d973f doc/release-notes: expand distro/arch section to include arm64 info
+b2d9467 doc/release-notes: more detail about the ext4 checks
+ad77f45 doc/release-notes: include changes in rcs
+a4ee2b7 doc/release-notes: clean up jewel notes
+451af76 doc/release-notes: fix rados scrub summary
+3108c34 ceph.spec.in: terminate if statement in %pre scriptlet
+b19bac7 rgw: Add a test for multi-tenancy
+4e1f282 common: typo-fix for osd_check_max_object_name_len_on_startup
+17c4bcf rgw: cross rgw request need to send 'zonegroup' http header
+24b1968 rgw: fix get_zonegroup()
+f78af6f doc: add multisite to index
+c2e4106 doc: rgw multisite documentation
+eefcc67 rgw: binfo_cache is now a RGWRados member
+6f638a5 rgw: RGWShardCollectCR does not need a cct member
+0cdf3bc debian: start ceph-mon-all and ceph-osd-all on package install
+c4b7a25 mailmap: Abhishek Lekshmanan affiliation
+a8efd40 mds/FSMap: use _ in key name
+8642b34 osd: some debug output for old osdmap trimming
+02f5885 osd: add option to disable startup check
+8c1a8a6 osd: refuse to start if configure object limits don't work
+a13ae6f vstart.sh: --short to set ext4-friendly object name and namespace limits
+413b3e7 osd/ReplicatedPG: fix implementation of register_on_success()
+f1cfcff mailmap: Jenkins name normalization
+b3991d0 mailmap: Abhishek Lekshmanan name normalization
+9cdba6c mailmap: Wei Qiaomiao affiliation
+a79a9dc mailmap: Luo Runbing affiliation
+28b91f2 cmake: link ceph_test_rados_api_list against libglobal
+8ddfcb12 cmake: Add cython_modules to `make check`
+e0bda0b cmake: remove repeat OSD.cc
+56c37ea cmake: CMake environment variables added to automake
+b4de745 cmake: Building ceph-detect-init from source code
+4fc06be cmake: Building ceph-disk from source code
+a7b6c01 cmake: Add librgw_file tests
+60e5fa9 cmake: Added rbd_mirror targets
+21ec89f cmake: Added an absolute path for libs
+9288e98 cmake: test_pidfile.sh test passing
+de97f67 cmake: Protect against getenv returning nullptr
+cfcbe21 cmake: erasure-decode-non-regression.sh passing
+fd978ce unittest_journal: got rid of relative paths
+dd0ac99 unittest_blkdev: absolute path for cmake make check
+49da534 cmake: test_objectstore_memstore.sh test passing
+1c8cd00 cmake: erasure-code tests passing
+6596373 cmake: ceph_objectstore_tool.py test passing
+f43dd0f cmake: unittest_bufferlist passing in cmake
+cf24535 cmake: cephtool-test-rados/mds/osd passing in cmake
+005e4b4 cmake: working on cephtool-test-rados.sh test
+d189270 cmake: working on rados-striper.sh test
+8e3e3f4 cmake: librbd fixes
+54eb382 cmake: moved tests into test/system dir
+803def3 cmake: moved tests into test/rgw dir
+798b821 cmake: moved tests into test/osdc dir
+4492439 cmake: moved tests into test/ObjectMap dir
+e7c1459 cmake: moved tests into test/msgr dir
+cfffdf6 cmake: moved tests into test/messenger dir
+99d3436 cmake: moved tests into test/librados_test_stub dir
+eb8e49c cmake: moved tests into test/libcephfs dir
+99d66ca cmake: moved tests into test/journal dir
+997291e cmake: moved tests into test/filestore dir
+02d508c cmake: moved tests into test/compressor dir
+d50478f cmake: moved tests into test/cls_version dir
+e2b0f6f cmake: moved tests into test/cls_statelog dir
+8da608d cmake: moved tests into test/cls_rgw dir
+521b07b cmake: moved tests into test/cls_replica_log dir
+2636c16 cmake: moved tests into test/cls_refcount dir
+fe673ea cmake: moved tests into test/cls_rbd dir
+7271c96 cmake: moved tests into test/cls_numops dir
+de64f82 cmake: moved tests into test/cls_log dir
+0e851de cmake: moved tests into test/cls_lock dir
+6a791fa cmake: moved tests into test/cls_hello dir
+c1f94df cmake: moved tests into test/bench dir
+025f552 cmake: moved tests into test/os dir
+ef8b113 cmake: moved tests into test/pybind dir
+74ea1f9 cmake: moved tests into test/objectstore dir
+852276d cmake: moved tests into test/mds dir
+3fffaa6 cmake: moved tests into test/libradosstriper dir
+83ff750 cmake: moved tests into test/librados dir
+84c999a cmake: moved tests into test/fs dir
+3a10843 cmake: moved tests into test/crush dir
+f65c87a cmake: moved tests into test/common dir
+9be9060 cmake: shell scripts in test/encoding passing in cmake
+5d4d3ff cmake: shell scripts in test/mon passing in cmake
+3186c00 cmake: moved tests into test/osd dir
+59568e5 cmake: encode-decode-non-regression.sh passing in cmake
+e0f400f cmake: test-ceph-helpers working
+a7bd5e4 cmake: Reorganized test dir, added cmake functions
+9634214 release-notes: document rbd cli name validation override
+08c8cee rbd: fail if cannot extract spec from legacy "--image" optional
+08fcc72 rbd: optionally disable spec validation when creating images/snapshots
+e57d7c1 rbd: add support for relaxed image spec validation
+6579c7d common: new rbd image spec validation option
+18ea756 Removed parentheses for if statement.
+d1ed6eb update release-notes manually
+df4e134 msg/simple/DispatchQueue: inject internal delays in special event dispatch
+0b0d584 Fixes for python omap method return values.
+b358c61 FSMap: guard the ever_enabled_multiple decode
+d26322d ceph_test_rados: make long name ~300 chars, (not ~800)
+ecf4572 rgw: aws4: handle UNSIGNED-PAYLOAD under header auth
+769f994 librbd: dynamically disabling exclusive lock might leave lingering request
+ae604de rgw: handle no current_period in is_syncing_bucket_meta
+489d062 release-notes: v10.1.2 release notes
+a4819a6 release-notes: v10.1.2 release notes (draft)
+a353eac rgw_file: fixup attrs across renames
+bf299dc rgw: try to use current period id in a few more cases
+c04fd42 osd: fix watch reconnect race
+8166042 osd/Watch: slightly more informative debug output
+0f8585c doc: fix dependencies
+6208437 systemd: Use the same restart limits as upstart
+a201365 rgw: call rgw_log_usage_finalize() on reconfiguration
+4b1d169 test: image replayer needs dummy remote mirror peer uuid
+5230967 rgw: RGWPeriod::reflect() sets master zonegroup as default
+877d44b journal: race possible when re-watching a journal object
+b6297a7 rgw_file: fix silly abuse of flags in fh_lookup
+6651e51 os/bluestore: fix shortened bluefs paths in env mirror case
+fb924f2 rgw_file: don't need to memset Latch
+842828a cohort_lru: define ctor for TreeX::Latch
+8d346ac test/encoding/readable: use [ for "test" not ((
+f30db90 osd: clear requeue_scrub_on_unblock flag after requeue scrub
+760863f journal: potential for double-free of context on shut down
+5b608ed rgw_file: fix 2 attr decoding issues
+11a9722 journal: possible race condition during live replay
+aece824 doc: reinstate accidentally removed section header
+942504d radosgw-admin: 'realm pull' only accepts --url
+70ff086 radosgw-admin: allow 'period pull --url' without full multisite config
+8451c0f librbd: cancel ops before blocking IO
+e90e3d9 librbd: delay invalidation of exclusive lock pointer
+1e6fc45 rbd: cleanly fail bench-write upon failure
+1761a38 librbd: gracefully handle blacklisted client in ImageWatcher
+5d8b656 rgw_file: force move semantics on rvalue refs in emplace_attrs
+b9ce840 rgw_file: remove RGWLibFS::mkdir() (no longer called)
+917a25d rgw_file: fix nfsns unit test when !created and !create
+bd2c681 rgw_file: check for leaf dir objects and restore their state
+9830f95 rgw_file: use emplace_attr in both paths in mkdir2()
+601b193 rgw_file: use emplace_attr in RGWCreateBucket and RGWCopyObj
+7a7de9b rgw_file: use emplace_attr in RGWPostObj and RGWPutMetadataBucket
+8b2616b rgw_file: remove unused label in mkdir2()
+6bda1e0 rgw_file: use emplace_attr in RGWPutObj::execute()
+6a86ed9 rgw_file: encode ux attrs in mkdir2
+8f5ff95 rgw_file: implement new mkdir2 with atomicity
+34ba7c0 rgw_file: add encode/decode hooks, emplace_attr
+2dbefba rgw_file: move internals of rgw_read into RGWLibFS::read(...)
+8b01e12 rgw_file: declare encoder for Unix object attributes
+f87af25 rgw_file:  declare an attribute for all Unix/rgw_file attrs
+4a2a6f7 (tag: v10.1.2) 10.1.2
+489324f doc/configuration/filesystem-recommendations: recommend against ext4
+a8e2869 doc/release-notes: mention ext4 in the release notes
+112649f doc/start/os-recommendations: drop ancient 'syncfs' note
+638fd2e doc/rados/configuration: minor cleanup of inline xattr description
+2601e2f rgw/rgw_rados: use to_str() instead of c_str()
+d56e439 buffer: add list::to_str()
+bcfce59 doc: do not include ext4 as an example
+2c72c9a doc/8/ceph-deploy: do not include ext4 as an example value
+61c4c1c doc: list ext4 last
+ae97840 doc/dev: remove ancient filestore-filesystem-compat doc
+d5bc886 xio: refactor release_xio_req -> release_xio_msg
+262d85c xio: refactor flush_input_queue -> flush_out_queues
+8162218 xio: refactor on_msg_req -> on_msg
+bde87d3 xio: fix invalid access to member pointer before it is being initialized
+5a746e6 xio: use const for input argument in 2 functions' prototype
 8b98556 PG: set epoch_created and parent_split_bits for child pg
+2be6017 pybind/Makefile.am: fix build with ccache
+142610a os/filestore: fix return type mismatch for lfn parse
 bd1c548 test: fix ut test failure caused by lfn change
+8087cfa librbd: do not return a failure if a peer cannot be notified of update
 45219e0 Fixed ceph-common install.
 fd2f455 mds: take standby_for_fscid into account in FSMap::find_unused()
 b6d8c32 librbd: Fixed bug in disabling non-primary image mirroring
+0b98b4f rgw-rados: return RGWSystemMetaObj init directly
 14a66f6 ceph-disk: fix PrepareData.set_type() args should be class member.
 4c203b3 ceph-disk: fix spelling mistake geattr to getattr.
 5b098ca ceph-disk: fix lockbox set_or_create_partition()
 b7708da radosgw-admin: fix name checking
 770846b radosgw-admin: allow setting zone when there is no realm
+3b83001 rgw: Ensure xmlns is consistent on S3 responses.
+30ce32a cmake: pass RULE_LAUNCHER_* to cython
+2145d72 mailmap: Kris Jurka affiliation
+175b379 AsyncConnection: avoid is_connected require connection's lock
 49886d5 check-generated.sh: can't source bash from sh
+ad489f7 librbd: exclusive lock might be disabled while waiting to acquire lock
+fd24361 librbd: acquire exclusive lock before disabling features
+458bef8 librbd: dynamically disabling journaling needs to block writes
+aa31e52 test: fix race conditions with re-initializing the journal
+3c4a859 librbd: AIO work queue needs to support dynamic feature updates
+032fda5 WorkQueue: add ability to requeue an item that was dequeued
+a33ee34 qa/workunits/rbd: switch qemu test script shell to bash
 8bc8085 tests: add Ubuntu 16.04 xenial dockerfile
 0e4a92e crush: fix typo
 0a622e6 doc: rgw admin uses "region list" not "regions list"
@@ -28,6 +212,7 @@ ac750ce chain_[f]getxattr: always use size, no reaon to consider CHAIN_XATTR_MAX
 8770043 ceph-disk: fix set_data_partition() when data is partition.
 a330078 rgw-admin: fix period delete error message
 3320f8f rgw-admin: remove unused iterator
+4b0e39e osd/ReplicatedPG: make handle_watch_timeout no-op if !active
 64a8a6a rbd-mirror: fixed bug that caused infinite loop when disabling image mirroring
 a651598 mailmap: Luo Kexue name normalization
 c36c5d4 mailmap: Ning Yao affiliation
@@ -191,6 +376,8 @@ e95a383 librbd: send notifications on mirroring updates
 3748b88 librbd: helper methods for mirroring notifications
 934ce86 librbd: mirroring object notification types
 3145109 librbd: generic object watcher and mirroring watcher
+184ec19 pybind: remove language="c++"
+68ce9a9 cmake: remove unneeded C++ from cython build
 bb07a1b rbd: rbd-mirroring: Automatically disable image mirroring when image is removed
 f254486 os/bluestore: _do_write: fix _do_zero_tail_extent to handle shared extents
 2ed445e os/bluestore: _do_zero: simply truncate up if past eof
@@ -841,6 +1028,7 @@ ce33a41 osd: drop unused from arg from handle_pg_peering_evt
 e929b0b osd: only pass history to handle_pg_peering_evt
 363e431 doc: Add French mirror
 868b794 mirrors: Change contact e-mail address for se.ceph.com
+4c3c2ae common: WeightedPriorityQueue: Boost 1.60.0 requires some more comparisons for advanced lookup and insertion functions.
 ca16037 pybind: flag an RBD Image as closed regardless of result code
 a8e82a3 librbd: permit watch flush to return error code
 bc62792 rgw: free components on shutdown
@@ -1121,6 +1309,9 @@ b0b4b6d os/bluestore/BlueStore: Fix bug when calc offset & end whether locate in
 cd49615 common/obj_bencher.cc: use more readable constant instead of magic number
 9acb00e ceph.spec.in: do not install Ceph RA on systemd platforms
 acfc06d ceph.spec.in: use %{_prefix} for ocf instead of hardcoding /usr
+e13a4b1 test: common/test_weighted_priority_queue Add Some More Corner Cases
+33f68b8 Revert "test/common/test_weighted_priority_queue Fix the unit tests since the"
+de001bd osd: common/WeightedPriorityQueue.h Re-add Round Robin between classes
 a2d58fc rgw: TempURL of Swift URL does support Content-Disposition override.
 c857fcf rgw: ONLY refactor dump_object_metadata() of rgw_rest_swift.cc.
 26f9d69 rgw: add support for overriding Content-Disposition in GET of Swift API.
diff --git a/ceph.spec b/ceph.spec
index 265a7c9..b452084 100644
--- a/ceph.spec
+++ b/ceph.spec
@@ -74,7 +74,7 @@ restorecon -R /var/log/radosgw > /dev/null 2>&1;
 # common
 #################################################################################
 Name:		ceph
-Version:	10.1.2
+Version:	10.2.0
 Release:	0%{?dist}
 Epoch:		1
 Summary:	User space components of the Ceph file system
@@ -1040,6 +1040,7 @@ if ! getent passwd ceph >/dev/null ; then
     CEPH_USER_ID_OPTION=""
     getent passwd $CEPH_USER_ID >/dev/null || CEPH_USER_ID_OPTION="-u $CEPH_USER_ID"
     useradd ceph $CEPH_USER_ID_OPTION -r -g ceph -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/ceph 2>/dev/null || :
+fi
 %endif
 exit 0
 
diff --git a/ceph.spec.in b/ceph.spec.in
index 3a5a6f7..26928f7 100644
--- a/ceph.spec.in
+++ b/ceph.spec.in
@@ -1040,6 +1040,7 @@ if ! getent passwd ceph >/dev/null ; then
     CEPH_USER_ID_OPTION=""
     getent passwd $CEPH_USER_ID >/dev/null || CEPH_USER_ID_OPTION="-u $CEPH_USER_ID"
     useradd ceph $CEPH_USER_ID_OPTION -r -g ceph -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/ceph 2>/dev/null || :
+fi
 %endif
 exit 0
 
diff --git a/configure b/configure
index 1a373da..d2d7dc2 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for ceph 10.1.2.
+# Generated by GNU Autoconf 2.69 for ceph 10.2.0.
 #
 # Report bugs to <ceph-devel at vger.kernel.org>.
 #
@@ -590,8 +590,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='ceph'
 PACKAGE_TARNAME='ceph'
-PACKAGE_VERSION='10.1.2'
-PACKAGE_STRING='ceph 10.1.2'
+PACKAGE_VERSION='10.2.0'
+PACKAGE_STRING='ceph 10.2.0'
 PACKAGE_BUGREPORT='ceph-devel at vger.kernel.org'
 PACKAGE_URL=''
 
@@ -1582,7 +1582,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures ceph 10.1.2 to adapt to many kinds of systems.
+\`configure' configures ceph 10.2.0 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1653,7 +1653,7 @@ fi
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of ceph 10.1.2:";;
+     short | recursive ) echo "Configuration of ceph 10.2.0:";;
    esac
   cat <<\_ACEOF
 
@@ -1837,7 +1837,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-ceph configure 10.1.2
+ceph configure 10.2.0
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2913,7 +2913,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by ceph $as_me 10.1.2, which was
+It was created by ceph $as_me 10.2.0, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
@@ -16408,7 +16408,7 @@ fi
 
 # Define the identity of the package.
  PACKAGE='ceph'
- VERSION='10.1.2'
+ VERSION='10.2.0'
 
 
 cat >>confdefs.h <<_ACEOF
@@ -26100,7 +26100,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by ceph $as_me 10.1.2, which was
+This file was extended by ceph $as_me 10.2.0, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
@@ -26166,7 +26166,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-ceph config.status 10.1.2
+ceph config.status 10.2.0
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
 
diff --git a/configure.ac b/configure.ac
index 0d6427f..bfcf4b8 100644
--- a/configure.ac
+++ b/configure.ac
@@ -8,7 +8,7 @@ AC_PREREQ(2.59)
 # VERSION define is not used by the code.  It gets a version string
 # from 'git describe'; see src/ceph_ver.[ch]
 
-AC_INIT([ceph], [10.1.2], [ceph-devel at vger.kernel.org])
+AC_INIT([ceph], [10.2.0], [ceph-devel at vger.kernel.org])
 
 AX_CXX_COMPILE_STDCXX_11(, mandatory)
 
diff --git a/doc/man/8/ceph-deploy.rst b/doc/man/8/ceph-deploy.rst
index 8a04ef3..8782d2a 100644
--- a/doc/man/8/ceph-deploy.rst
+++ b/doc/man/8/ceph-deploy.rst
@@ -569,7 +569,7 @@ Options
 
 .. option:: --fs-type
 
-	Filesystem to use to format disk ``(xfs, btrfs or ext4)``.
+	Filesystem to use to format disk (e.g., ``xfs``, ``btrfs``).
 
 .. option:: --dmcrypt
 
diff --git a/man/ceph-authtool.8 b/man/ceph-authtool.8
index 44c7610..a723c18 100644
--- a/man/ceph-authtool.8
+++ b/man/ceph-authtool.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-AUTHTOOL" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-AUTHTOOL" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-authtool \- ceph keyring manipulation tool
 .
diff --git a/man/ceph-clsinfo.8 b/man/ceph-clsinfo.8
index 62dfccb..b52a33f 100644
--- a/man/ceph-clsinfo.8
+++ b/man/ceph-clsinfo.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-CLSINFO" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-CLSINFO" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-clsinfo \- show class object information
 .
diff --git a/man/ceph-conf.8 b/man/ceph-conf.8
index ec78d67..02d3b0c 100644
--- a/man/ceph-conf.8
+++ b/man/ceph-conf.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-CONF" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-CONF" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-conf \- ceph conf file tool
 .
diff --git a/man/ceph-create-keys.8 b/man/ceph-create-keys.8
index 04d8248..03bbd84 100644
--- a/man/ceph-create-keys.8
+++ b/man/ceph-create-keys.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-CREATE-KEYS" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-CREATE-KEYS" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-create-keys \- ceph keyring generate tool
 .
diff --git a/man/ceph-debugpack.8 b/man/ceph-debugpack.8
index b130dc6..4a7e50d 100644
--- a/man/ceph-debugpack.8
+++ b/man/ceph-debugpack.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-DEBUGPACK" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-DEBUGPACK" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-debugpack \- ceph debug packer utility
 .
diff --git a/man/ceph-dencoder.8 b/man/ceph-dencoder.8
index 3637d95..10ab1d3 100644
--- a/man/ceph-dencoder.8
+++ b/man/ceph-dencoder.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-DENCODER" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-DENCODER" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-dencoder \- ceph encoder/decoder utility
 .
diff --git a/man/ceph-deploy.8 b/man/ceph-deploy.8
index e3d5d4a..4c12db4 100644
--- a/man/ceph-deploy.8
+++ b/man/ceph-deploy.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-DEPLOY" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-DEPLOY" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-deploy \- Ceph deployment tool
 .
@@ -772,7 +772,7 @@ Destroy the partition table and content of a disk.
 .INDENT 0.0
 .TP
 .B \-\-fs\-type
-Filesystem to use to format disk \fB(xfs, btrfs or ext4)\fP\&.
+Filesystem to use to format disk (e.g., \fBxfs\fP, \fBbtrfs\fP).
 .UNINDENT
 .INDENT 0.0
 .TP
diff --git a/man/ceph-detect-init.8 b/man/ceph-detect-init.8
index ccd3e6c..142ca72 100644
--- a/man/ceph-detect-init.8
+++ b/man/ceph-detect-init.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-DETECT-INIT" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-DETECT-INIT" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-detect-init \- display the init system Ceph should use
 .
diff --git a/man/ceph-disk.8 b/man/ceph-disk.8
index c59faf9..b892d88 100644
--- a/man/ceph-disk.8
+++ b/man/ceph-disk.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-DISK" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-DISK" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-disk \- Ceph disk utility for OSD
 .
diff --git a/man/ceph-fuse.8 b/man/ceph-fuse.8
index 3cea297..381ad29 100644
--- a/man/ceph-fuse.8
+++ b/man/ceph-fuse.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-FUSE" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-FUSE" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-fuse \- FUSE-based client for ceph
 .
diff --git a/man/ceph-mds.8 b/man/ceph-mds.8
index e77e3c2..19dce91 100644
--- a/man/ceph-mds.8
+++ b/man/ceph-mds.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-MDS" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-MDS" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-mds \- ceph metadata server daemon
 .
diff --git a/man/ceph-mon.8 b/man/ceph-mon.8
index a8c744f..750c0b3 100644
--- a/man/ceph-mon.8
+++ b/man/ceph-mon.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-MON" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-MON" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-mon \- ceph monitor daemon
 .
diff --git a/man/ceph-osd.8 b/man/ceph-osd.8
index 94dd69c..380f766 100644
--- a/man/ceph-osd.8
+++ b/man/ceph-osd.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-OSD" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-OSD" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-osd \- ceph object storage daemon
 .
diff --git a/man/ceph-post-file.8 b/man/ceph-post-file.8
index a762e4e..cc7261b 100644
--- a/man/ceph-post-file.8
+++ b/man/ceph-post-file.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-POST-FILE" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-POST-FILE" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-post-file \- post files for ceph developers
 .
diff --git a/man/ceph-rbdnamer.8 b/man/ceph-rbdnamer.8
index 64749e8..0ff5b9a 100644
--- a/man/ceph-rbdnamer.8
+++ b/man/ceph-rbdnamer.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-RBDNAMER" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-RBDNAMER" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-rbdnamer \- udev helper to name RBD devices
 .
diff --git a/man/ceph-rest-api.8 b/man/ceph-rest-api.8
index 62ea726..99bc4b5 100644
--- a/man/ceph-rest-api.8
+++ b/man/ceph-rest-api.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-REST-API" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-REST-API" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-rest-api \- ceph RESTlike administration server
 .
diff --git a/man/ceph-run.8 b/man/ceph-run.8
index 1c78647..0a3c42e 100644
--- a/man/ceph-run.8
+++ b/man/ceph-run.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-RUN" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-RUN" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-run \- restart daemon on core dump
 .
diff --git a/man/ceph-syn.8 b/man/ceph-syn.8
index 0fc4b59..6976fc6 100644
--- a/man/ceph-syn.8
+++ b/man/ceph-syn.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH-SYN" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH-SYN" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph-syn \- ceph synthetic workload generator
 .
diff --git a/man/ceph.8 b/man/ceph.8
index 1a33b1b..07475ea 100644
--- a/man/ceph.8
+++ b/man/ceph.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPH" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPH" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 ceph \- ceph administration tool
 .
diff --git a/man/cephfs.8 b/man/cephfs.8
index 8e234ad..9ee3d4f 100644
--- a/man/cephfs.8
+++ b/man/cephfs.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CEPHFS" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CEPHFS" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 cephfs \- ceph file system options utility
 .
diff --git a/man/crushtool.8 b/man/crushtool.8
index 4859e14..09a2785 100644
--- a/man/crushtool.8
+++ b/man/crushtool.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "CRUSHTOOL" "8" "April 12, 2016" "dev" "Ceph"
+.TH "CRUSHTOOL" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 crushtool \- CRUSH map manipulation tool
 .
diff --git a/man/librados-config.8 b/man/librados-config.8
index fc0d539..b1ea8f9 100644
--- a/man/librados-config.8
+++ b/man/librados-config.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "LIBRADOS-CONFIG" "8" "April 12, 2016" "dev" "Ceph"
+.TH "LIBRADOS-CONFIG" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 librados-config \- display information about librados
 .
diff --git a/man/monmaptool.8 b/man/monmaptool.8
index a505fe6..e7ca5c4 100644
--- a/man/monmaptool.8
+++ b/man/monmaptool.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "MONMAPTOOL" "8" "April 12, 2016" "dev" "Ceph"
+.TH "MONMAPTOOL" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 monmaptool \- ceph monitor cluster map manipulation tool
 .
diff --git a/man/mount.ceph.8 b/man/mount.ceph.8
index cf0cf68..dd62bb7 100644
--- a/man/mount.ceph.8
+++ b/man/mount.ceph.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "MOUNT.CEPH" "8" "April 12, 2016" "dev" "Ceph"
+.TH "MOUNT.CEPH" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 mount.ceph \- mount a ceph file system
 .
diff --git a/man/osdmaptool.8 b/man/osdmaptool.8
index 1d5e4a7..eb6ea9e 100644
--- a/man/osdmaptool.8
+++ b/man/osdmaptool.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "OSDMAPTOOL" "8" "April 12, 2016" "dev" "Ceph"
+.TH "OSDMAPTOOL" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 osdmaptool \- ceph osd cluster map manipulation tool
 .
diff --git a/man/rados.8 b/man/rados.8
index 670e5ff..93e0ff9 100644
--- a/man/rados.8
+++ b/man/rados.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RADOS" "8" "April 12, 2016" "dev" "Ceph"
+.TH "RADOS" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 rados \- rados object storage utility
 .
diff --git a/man/radosgw-admin.8 b/man/radosgw-admin.8
index 8da8cc2..3faee65 100644
--- a/man/radosgw-admin.8
+++ b/man/radosgw-admin.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RADOSGW-ADMIN" "8" "April 12, 2016" "dev" "Ceph"
+.TH "RADOSGW-ADMIN" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 radosgw-admin \- rados REST gateway user administration utility
 .
diff --git a/man/radosgw.8 b/man/radosgw.8
index 5daa2c8..4350688 100644
--- a/man/radosgw.8
+++ b/man/radosgw.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RADOSGW" "8" "April 12, 2016" "dev" "Ceph"
+.TH "RADOSGW" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 radosgw \- rados REST gateway
 .
diff --git a/man/rbd-fuse.8 b/man/rbd-fuse.8
index eaff3ef..a063a36 100644
--- a/man/rbd-fuse.8
+++ b/man/rbd-fuse.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD-FUSE" "8" "April 12, 2016" "dev" "Ceph"
+.TH "RBD-FUSE" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 rbd-fuse \- expose rbd images as files
 .
diff --git a/man/rbd-mirror.8 b/man/rbd-mirror.8
index df2c65d..1671173 100644
--- a/man/rbd-mirror.8
+++ b/man/rbd-mirror.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD-MIRROR" "8" "April 12, 2016" "dev" "Ceph"
+.TH "RBD-MIRROR" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 rbd-mirror \- Ceph daemon for mirroring RBD images
 .
diff --git a/man/rbd-nbd.8 b/man/rbd-nbd.8
index fafa198..15ec3b4 100644
--- a/man/rbd-nbd.8
+++ b/man/rbd-nbd.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD-NBD" "8" "April 12, 2016" "dev" "Ceph"
+.TH "RBD-NBD" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 rbd-nbd \- map rbd images to nbd device
 .
diff --git a/man/rbd-replay-many.8 b/man/rbd-replay-many.8
index 0bb3f1c..138778c 100644
--- a/man/rbd-replay-many.8
+++ b/man/rbd-replay-many.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD-REPLAY-MANY" "8" "April 12, 2016" "dev" "Ceph"
+.TH "RBD-REPLAY-MANY" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 rbd-replay-many \- replay a rados block device (RBD) workload on several clients
 .
diff --git a/man/rbd-replay-prep.8 b/man/rbd-replay-prep.8
index 49af438..67311b6 100644
--- a/man/rbd-replay-prep.8
+++ b/man/rbd-replay-prep.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD-REPLAY-PREP" "8" "April 12, 2016" "dev" "Ceph"
+.TH "RBD-REPLAY-PREP" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 rbd-replay-prep \- prepare captured rados block device (RBD) workloads for replay
 .
diff --git a/man/rbd-replay.8 b/man/rbd-replay.8
index d876089..8ee7a16 100644
--- a/man/rbd-replay.8
+++ b/man/rbd-replay.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD-REPLAY" "8" "April 12, 2016" "dev" "Ceph"
+.TH "RBD-REPLAY" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 rbd-replay \- replay rados block device (RBD) workloads
 .
diff --git a/man/rbd.8 b/man/rbd.8
index dc19338..eca8906 100644
--- a/man/rbd.8
+++ b/man/rbd.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBD" "8" "April 12, 2016" "dev" "Ceph"
+.TH "RBD" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 rbd \- manage rados block device (RBD) images
 .
diff --git a/man/rbdmap.8 b/man/rbdmap.8
index ebb7d5d..2c340d0 100644
--- a/man/rbdmap.8
+++ b/man/rbdmap.8
@@ -1,6 +1,6 @@
 .\" Man page generated from reStructuredText.
 .
-.TH "RBDMAP" "8" "April 12, 2016" "dev" "Ceph"
+.TH "RBDMAP" "8" "April 20, 2016" "dev" "Ceph"
 .SH NAME
 rbdmap \- map RBD devices at boot time
 .
diff --git a/qa/workunits/erasure-code/encode-decode-non-regression.sh b/qa/workunits/erasure-code/encode-decode-non-regression.sh
index 539b912..bd3289c 100755
--- a/qa/workunits/erasure-code/encode-decode-non-regression.sh
+++ b/qa/workunits/erasure-code/encode-decode-non-regression.sh
@@ -15,7 +15,7 @@
 # GNU Library Public License for more details.
 #
 : ${CORPUS:=https://github.com/ceph/ceph-erasure-code-corpus.git}
-: ${DIRECTORY:=../ceph-erasure-code-corpus}
+: ${DIRECTORY:=$CEPH_ROOT/ceph-erasure-code-corpus}
 
 # when running from sources, the current directory must have precedence
 export PATH=:$PATH
diff --git a/src/.git_version b/src/.git_version
index 17a9430..43fe43a 100644
--- a/src/.git_version
+++ b/src/.git_version
@@ -1,2 +1,2 @@
-4a2a6f72640d6b74a3bbd92798bb913ed380dcd4
-v10.1.2
+3a9fba20ec743699b69bd0181dd6c54dc01c64b9
+v10.2.0
diff --git a/src/Makefile.am b/src/Makefile.am
index fd84dd0..a11325e 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -177,6 +177,12 @@ if WITH_LTTNG
 TESTS_ENVIRONMENT = LD_PRELOAD=liblttng-ust-fork.so; export LD_PRELOAD; echo "LD_PRELOAD=$${LD_PRELOAD}";
 endif
 
+AM_TESTS_ENVIRONMENT = export CEPH_ROOT="$(abs_top_srcdir)";
+AM_TESTS_ENVIRONMENT += export CEPH_BUILD_DIR="$(abs_srcdir)";
+AM_TESTS_ENVIRONMENT += export CEPH_BIN="$(abs_srcdir)";
+AM_TESTS_ENVIRONMENT += export CEPH_LIB="$(abs_srcdir)/.libs";
+AM_TESTS_ENVIRONMENT += export PATH="$(abs_srcdir):$$PATH";
+
 # base targets
 
 core-daemons: ceph-mon ceph-osd ceph-mds radosgw
diff --git a/src/Makefile.in b/src/Makefile.in
index 4cf18ab..9c1a7cf 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -12409,6 +12409,9 @@ libos_tp_la_LDFLAGS = -version-info 1:0:0
 @ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@CYTHON_BUILD_DIR = "$(shell readlink -f $(builddir))/build"
 @ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@PY_DISTUTILS = \
 @ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@	mkdir -p $(CYTHON_BUILD_DIR); \
+@ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@	CC="${CC}" \
+@ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@	CXX="${CXX}" \
+@ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@	LDSHARED="${CC} -shared" \
 @ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@	CPPFLAGS="-iquote \${abs_srcdir}/include ${AM_CPPFLAGS} ${CPPFLAGS}" \
 @ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@	CFLAGS="-iquote \${abs_srcdir}/include ${AM_CFLAGS} ${PYTHON_CFLAGS}" \
 @ENABLE_CLIENT_TRUE@@WITH_CYTHON_TRUE@	LDFLAGS="-L\${abs_builddir}/.libs $(subst -pie,,${AM_LDFLAGS}) ${PYTHON_LDFLAGS}" \
@@ -12443,6 +12446,11 @@ ceph_libexec_SCRIPTS = ceph_common.sh ceph-osd-prestart.sh
 # See: https://www.gnu.org/software/automake/manual/html_node/Scripts_002dbased-Testsuites.html
 # I don't see the serial-tests Automake option anywhere, but my AM_TESTS_ENVIRONMENT was being ignored.
 @WITH_LTTNG_TRUE@TESTS_ENVIRONMENT = LD_PRELOAD=liblttng-ust-fork.so; export LD_PRELOAD; echo "LD_PRELOAD=$${LD_PRELOAD}";
+AM_TESTS_ENVIRONMENT = export CEPH_ROOT="$(abs_top_srcdir)"; export \
+	CEPH_BUILD_DIR="$(abs_srcdir)"; export \
+	CEPH_BIN="$(abs_srcdir)"; export \
+	CEPH_LIB="$(abs_srcdir)/.libs"; export \
+	PATH="$(abs_srcdir):$$PATH";
 
 # pybind
 python_PYTHON = $(am__append_293) $(am__append_303) $(am__append_308)
diff --git a/src/common/WeightedPriorityQueue.h b/src/common/WeightedPriorityQueue.h
index b1c48f3..82b1fa0 100644
--- a/src/common/WeightedPriorityQueue.h
+++ b/src/common/WeightedPriorityQueue.h
@@ -23,15 +23,15 @@
 
 namespace bi = boost::intrusive;
 
-template <typename T>
+template <typename T, typename S>
 class MapKey
 {
   public:
-  bool operator()(const unsigned i, const T &k) const
+  bool operator()(const S i, const T &k) const
   {
     return i < k.key;
   }
-  bool operator()(const T &k, const unsigned i) const
+  bool operator()(const T &k, const S i) const
   {
     return k.key < i;
   }
@@ -52,41 +52,134 @@ class WeightedPriorityQueue :  public OpQueue <T, K>
     class ListPair : public bi::list_base_hook<>
     {
       public:
-	K klass;
         unsigned cost;
         T item;
-        ListPair(K& k, unsigned c, T& i) :
-	  klass(k),
+        ListPair(unsigned c, T& i) :
           cost(c),
           item(i)
           {}
     };
+    class Klass : public bi::set_base_hook<>
+    {
+      typedef bi::list<ListPair> ListPairs;
+      typedef typename ListPairs::iterator Lit;
+      public:
+        K key;		// klass
+        ListPairs lp;
+        Klass(K& k) :
+          key(k)
+          {}
+      friend bool operator< (const Klass &a, const Klass &b)
+        { return a.key < b.key; }
+      friend bool operator> (const Klass &a, const Klass &b)
+        { return a.key > b.key; }
+      friend bool operator== (const Klass &a, const Klass &b)
+        { return a.key == b.key; }
+      void insert(unsigned cost, T& item, bool front) {
+        if (front) {
+          lp.push_front(*new ListPair(cost, item));
+        } else {
+          lp.push_back(*new ListPair(cost, item));
+        }
+      }
+      //Get the cost of the next item to dequeue
+      unsigned get_cost() const {
+	return lp.begin()->cost;
+      }
+      T pop() {
+	assert(!lp.empty());
+	T ret = lp.begin()->item;
+        lp.erase_and_dispose(lp.begin(), DelItem<ListPair>());
+        return ret;
+      }
+      bool empty() const {
+        return lp.empty();
+      }
+      unsigned get_size() const {
+	return lp.size();
+      }
+      unsigned filter_list_pairs(std::function<bool (T)>& f,
+        std::list<T>* out) {
+        unsigned count = 0;
+        // intrusive containers can't erase with a reverse_iterator
+        // so we have to walk backwards on our own. Since there is
+        // no iterator before begin, we have to test at the end.
+        for (Lit i = --lp.end();; --i) {
+          if (f(i->item)) {
+            if (out) {
+	      out->push_front(i->item);
+            }
+            i = lp.erase_and_dispose(i, DelItem<ListPair>());
+            ++count;
+          }
+          if (i == lp.begin()) {
+            break;
+          }
+        }
+        return count;
+      }
+      unsigned filter_class(std::list<T>* out) {
+        unsigned count = 0;
+        for (Lit i = --lp.end();; --i) {
+          if (out) {
+            out->push_front(i->item);
+          }
+          i = lp.erase_and_dispose(i, DelItem<ListPair>());
+          ++count;
+          if (i == lp.begin()) {
+            break;
+          }
+        }
+        return count;
+      }
+    };
     class SubQueue : public bi::set_base_hook<>
     {
-      typedef bi::list<ListPair> QueueItems;
-      typedef typename QueueItems::iterator QI;
+      typedef bi::rbtree<Klass> Klasses;
+      typedef typename Klasses::iterator Kit;
+      void check_end() {
+        if (next == klasses.end()) {
+          next = klasses.begin();
+        }
+      }
       public:
 	unsigned key;	// priority
-	QueueItems qitems;
+        Klasses klasses;
+	Kit next;
 	SubQueue(unsigned& p) :
-	  key(p)
+	  key(p),
+	  next(klasses.begin())
 	  {}
+      friend bool operator< (const SubQueue &a, const SubQueue &b)
+        { return a.key < b.key; }
+      friend bool operator> (const SubQueue &a, const SubQueue &b)
+        { return a.key > b.key; }
+      friend bool operator== (const SubQueue &a, const SubQueue &b)
+        { return a.key == b.key; }
       bool empty() const {
-        return qitems.empty();
+        return klasses.empty();
       }
-      void insert(K& cl, unsigned cost, T& item, bool front = false) {
-	if (front) {
-	  qitems.push_front(*new ListPair(cl, cost, item));
-	} else {
-	  qitems.push_back(*new ListPair(cl, cost, item));
+      void insert(K cl, unsigned cost, T& item, bool front = false) {
+        typename Klasses::insert_commit_data insert_data;
+      	std::pair<Kit, bool> ret =
+          klasses.insert_unique_check(cl, MapKey<Klass, K>(), insert_data);
+      	if (ret.second) {
+      	  ret.first = klasses.insert_unique_commit(*new Klass(cl), insert_data);
+          check_end();
 	}
+	ret.first->insert(cost, item, front);
       }
       unsigned get_cost() const {
-	return qitems.begin()->cost;
+	return next->get_cost();
       }
       T pop() {
-	T ret = qitems.begin()->item;
-	qitems.erase_and_dispose(qitems.begin(), DelItem<ListPair>());
+        T ret = next->pop();
+        if (next->empty()) {
+          next = klasses.erase_and_dispose(next, DelItem<Klass>());
+        } else {
+	  ++next;
+	}
+        check_end();
 	return ret;
       }
       unsigned filter_list_pairs(std::function<bool (T)>& f, std::list<T>* out) {
@@ -94,39 +187,36 @@ class WeightedPriorityQueue :  public OpQueue <T, K>
         // intrusive containers can't erase with a reverse_iterator
         // so we have to walk backwards on our own. Since there is
         // no iterator before begin, we have to test at the end.
-        for (QI i = --qitems.end();; --i) {
-          if (f(i->item)) {
-            if (out) {
-              out->push_front(i->item);
-            }
-            i = qitems.erase_and_dispose(i, DelItem<ListPair>());
-            ++count;
-          }
-          if (i == qitems.begin()) {
-            break;
+        for (Kit i = klasses.begin(); i != klasses.end();) {
+          count += i->filter_list_pairs(f, out);
+          if (i->empty()) {
+	    if (next == i) {
+	      ++next;
+	    }
+            i = klasses.erase_and_dispose(i, DelItem<Klass>());
+          } else {
+            ++i;
           }
         }
+        check_end();
 	return count;
       }
       unsigned filter_class(K& cl, std::list<T>* out) {
 	unsigned count = 0;
-        for (QI i = --qitems.end();; --i) {
-	  if (i->klass == cl) {
-	    if (out) {
-	      out->push_front(i->item);
-	    }
-	    i = qitems.erase_and_dispose(i, DelItem<ListPair>());
-	    ++count;
-	  }
-	  if (i == qitems.begin()) {
-	    break;
-	  }
+        Kit i = klasses.find(cl, MapKey<Klass, K>());
+        if (i != klasses.end()) {
+          count = i->filter_class(out);
+	  Kit tmp = klasses.erase_and_dispose(i, DelItem<Klass>());
+	  if (next == i) {
+            next = tmp;
+          }
+          check_end();
         }
 	return count;
       }
       void dump(ceph::Formatter *f) const {
-	f->dump_int("num_keys", qitems.size());
-	f->dump_int("first_item_cost", qitems.begin()->cost);
+	f->dump_int("num_keys", next->get_size());
+	f->dump_int("first_item_cost", next->get_cost());
       }
     };
     class Queue {
@@ -145,10 +235,10 @@ class WeightedPriorityQueue :  public OpQueue <T, K>
 	bool empty() const {
 	  return !size;
 	}
-	void insert(unsigned p, K& cl, unsigned cost, T& item, bool front = false) {
+	void insert(unsigned p, K cl, unsigned cost, T& item, bool front = false) {
 	  typename SubQueues::insert_commit_data insert_data;
       	  std::pair<typename SubQueues::iterator, bool> ret =
-      	    queues.insert_unique_check(p, MapKey<SubQueue>(), insert_data);
+      	    queues.insert_unique_check(p, MapKey<SubQueue, unsigned>(), insert_data);
       	  if (ret.second) {
       	    ret.first = queues.insert_unique_commit(*new SubQueue(p), insert_data);
 	    total_prio += p;
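
Aside on the WeightedPriorityQueue change above: the new Klass/SubQueue insertions use Boost.Intrusive's two-phase unique insert, where insert_unique_check() does the lookup with a key-to-value comparator (MapKey in this header) and a node is only allocated and spliced in via insert_unique_commit() when the key is absent. A minimal standalone sketch of that idiom, with illustrative names that are not part of the patch:

    // Sketch of the insert_unique_check/insert_unique_commit idiom
    // (illustrative names; assumes Boost.Intrusive is available).
    #include <boost/intrusive/rbtree.hpp>
    #include <boost/intrusive/set_hook.hpp>
    #include <cassert>
    #include <utility>

    namespace bi = boost::intrusive;

    struct Node : public bi::set_base_hook<> {
      unsigned key;
      explicit Node(unsigned k) : key(k) {}
      friend bool operator<(const Node &a, const Node &b) { return a.key < b.key; }
    };

    // Compares a bare key against a Node, like MapKey<> in WeightedPriorityQueue.h.
    struct KeyCmp {
      bool operator()(unsigned k, const Node &n) const { return k < n.key; }
      bool operator()(const Node &n, unsigned k) const { return n.key < k; }
    };

    struct Disposer {
      void operator()(Node *n) { delete n; }
    };

    int main() {
      bi::rbtree<Node> tree;
      bi::rbtree<Node>::insert_commit_data commit_data;

      // Phase 1: pure lookup, no allocation.
      std::pair<bi::rbtree<Node>::iterator, bool> ret =
          tree.insert_unique_check(42u, KeyCmp(), commit_data);
      if (ret.second) {
        // Phase 2: the key was absent, so allocate the node and splice it in.
        ret.first = tree.insert_unique_commit(*new Node(42u), commit_data);
      }
      assert(ret.first->key == 42);

      // Intrusive containers do not own their elements; dispose explicitly.
      tree.clear_and_dispose(Disposer());
      return 0;
    }
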
diff --git a/src/common/WorkQueue.h b/src/common/WorkQueue.h
index 5ce6952..bffe12b 100644
--- a/src/common/WorkQueue.h
+++ b/src/common/WorkQueue.h
@@ -414,6 +414,11 @@ public:
       }
       return m_items.front();
     }
+    void requeue(T *item) {
+      Mutex::Locker pool_locker(m_pool->_lock);
+      _void_process_finish(nullptr);
+      m_items.push_front(item);
+    }
     void signal() {
       Mutex::Locker pool_locker(m_pool->_lock);
       m_pool->_cond.SignalOne();
diff --git a/src/common/buffer.cc b/src/common/buffer.cc
index 63339ea..2e6d009 100644
--- a/src/common/buffer.cc
+++ b/src/common/buffer.cc
@@ -660,7 +660,7 @@ static simple_spinlock_t buffer_debug_lock = SIMPLE_SPINLOCK_INITIALIZER;
   public:
     struct xio_reg_mem *mp;
     xio_mempool(struct xio_reg_mem *_mp, unsigned l) :
-      raw((char*)mp->addr, l), mp(_mp)
+      raw((char*)_mp->addr, l), mp(_mp)
     { }
     ~xio_mempool() {}
     raw* clone_empty() {
@@ -1780,6 +1780,19 @@ static simple_spinlock_t buffer_debug_lock = SIMPLE_SPINLOCK_INITIALIZER;
     return _buffers.front().c_str();  // good, we're already contiguous.
   }
 
+  string buffer::list::to_str() const {
+    string s;
+    s.reserve(length());
+    for (std::list<ptr>::const_iterator p = _buffers.begin();
+	 p != _buffers.end();
+	 ++p) {
+      if (p->length()) {
+	s.append(p->c_str(), p->length());
+      }
+    }
+    return s;
+  }
+
   char *buffer::list::get_contiguous(unsigned orig_off, unsigned len)
   {
     if (orig_off + len > length())
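
The new buffer::list::to_str() above (declared in include/buffer.h further down) simply copies every ptr segment into a single std::string, which is convenient when a caller wants the whole payload contiguously without rebuffering the list. A small illustrative use, calling ceph::buffer::list directly (often seen via the bufferlist typedef):

    #include "include/buffer.h"   // ceph::buffer::list and the new to_str()
    #include <iostream>
    #include <string>

    int main() {
      ceph::buffer::list bl;
      bl.append(std::string("hello, "));
      bl.append(std::string("world"));   // may land in a separate ptr segment
      std::string s = bl.to_str();       // concatenates all segments
      std::cout << s.size() << " bytes: " << s << std::endl;  // 12 bytes: hello, world
      return 0;
    }
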
diff --git a/src/common/cohort_lru.h b/src/common/cohort_lru.h
index 19a5c45..e4cdb7d 100644
--- a/src/common/cohort_lru.h
+++ b/src/common/cohort_lru.h
@@ -290,6 +290,8 @@ namespace cohort {
 	Partition* p;
 	LK* lock;
 	insert_commit_data commit_data;
+
+	Latch() : p(nullptr), lock(nullptr) {}
       };
 
       Partition& partition_of_scalar(uint64_t x) {
diff --git a/src/common/config_opts.h b/src/common/config_opts.h
index c2a577f..b8a0cec 100644
--- a/src/common/config_opts.h
+++ b/src/common/config_opts.h
@@ -556,6 +556,9 @@ OPTION(mds_max_scrub_ops_in_progress, OPT_INT, 5) // the number of simultaneous
 // Maximum number of damaged frags/dentries before whole MDS rank goes damaged
 OPTION(mds_damage_table_max_entries, OPT_INT, 10000)
 
+// verify backend can support configured max object name length
+OPTION(osd_check_max_object_name_len_on_startup, OPT_BOOL, true)
+
 // If true, compact leveldb store on mount
 OPTION(osd_compact_leveldb_on_mount, OPT_BOOL, false)
 
@@ -1165,6 +1168,7 @@ OPTION(rbd_skip_partial_discard, OPT_BOOL, false) // when trying to discard a ra
 OPTION(rbd_enable_alloc_hint, OPT_BOOL, true) // when writing a object, it will issue a hint to osd backend to indicate the expected size object need
 OPTION(rbd_tracing, OPT_BOOL, false) // true if LTTng-UST tracepoints should be enabled
 OPTION(rbd_validate_pool, OPT_BOOL, true) // true if empty pools should be validated for RBD compatibility
+OPTION(rbd_validate_names, OPT_BOOL, true) // true if image specs should be validated
 
 /*
  * The following options change the behavior for librbd's image creation methods that
diff --git a/src/include/buffer.h b/src/include/buffer.h
index 1397ae9..c786bf2 100644
--- a/src/include/buffer.h
+++ b/src/include/buffer.h
@@ -558,6 +558,8 @@ namespace buffer CEPH_BUFFER_API {
      */
     const char& operator[](unsigned n) const;
     char *c_str();
+    std::string to_str() const;
+
     void substr_of(const list& other, unsigned off, unsigned len);
 
     /// return a pointer to a contiguous extent of the buffer,
diff --git a/src/include/rados/buffer.h b/src/include/rados/buffer.h
index 1397ae9..c786bf2 100644
--- a/src/include/rados/buffer.h
+++ b/src/include/rados/buffer.h
@@ -558,6 +558,8 @@ namespace buffer CEPH_BUFFER_API {
      */
     const char& operator[](unsigned n) const;
     char *c_str();
+    std::string to_str() const;
+
     void substr_of(const list& other, unsigned off, unsigned len);
 
     /// return a pointer to a contiguous extent of the buffer,
diff --git a/src/init-ceph.in b/src/init-ceph.in
index 731d103..8b6d314 100755
--- a/src/init-ceph.in
+++ b/src/init-ceph.in
@@ -38,6 +38,16 @@ else
     ASSUME_DEV=0
 fi
 
+if [ -n $CEPH_BIN ] && [ -n $CEPH_ROOT ] && [ -n $CEPH_BUILD_DIR ]; then
+  #need second look at all variables, especially ETCDIR
+  BINDIR=$CEPH_BIN
+  SBINDIR=$CEPH_ROOT/src
+  ETCDIR=$CEPH_BUILD_DIR
+  LIBEXECDIR=$CEPH_ROOT/src
+  SYSTEMD_RUN=""
+  ASSUME_DEV=1
+fi
+
 usage_exit() {
     echo "usage: $0 [options] {start|stop|restart|condrestart} [mon|osd|mds]..."
     printf "Core options:\n"
diff --git a/src/journal/JournalPlayer.cc b/src/journal/JournalPlayer.cc
index 0d6b177..aee8716 100644
--- a/src/journal/JournalPlayer.cc
+++ b/src/journal/JournalPlayer.cc
@@ -535,24 +535,31 @@ void JournalPlayer::schedule_watch() {
   // poll first splay offset and active splay offset since
   // new records should only appear in those two objects
   C_Watch *ctx = new C_Watch(this);
-  ObjectPlayerPtr object_player = get_object_player();
-  object_player->watch(ctx, m_watch_interval);
 
+  ObjectPlayerPtr object_player = get_object_player();
   uint8_t splay_width = m_journal_metadata->get_splay_width();
   if (object_player->get_object_number() % splay_width != 0) {
     ++ctx->pending_fetches;
 
-    object_player = m_object_players.begin()->second.begin()->second;
-    object_player->watch(ctx, m_watch_interval);
+    ObjectPlayerPtr first_object_player =
+      m_object_players.begin()->second.begin()->second;
+    first_object_player->watch(ctx, m_watch_interval);
   }
+
+  object_player->watch(ctx, m_watch_interval);
   m_watch_scheduled = true;
 }
 
 void JournalPlayer::handle_watch(int r) {
   ldout(m_cct, 10) << __func__ << ": r=" << r << dendl;
+  if (r == -ECANCELED) {
+    // unwatch of object player(s)
+    return;
+  }
 
   Mutex::Locker locker(m_lock);
   m_watch_scheduled = false;
+
   std::set<uint64_t> object_numbers;
   for (auto &players : m_object_players) {
     object_numbers.insert(
diff --git a/src/journal/JournalPlayer.h b/src/journal/JournalPlayer.h
index 476b49b..80a9ff7 100644
--- a/src/journal/JournalPlayer.h
+++ b/src/journal/JournalPlayer.h
@@ -70,24 +70,26 @@ private:
 
   struct C_Watch : public Context {
     JournalPlayer *player;
+    Mutex lock;
     uint8_t pending_fetches = 1;
     int ret_val = 0;
 
-    C_Watch(JournalPlayer *player) : player(player) {
+    C_Watch(JournalPlayer *player)
+      : player(player), lock("JournalPlayer::C_Watch::lock") {
     }
 
     virtual void complete(int r) override {
-      player->m_lock.Lock();
+      lock.Lock();
       if (ret_val == 0 && r < 0) {
         ret_val = r;
       }
 
       assert(pending_fetches > 0);
       if (--pending_fetches == 0) {
-        player->m_lock.Unlock();
+        lock.Unlock();
         Context::complete(ret_val);
       } else {
-        player->m_lock.Unlock();
+        lock.Unlock();
       }
     }
 
diff --git a/src/journal/ObjectPlayer.cc b/src/journal/ObjectPlayer.cc
index db49d46..894b56f 100644
--- a/src/journal/ObjectPlayer.cc
+++ b/src/journal/ObjectPlayer.cc
@@ -32,6 +32,7 @@ ObjectPlayer::~ObjectPlayer() {
     Mutex::Locker timer_locker(m_timer_lock);
     Mutex::Locker locker(m_lock);
     assert(!m_fetch_in_progress);
+    assert(!m_watch_in_progress);
     assert(m_watch_ctx == NULL);
   }
 }
@@ -64,23 +65,28 @@ void ObjectPlayer::watch(Context *on_fetch, double interval) {
   assert(m_watch_ctx == NULL);
   m_watch_ctx = on_fetch;
 
-  schedule_watch();
+  // watch callback might lead to re-scheduled watch
+  if (!m_watch_in_progress) {
+    schedule_watch();
+  }
 }
 
 void ObjectPlayer::unwatch() {
   ldout(m_cct, 20) << __func__ << ": " << m_oid << " unwatch" << dendl;
-  Mutex::Locker timer_locker(m_timer_lock);
+  Context *watch_ctx = nullptr;
+  {
+    Mutex::Locker timer_locker(m_timer_lock);
 
-  cancel_watch();
+    cancel_watch();
 
-  Context *watch_ctx = nullptr;
-  std::swap(watch_ctx, m_watch_ctx);
-  if (watch_ctx != nullptr) {
-    delete watch_ctx;
+    std::swap(watch_ctx, m_watch_ctx);
+    while (m_watch_in_progress) {
+      m_watch_in_progress_cond.Wait(m_timer_lock);
+    }
   }
 
-  while (m_watch_in_progress) {
-    m_watch_in_progress_cond.Wait(m_timer_lock);
+  if (watch_ctx != nullptr) {
+    watch_ctx->complete(-ECANCELED);
   }
 }
 
@@ -199,6 +205,7 @@ void ObjectPlayer::handle_watch_task() {
   ldout(m_cct, 10) << __func__ << ": " << m_oid << " polling" << dendl;
   assert(m_watch_ctx != NULL);
 
+  assert(!m_watch_in_progress);
   m_watch_in_progress = true;
   m_watch_task = NULL;
   fetch(new C_WatchFetch(this));
@@ -224,6 +231,13 @@ void ObjectPlayer::handle_watch_fetched(int r) {
 
   {
     Mutex::Locker locker(m_timer_lock);
+    assert(m_watch_in_progress);
+
+    // callback might have attempted to re-schedule the watch -- complete now
+    if (m_watch_ctx != nullptr) {
+      schedule_watch();
+    }
+
     m_watch_in_progress = false;
     m_watch_in_progress_cond.Signal();
   }
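
The reworked unwatch() above swaps the stored watch context out while holding the timer lock, waits for any in-flight poll to drain, and only then completes the context with -ECANCELED outside the lock (which JournalPlayer::handle_watch now recognizes and ignores). A condensed, self-contained sketch of that shape, with illustrative names:

    #include <cerrno>
    #include <condition_variable>
    #include <functional>
    #include <mutex>
    #include <utility>

    // Sketch of the unwatch pattern: swap the callback out under the lock,
    // wait out an in-flight poll, then complete the callback outside the lock.
    class Watcher {
      std::mutex lock;
      std::condition_variable cond;
      bool poll_in_progress = false;
      std::function<void(int)> watch_cb;

    public:
      void watch(std::function<void(int)> cb) {
        std::lock_guard<std::mutex> l(lock);
        watch_cb = std::move(cb);
      }

      // called by the poll machinery around each fetch
      void poll_started() {
        std::lock_guard<std::mutex> l(lock);
        poll_in_progress = true;
      }
      void poll_finished() {
        std::lock_guard<std::mutex> l(lock);
        poll_in_progress = false;
        cond.notify_all();
      }

      void unwatch() {
        std::function<void(int)> cb;
        {
          std::unique_lock<std::mutex> l(lock);
          std::swap(cb, watch_cb);
          cond.wait(l, [this] { return !poll_in_progress; });  // drain active poll
        }
        if (cb) {
          cb(-ECANCELED);   // completed outside the lock, so it may safely re-enter
        }
      }
    };
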
diff --git a/src/librbd/AioImageRequestWQ.cc b/src/librbd/AioImageRequestWQ.cc
index 352b7e9..97c6fa4 100644
--- a/src/librbd/AioImageRequestWQ.cc
+++ b/src/librbd/AioImageRequestWQ.cc
@@ -302,17 +302,24 @@ void AioImageRequestWQ::clear_require_lock_on_read() {
 
 void *AioImageRequestWQ::_void_dequeue() {
   AioImageRequest<> *peek_item = front();
-  if (peek_item == NULL || m_refresh_in_progress) {
-    return NULL;
+
+  // no IO ops available or refresh in-progress (IO stalled)
+  if (peek_item == nullptr || m_refresh_in_progress) {
+    return nullptr;
   }
 
+  bool refresh_required = m_image_ctx.state->is_refresh_required();
   {
     RWLock::RLocker locker(m_lock);
     if (peek_item->is_write_op()) {
       if (m_write_blockers > 0) {
-        return NULL;
+        return nullptr;
+      }
+
+      // refresh will requeue the op -- don't count it as in-progress
+      if (!refresh_required) {
+        m_in_progress_writes.inc();
       }
-      m_in_progress_writes.inc();
     } else if (m_require_lock_on_read) {
       return nullptr;
     }
@@ -322,15 +329,17 @@ void *AioImageRequestWQ::_void_dequeue() {
     ThreadPool::PointerWQ<AioImageRequest<> >::_void_dequeue());
   assert(peek_item == item);
 
-  if (m_image_ctx.state->is_refresh_required()) {
+  if (refresh_required) {
     ldout(m_image_ctx.cct, 15) << "image refresh required: delaying IO " << item
                                << dendl;
+
+    // stall IO until the refresh completes
     m_refresh_in_progress = true;
 
     get_pool_lock().Unlock();
     m_image_ctx.state->refresh(new C_RefreshFinish(this, item));
     get_pool_lock().Lock();
-    return NULL;
+    return nullptr;
   }
   return item;
 }
@@ -345,21 +354,34 @@ void AioImageRequestWQ::process(AioImageRequest<> *req) {
     req->send();
   }
 
+  finish_queued_op(req);
+  if (req->is_write_op()) {
+    finish_in_progress_write();
+  }
+  delete req;
+
+  finish_in_flight_op();
+}
+
+void AioImageRequestWQ::finish_queued_op(AioImageRequest<> *req) {
+  RWLock::RLocker locker(m_lock);
+  if (req->is_write_op()) {
+    assert(m_queued_writes.read() > 0);
+    m_queued_writes.dec();
+  } else {
+    assert(m_queued_reads.read() > 0);
+    m_queued_reads.dec();
+  }
+}
+
+void AioImageRequestWQ::finish_in_progress_write() {
   bool writes_blocked = false;
   {
     RWLock::RLocker locker(m_lock);
-    if (req->is_write_op()) {
-      assert(m_queued_writes.read() > 0);
-      m_queued_writes.dec();
-
-      assert(m_in_progress_writes.read() > 0);
-      if (m_in_progress_writes.dec() == 0 &&
-          !m_write_blocker_contexts.empty()) {
-        writes_blocked = true;
-      }
-    } else {
-      assert(m_queued_reads.read() > 0);
-      m_queued_reads.dec();
+    assert(m_in_progress_writes.read() > 0);
+    if (m_in_progress_writes.dec() == 0 &&
+        !m_write_blocker_contexts.empty()) {
+      writes_blocked = true;
     }
   }
 
@@ -367,9 +389,6 @@ void AioImageRequestWQ::process(AioImageRequest<> *req) {
     RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
     m_image_ctx.flush(new C_BlockedWrites(this));
   }
-  delete req;
-
-  finish_in_flight_op();
 }
 
 int AioImageRequestWQ::start_in_flight_op(AioCompletion *c) {
@@ -440,12 +459,24 @@ void AioImageRequestWQ::handle_refreshed(int r, AioImageRequest<> *req) {
                  << "req=" << req << dendl;
   if (r < 0) {
     req->fail(r);
+    delete req;
+
+    finish_queued_op(req);
+    finish_in_flight_op();
   } else {
-    process(req);
-    process_finish();
+    // since IO was stalled for refresh -- original IO order is preserved
+    // if we requeue this op for work queue processing
+    requeue(req);
+  }
 
-    m_refresh_in_progress = false;
-    signal();
+  m_refresh_in_progress = false;
+  signal();
+
+  // refresh might have enabled exclusive lock -- IO stalled until
+  // we acquire the lock
+  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  if (is_lock_required() && is_lock_request_needed()) {
+    m_image_ctx.exclusive_lock->request_lock(nullptr);
   }
 }
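
The combination of the new PointerWQ::requeue() and the rewritten _void_dequeue()/handle_refreshed() above boils down to one idea: when the front request needs an image refresh, the queue stalls, and once the refresh completes the same request goes back to the front so the original submission order is preserved. A toy rendering of that control flow follows; the real code dequeues the request and hands it to the refresh completion, which calls requeue() to push it back on the front, whereas this toy simply leaves it queued (equivalent for ordering). Everything here is illustrative, not the librbd implementation, and T::needs_refresh() stands in for m_image_ctx.state->is_refresh_required():

    #include <deque>
    #include <mutex>

    template <typename T>
    class StallingQueue {
      std::mutex lock;
      std::deque<T*> items;
      bool refresh_in_progress = false;

    public:
      void queue(T *item) {
        std::lock_guard<std::mutex> l(lock);
        items.push_back(item);
      }

      // Worker thread: returns nullptr while empty or stalled for a refresh.
      T *dequeue() {
        std::lock_guard<std::mutex> l(lock);
        if (refresh_in_progress || items.empty()) {
          return nullptr;
        }
        if (items.front()->needs_refresh()) {
          refresh_in_progress = true;  // stall; the front item stays queued and
          return nullptr;              // is re-dispatched once the refresh is done
        }
        T *item = items.front();
        items.pop_front();
        return item;
      }

      // Refresh completion: unstall; the next dequeue() hands out the same
      // front item, so the original submission order is preserved.
      void handle_refreshed() {
        std::lock_guard<std::mutex> l(lock);
        refresh_in_progress = false;
      }
    };
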
 
diff --git a/src/librbd/AioImageRequestWQ.h b/src/librbd/AioImageRequestWQ.h
index fa429fc..e21aa86 100644
--- a/src/librbd/AioImageRequestWQ.h
+++ b/src/librbd/AioImageRequestWQ.h
@@ -104,6 +104,9 @@ private:
     return (m_queued_writes.read() == 0);
   }
 
+  void finish_queued_op(AioImageRequest<ImageCtx> *req);
+  void finish_in_progress_write();
+
   int start_in_flight_op(AioCompletion *c);
   void finish_in_flight_op();
 
diff --git a/src/librbd/ExclusiveLock.cc b/src/librbd/ExclusiveLock.cc
index 82d0042..01043e5 100644
--- a/src/librbd/ExclusiveLock.cc
+++ b/src/librbd/ExclusiveLock.cc
@@ -61,6 +61,7 @@ bool ExclusiveLock<I>::is_lock_owner() const {
   case STATE_LOCKED:
   case STATE_POST_ACQUIRING:
   case STATE_PRE_RELEASING:
+  case STATE_PRE_SHUTTING_DOWN:
     lock_owner = true;
     break;
   default:
@@ -104,35 +105,43 @@ template <typename I>
 void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
   ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
 
-  Mutex::Locker locker(m_lock);
-  assert(!is_shutdown());
-  execute_action(ACTION_SHUT_DOWN, on_shut_down);
+  {
+    Mutex::Locker locker(m_lock);
+    assert(!is_shutdown());
+    execute_action(ACTION_SHUT_DOWN, on_shut_down);
+  }
+
+  // if stalled in request state machine -- abort
+  handle_lock_released();
 }
 
 template <typename I>
 void ExclusiveLock<I>::try_lock(Context *on_tried_lock) {
+  int r = 0;
   {
     Mutex::Locker locker(m_lock);
     assert(m_image_ctx.owner_lock.is_locked());
-    assert(!is_shutdown());
-
-    if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
+    if (is_shutdown()) {
+      r = -ESHUTDOWN;
+    } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
       ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
       execute_action(ACTION_TRY_LOCK, on_tried_lock);
       return;
     }
   }
 
-  on_tried_lock->complete(0);
+  on_tried_lock->complete(r);
 }
 
 template <typename I>
 void ExclusiveLock<I>::request_lock(Context *on_locked) {
+  int r = 0;
   {
     Mutex::Locker locker(m_lock);
     assert(m_image_ctx.owner_lock.is_locked());
-    assert(!is_shutdown());
-    if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
+    if (is_shutdown()) {
+      r = -ESHUTDOWN;
+    } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
       ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
       execute_action(ACTION_REQUEST_LOCK, on_locked);
       return;
@@ -140,25 +149,26 @@ void ExclusiveLock<I>::request_lock(Context *on_locked) {
   }
 
   if (on_locked != nullptr) {
-    on_locked->complete(0);
+    on_locked->complete(r);
   }
 }
 
 template <typename I>
 void ExclusiveLock<I>::release_lock(Context *on_released) {
+  int r = 0;
   {
     Mutex::Locker locker(m_lock);
     assert(m_image_ctx.owner_lock.is_locked());
-    assert(!is_shutdown());
-
-    if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) {
+    if (is_shutdown()) {
+      r = -ESHUTDOWN;
+    } else if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) {
       ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
       execute_action(ACTION_RELEASE_LOCK, on_released);
       return;
     }
   }
 
-  on_released->complete(0);
+  on_released->complete(r);
 }
 
 template <typename I>
@@ -210,6 +220,7 @@ bool ExclusiveLock<I>::is_transition_state() const {
   case STATE_POST_ACQUIRING:
   case STATE_PRE_RELEASING:
   case STATE_RELEASING:
+  case STATE_PRE_SHUTTING_DOWN:
   case STATE_SHUTTING_DOWN:
     return true;
   case STATE_UNINITIALIZED:
@@ -466,16 +477,14 @@ void ExclusiveLock<I>::send_shutdown() {
   assert(m_lock.is_locked());
   if (m_state == STATE_UNLOCKED) {
     m_state = STATE_SHUTTING_DOWN;
-    m_image_ctx.aio_work_queue->clear_require_lock_on_read();
-    m_image_ctx.aio_work_queue->unblock_writes();
-    m_image_ctx.image_watcher->flush(util::create_context_callback<
-      ExclusiveLock<I>, &ExclusiveLock<I>::complete_shutdown>(this));
+    m_image_ctx.op_work_queue->queue(util::create_context_callback<
+      ExclusiveLock<I>, &ExclusiveLock<I>::handle_shutdown>(this), 0);
     return;
   }
 
   ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
   assert(m_state == STATE_LOCKED);
-  m_state = STATE_SHUTTING_DOWN;
+  m_state = STATE_PRE_SHUTTING_DOWN;
 
   m_lock.Unlock();
   m_image_ctx.op_work_queue->queue(new C_ShutDownRelease(this), 0);
@@ -492,16 +501,34 @@ void ExclusiveLock<I>::send_shutdown_release() {
 
   using el = ExclusiveLock<I>;
   ReleaseRequest<I>* req = ReleaseRequest<I>::create(
-    m_image_ctx, cookie, nullptr,
-    util::create_context_callback<el, &el::handle_shutdown>(this));
+    m_image_ctx, cookie,
+    util::create_context_callback<el, &el::handle_shutdown_releasing>(this),
+    util::create_context_callback<el, &el::handle_shutdown_released>(this));
   req->send();
 }
 
 template <typename I>
-void ExclusiveLock<I>::handle_shutdown(int r) {
+void ExclusiveLock<I>::handle_shutdown_releasing(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
+
+  assert(r == 0);
+  assert(m_state == STATE_PRE_SHUTTING_DOWN);
+
+  // all IO and ops should be blocked/canceled by this point
+  m_state = STATE_SHUTTING_DOWN;
+}
+
+template <typename I>
+void ExclusiveLock<I>::handle_shutdown_released(int r) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
 
+  {
+    RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
+    m_image_ctx.exclusive_lock = nullptr;
+  }
+
   if (r < 0) {
     lderr(cct) << "failed to shut down exclusive lock: " << cpp_strerror(r)
                << dendl;
@@ -515,6 +542,22 @@ void ExclusiveLock<I>::handle_shutdown(int r) {
 }
 
 template <typename I>
+void ExclusiveLock<I>::handle_shutdown(int r) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
+
+  {
+    RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
+    m_image_ctx.exclusive_lock = nullptr;
+  }
+
+  m_image_ctx.aio_work_queue->clear_require_lock_on_read();
+  m_image_ctx.aio_work_queue->unblock_writes();
+  m_image_ctx.image_watcher->flush(util::create_context_callback<
+    ExclusiveLock<I>, &ExclusiveLock<I>::complete_shutdown>(this));
+}
+
+template <typename I>
 void ExclusiveLock<I>::complete_shutdown(int r) {
   ActionContexts action_contexts;
   {
diff --git a/src/librbd/ExclusiveLock.h b/src/librbd/ExclusiveLock.h
index 910c67f..e2e1416 100644
--- a/src/librbd/ExclusiveLock.h
+++ b/src/librbd/ExclusiveLock.h
@@ -67,7 +67,7 @@ private:
    *    |
    *    |
    *    v
-   * SHUTTING_DOWN ---> SHUTDOWN ---> <finish>
+   * PRE_SHUTTING_DOWN ---> SHUTTING_DOWN ---> SHUTDOWN ---> <finish>
    */
   enum State {
     STATE_UNINITIALIZED,
@@ -79,6 +79,7 @@ private:
     STATE_WAITING_FOR_PEER,
     STATE_PRE_RELEASING,
     STATE_RELEASING,
+    STATE_PRE_SHUTTING_DOWN,
     STATE_SHUTTING_DOWN,
     STATE_SHUTDOWN,
   };
@@ -151,6 +152,8 @@ private:
 
   void send_shutdown();
   void send_shutdown_release();
+  void handle_shutdown_releasing(int r);
+  void handle_shutdown_released(int r);
   void handle_shutdown(int r);
   void complete_shutdown(int r);
 };
diff --git a/src/librbd/ImageWatcher.cc b/src/librbd/ImageWatcher.cc
index 9579723..b48974e 100644
--- a/src/librbd/ImageWatcher.cc
+++ b/src/librbd/ImageWatcher.cc
@@ -403,6 +403,11 @@ void ImageWatcher::schedule_request_lock(bool use_timer, int timer_delay) {
 void ImageWatcher::notify_request_lock() {
   RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
   RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
+
+  // ExclusiveLock state machine can be dynamically disabled
+  if (m_image_ctx.exclusive_lock == nullptr) {
+    return;
+  }
   assert(!m_image_ctx.exclusive_lock->is_lock_owner());
 
   ldout(m_image_ctx.cct, 10) << this << " notify request lock" << dendl;
@@ -417,8 +422,11 @@ void ImageWatcher::handle_request_lock(int r) {
   RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
   RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
 
-  // ExclusiveLock state machine cannot transition
-  assert(!m_image_ctx.exclusive_lock->is_lock_owner());
+  // ExclusiveLock state machine cannot transition -- but can be
+  // dynamically disabled
+  if (m_image_ctx.exclusive_lock == nullptr) {
+    return;
+  }
 
   if (r == -ETIMEDOUT) {
     ldout(m_image_ctx.cct, 5) << this << " timed out requesting lock: retrying"
@@ -459,6 +467,9 @@ Context *ImageWatcher::remove_async_request(const AsyncRequestId &id) {
 }
 
 void ImageWatcher::schedule_async_request_timed_out(const AsyncRequestId &id) {
+  ldout(m_image_ctx.cct, 20) << "scheduling async request time out: " << id
+                             << dendl;
+
   Context *ctx = new FunctionContext(boost::bind(
     &ImageWatcher::async_request_timed_out, this, id));
 
@@ -896,6 +907,11 @@ void ImageWatcher::reregister_watch() {
   int r;
   if (releasing_lock) {
     r = release_lock_ctx.wait();
+    if (r == -EBLACKLISTED) {
+      lderr(m_image_ctx.cct) << this << " client blacklisted" << dendl;
+      return;
+    }
+
     assert(r == 0);
   }
 
diff --git a/src/librbd/Operations.cc b/src/librbd/Operations.cc
index f8151c9..81219d4 100644
--- a/src/librbd/Operations.cc
+++ b/src/librbd/Operations.cc
@@ -41,12 +41,28 @@ struct C_NotifyUpdate : public Context {
   }
 
   virtual void complete(int r) override {
-    if (r < 0 || notified) {
+    CephContext *cct = image_ctx.cct;
+    if (notified) {
+      if (r == -ETIMEDOUT) {
+        // don't fail the op if a peer fails to get the update notification
+        lderr(cct) << "update notification timed-out" << dendl;
+        r = 0;
+      } else if (r < 0) {
+        lderr(cct) << "update notification failed: " << cpp_strerror(r)
+                   << dendl;
+      }
       Context::complete(r);
-    } else {
-      notified = true;
-      image_ctx.notify_update(this);
+      return;
     }
+
+    if (r < 0) {
+      // op failed -- no need to send update notification
+      Context::complete(r);
+      return;
+    }
+
+    notified = true;
+    image_ctx.notify_update(this);
   }
   virtual void finish(int r) override {
     on_finish->complete(r);
diff --git a/src/librbd/exclusive_lock/ReleaseRequest.cc b/src/librbd/exclusive_lock/ReleaseRequest.cc
index 356beb1..0583c26 100644
--- a/src/librbd/exclusive_lock/ReleaseRequest.cc
+++ b/src/librbd/exclusive_lock/ReleaseRequest.cc
@@ -50,58 +50,58 @@ ReleaseRequest<I>::~ReleaseRequest() {
 
 template <typename I>
 void ReleaseRequest<I>::send() {
-  send_block_writes();
+  send_cancel_op_requests();
 }
 
 template <typename I>
-void ReleaseRequest<I>::send_block_writes() {
+void ReleaseRequest<I>::send_cancel_op_requests() {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << __func__ << dendl;
 
   using klass = ReleaseRequest<I>;
   Context *ctx = create_context_callback<
-    klass, &klass::handle_block_writes>(this);
-
-  {
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-    if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
-      m_image_ctx.aio_work_queue->set_require_lock_on_read();
-    }
-    m_image_ctx.aio_work_queue->block_writes(ctx);
-  }
+    klass, &klass::handle_cancel_op_requests>(this);
+  m_image_ctx.cancel_async_requests(ctx);
 }
 
 template <typename I>
-Context *ReleaseRequest<I>::handle_block_writes(int *ret_val) {
+Context *ReleaseRequest<I>::handle_cancel_op_requests(int *ret_val) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
 
-  if (*ret_val < 0) {
-    m_image_ctx.aio_work_queue->unblock_writes();
-    return m_on_finish;
-  }
+  assert(*ret_val == 0);
 
-  send_cancel_op_requests();
+  send_block_writes();
   return nullptr;
 }
 
 template <typename I>
-void ReleaseRequest<I>::send_cancel_op_requests() {
+void ReleaseRequest<I>::send_block_writes() {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << __func__ << dendl;
 
   using klass = ReleaseRequest<I>;
   Context *ctx = create_context_callback<
-    klass, &klass::handle_cancel_op_requests>(this);
-  m_image_ctx.cancel_async_requests(ctx);
+    klass, &klass::handle_block_writes>(this);
+
+  {
+    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+    if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
+      m_image_ctx.aio_work_queue->set_require_lock_on_read();
+    }
+    m_image_ctx.aio_work_queue->block_writes(ctx);
+  }
 }
 
 template <typename I>
-Context *ReleaseRequest<I>::handle_cancel_op_requests(int *ret_val) {
+Context *ReleaseRequest<I>::handle_block_writes(int *ret_val) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
 
-  assert(*ret_val == 0);
+  if (*ret_val < 0) {
+    m_image_ctx.aio_work_queue->unblock_writes();
+    return m_on_finish;
+  }
 
   if (m_on_releasing != nullptr) {
     // alert caller that we no longer own the exclusive lock
diff --git a/src/librbd/exclusive_lock/ReleaseRequest.h b/src/librbd/exclusive_lock/ReleaseRequest.h
index c5bf91c..8712bc9 100644
--- a/src/librbd/exclusive_lock/ReleaseRequest.h
+++ b/src/librbd/exclusive_lock/ReleaseRequest.h
@@ -33,10 +33,10 @@ private:
    * <start>
    *    |
    *    v
-   * BLOCK_WRITES
+   * CANCEL_OP_REQUESTS
    *    |
    *    v
-   * CANCEL_OP_REQUESTS
+   * BLOCK_WRITES
    *    |
    *    v
    * FLUSH_NOTIFIES . . . . . . . . . . . . . .
@@ -67,12 +67,12 @@ private:
   decltype(m_image_ctx.object_map) m_object_map;
   decltype(m_image_ctx.journal) m_journal;
 
-  void send_block_writes();
-  Context *handle_block_writes(int *ret_val);
-
   void send_cancel_op_requests();
   Context *handle_cancel_op_requests(int *ret_val);
 
+  void send_block_writes();
+  Context *handle_block_writes(int *ret_val);
+
   void send_flush_notifies();
   Context *handle_flush_notifies(int *ret_val);
 
diff --git a/src/librbd/image/CloseRequest.cc b/src/librbd/image/CloseRequest.cc
index 55e25ab..b953860 100644
--- a/src/librbd/image/CloseRequest.cc
+++ b/src/librbd/image/CloseRequest.cc
@@ -87,9 +87,10 @@ template <typename I>
 void CloseRequest<I>::send_shut_down_exclusive_lock() {
   {
     RWLock::WLocker owner_locker(m_image_ctx->owner_lock);
-    RWLock::WLocker snap_locker(m_image_ctx->snap_lock);
-    std::swap(m_exclusive_lock, m_image_ctx->exclusive_lock);
+    m_exclusive_lock = m_image_ctx->exclusive_lock;
 
+    // if reading a snapshot -- possible object map is open
+    RWLock::WLocker snap_locker(m_image_ctx->snap_lock);
     if (m_exclusive_lock == nullptr) {
       delete m_image_ctx->object_map;
       m_image_ctx->object_map = nullptr;
@@ -104,6 +105,8 @@ void CloseRequest<I>::send_shut_down_exclusive_lock() {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << this << " " << __func__ << dendl;
 
+  // in-flight IO will be flushed and in-flight requests will be canceled
+  // before releasing lock
   m_exclusive_lock->shut_down(create_context_callback<
     CloseRequest<I>, &CloseRequest<I>::handle_shut_down_exclusive_lock>(this));
 }
@@ -113,10 +116,18 @@ void CloseRequest<I>::handle_shut_down_exclusive_lock(int r) {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
 
-  // object map and journal closed during exclusive lock shutdown
-  assert(m_image_ctx->journal == nullptr);
-  assert(m_image_ctx->object_map == nullptr);
+  {
+    RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
+    assert(m_image_ctx->exclusive_lock == nullptr);
+
+    // object map and journal closed during exclusive lock shutdown
+    RWLock::RLocker snap_locker(m_image_ctx->snap_lock);
+    assert(m_image_ctx->journal == nullptr);
+    assert(m_image_ctx->object_map == nullptr);
+  }
+
   delete m_exclusive_lock;
+  m_exclusive_lock = nullptr;
 
   save_result(r);
   if (r < 0) {
diff --git a/src/librbd/image/RefreshRequest.cc b/src/librbd/image/RefreshRequest.cc
index 12bee84..ecb7cae 100644
--- a/src/librbd/image/RefreshRequest.cc
+++ b/src/librbd/image/RefreshRequest.cc
@@ -42,6 +42,7 @@ RefreshRequest<I>::~RefreshRequest() {
   assert(m_object_map == nullptr);
   assert(m_journal == nullptr);
   assert(m_refresh_parent == nullptr);
+  assert(!m_blocked_writes);
 }
 
 template <typename I>
@@ -459,7 +460,7 @@ Context *RefreshRequest<I>::send_v2_open_journal() {
         m_image_ctx.journal == nullptr) {
       m_image_ctx.aio_work_queue->set_require_lock_on_read();
     }
-    return send_v2_finalize_refresh_parent();
+    return send_v2_block_writes();
   }
 
   // implies journal dynamically enabled since ExclusiveLock will init
@@ -488,6 +489,47 @@ Context *RefreshRequest<I>::handle_v2_open_journal(int *result) {
     save_result(result);
   }
 
+  return send_v2_block_writes();
+}
+
+template <typename I>
+Context *RefreshRequest<I>::send_v2_block_writes() {
+  bool disabled_journaling = false;
+  {
+    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
+    disabled_journaling = ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0 &&
+                           (m_features & RBD_FEATURE_JOURNALING) == 0 &&
+                           m_image_ctx.journal != nullptr);
+  }
+
+  if (!disabled_journaling) {
+    return send_v2_finalize_refresh_parent();
+  }
+
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << this << " " << __func__ << dendl;
+
+  // we need to block writes temporarily to avoid in-flight journal
+  // writes
+  m_blocked_writes = true;
+  Context *ctx = create_context_callback<
+    RefreshRequest<I>, &RefreshRequest<I>::handle_v2_block_writes>(this);
+
+  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  m_image_ctx.aio_work_queue->block_writes(ctx);
+  return nullptr;
+}
+
+template <typename I>
+Context *RefreshRequest<I>::handle_v2_block_writes(int *result) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
+
+  if (*result < 0) {
+    lderr(cct) << "failed to block writes: " << cpp_strerror(*result)
+               << dendl;
+    save_result(result);
+  }
   return send_v2_finalize_refresh_parent();
 }
 
@@ -581,7 +623,8 @@ Context *RefreshRequest<I>::send_v2_shut_down_exclusive_lock() {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << this << " " << __func__ << dendl;
 
-  // exclusive lock feature was dynamically disabled
+  // exclusive lock feature was dynamically disabled. in-flight IO will be
+  // flushed and in-flight requests will be canceled before releasing lock
   using klass = RefreshRequest<I>;
   Context *ctx = create_context_callback<
     klass, &klass::handle_v2_shut_down_exclusive_lock>(this);
@@ -600,6 +643,11 @@ Context *RefreshRequest<I>::handle_v2_shut_down_exclusive_lock(int *result) {
     save_result(result);
   }
 
+  {
+    RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
+    assert(m_image_ctx.exclusive_lock == nullptr);
+  }
+
   assert(m_exclusive_lock != nullptr);
   delete m_exclusive_lock;
   m_exclusive_lock = nullptr;
@@ -639,6 +687,10 @@ Context *RefreshRequest<I>::handle_v2_close_journal(int *result) {
   delete m_journal;
   m_journal = nullptr;
 
+  assert(m_blocked_writes);
+  m_blocked_writes = false;
+
+  m_image_ctx.aio_work_queue->unblock_writes();
   return send_v2_close_object_map();
 }
 
@@ -784,9 +836,11 @@ void RefreshRequest<I>::apply() {
                                    m_image_ctx.snap_lock)) {
       // disabling exclusive lock will automatically handle closing
       // object map and journaling
-      std::swap(m_exclusive_lock, m_image_ctx.exclusive_lock);
+      assert(m_exclusive_lock == nullptr);
+      m_exclusive_lock = m_image_ctx.exclusive_lock;
     } else {
       if (m_exclusive_lock != nullptr) {
+        assert(m_image_ctx.exclusive_lock == nullptr);
         std::swap(m_exclusive_lock, m_image_ctx.exclusive_lock);
       }
       if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
diff --git a/src/librbd/image/RefreshRequest.h b/src/librbd/image/RefreshRequest.h
index 8a08696..71fe509 100644
--- a/src/librbd/image/RefreshRequest.h
+++ b/src/librbd/image/RefreshRequest.h
@@ -67,6 +67,9 @@ private:
    *            V2_OPEN_JOURNAL (skip if journal              |
    *                |            active or disabled)          |
    *                v                                         |
+   *            V2_BLOCK_WRITES (skip if journal not          |
+   *                |            disabled)                    |
+   *                v                                         |
    *             <apply>                                      |
    *                |                                         |
    *                v                                         |
@@ -125,6 +128,8 @@ private:
   std::string m_lock_tag;
   bool m_exclusive_locked;
 
+  bool m_blocked_writes = false;
+
   void send_v1_read_header();
   Context *handle_v1_read_header(int *result);
 
@@ -152,6 +157,9 @@ private:
   Context *send_v2_open_journal();
   Context *handle_v2_open_journal(int *result);
 
+  Context *send_v2_block_writes();
+  Context *handle_v2_block_writes(int *result);
+
   Context *send_v2_open_object_map();
   Context *handle_v2_open_object_map(int *result);
 
diff --git a/src/librbd/internal.cc b/src/librbd/internal.cc
index 983e622..5276052 100644
--- a/src/librbd/internal.cc
+++ b/src/librbd/internal.cc
@@ -1608,6 +1608,21 @@ remove_mirroring_image:
         return -EINVAL;
       }
 
+      // if disabling features w/ exclusive lock supported, we need to
+      // acquire the lock to temporarily block IO against the image
+      if (ictx->exclusive_lock != nullptr && !enabled) {
+        C_SaferCond lock_ctx;
+        ictx->exclusive_lock->request_lock(&lock_ctx);
+        r = lock_ctx.wait();
+        if (r < 0) {
+          lderr(cct) << "failed to lock image: " << cpp_strerror(r) << dendl;
+          return r;
+        } else if (!ictx->exclusive_lock->is_lock_owner()) {
+          lderr(cct) << "failed to acquire exclusive lock" << dendl;
+          return -EROFS;
+        }
+      }
+
       RWLock::WLocker snap_locker(ictx->snap_lock);
       uint64_t new_features;
       if (enabled) {
diff --git a/src/mds/FSMap.cc b/src/mds/FSMap.cc
index 73e425d..a20bb09 100644
--- a/src/mds/FSMap.cc
+++ b/src/mds/FSMap.cc
@@ -35,7 +35,7 @@ void FSMap::dump(Formatter *f) const
   compat.dump(f);
   f->close_section();
 
-  f->open_object_section("feature flags");
+  f->open_object_section("feature_flags");
   f->dump_bool("enable_multiple", enable_multiple);
   f->dump_bool("ever_enabled_multiple", ever_enabled_multiple);
   f->close_section();
@@ -440,7 +440,9 @@ void FSMap::decode(bufferlist::iterator& p)
     ::decode(mds_roles, p);
     ::decode(standby_daemons, p);
     ::decode(standby_epochs, p);
-    ::decode(ever_enabled_multiple, p);
+    if (struct_v >= 7) {
+      ::decode(ever_enabled_multiple, p);
+    }
   }
 
   DECODE_FINISH(p);
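
The struct_v guard above is the standard Ceph pattern for decoding a field that only exists in newer encodings (here, ever_enabled_multiple is read only when struct_v >= 7), so an FSMap written by an older monitor still decodes cleanly. In general the pattern looks like this sketch, where Example and its fields are hypothetical:

    // Generic sketch of Ceph's versioned encode/decode guard
    // (hypothetical type and field names; relies on include/encoding.h).
    #include "include/encoding.h"

    struct Example {
      uint64_t old_field = 0;
      bool new_field = false;        // added in struct version 7

      void encode(ceph::bufferlist &bl) const {
        ENCODE_START(7, 6, bl);      // encoding version 7, compat back to 6
        ::encode(old_field, bl);
        ::encode(new_field, bl);
        ENCODE_FINISH(bl);
      }

      void decode(ceph::bufferlist::iterator &p) {
        DECODE_START(7, p);
        ::decode(old_field, p);
        if (struct_v >= 7) {         // struct_v is provided by DECODE_START
          ::decode(new_field, p);    // absent in encodings older than v7
        }
        DECODE_FINISH(p);
      }
    };
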
diff --git a/src/msg/async/AsyncConnection.cc b/src/msg/async/AsyncConnection.cc
index 611e498..7ae8187 100644
--- a/src/msg/async/AsyncConnection.cc
+++ b/src/msg/async/AsyncConnection.cc
@@ -178,7 +178,7 @@ static void alloc_aligned_buffer(bufferlist& data, unsigned len, unsigned off)
 AsyncConnection::AsyncConnection(CephContext *cct, AsyncMessenger *m, EventCenter *c, PerfCounters *p)
   : Connection(cct, m), async_msgr(m), logger(p), global_seq(0), connect_seq(0), peer_global_seq(0),
     out_seq(0), ack_left(0), in_seq(0), state(STATE_NONE), state_after_send(0), sd(-1), port(-1),
-    write_lock("AsyncConnection::write_lock"), can_write(NOWRITE),
+    write_lock("AsyncConnection::write_lock"), can_write(WriteStatus::NOWRITE),
     open_write(false), keepalive(false), lock("AsyncConnection::lock"), recv_buf(NULL),
     recv_max_prefetch(MIN(msgr->cct->_conf->ms_tcp_prefetch_max_size, TCP_PREFETCH_MIN_SIZE)),
     recv_start(0), recv_end(0), got_bad_auth(false), authorizer(NULL), replacing(false),
@@ -1349,7 +1349,7 @@ ssize_t AsyncConnection::_process_connection()
         // message may in queue between last _try_send and connection ready
         // write event may already notify and we need to force scheduler again
         write_lock.Lock();
-        can_write = CANWRITE;
+        can_write = WriteStatus::CANWRITE;
         if (is_queued())
           center->dispatch_event_external(write_handler);
         write_lock.Unlock();
@@ -1511,7 +1511,7 @@ ssize_t AsyncConnection::_process_connection()
         state = STATE_OPEN;
         memset(&connect_msg, 0, sizeof(connect_msg));
         write_lock.Lock();
-        can_write = CANWRITE;
+        can_write = WriteStatus::CANWRITE;
         if (is_queued())
           center->dispatch_event_external(write_handler);
         write_lock.Unlock();
@@ -1816,7 +1816,7 @@ ssize_t AsyncConnection::handle_connect_msg(ceph_msg_connect &connect, bufferlis
     ldout(async_msgr->cct, 1) << __func__ << " replacing on lossy channel, failing existing" << dendl;
     existing->_stop();
   } else {
-    assert(can_write == NOWRITE);
+    assert(can_write == WriteStatus::NOWRITE);
     existing->write_lock.Lock(true);
     // queue a reset on the new connection, which we're dumping for the old
     center->dispatch_event_external(reset_handler);
@@ -1842,7 +1842,7 @@ ssize_t AsyncConnection::handle_connect_msg(ceph_msg_connect &connect, bufferlis
     existing->requeue_sent();
 
     swap(existing->sd, sd);
-    existing->can_write = NOWRITE;
+    existing->can_write = WriteStatus::NOWRITE;
     existing->open_write = false;
     existing->replacing = true;
     existing->state_offset = 0;
@@ -2011,7 +2011,7 @@ int AsyncConnection::send_message(Message *m)
   if (async_msgr->get_myaddr() == get_peer_addr()) { //loopback connection
     ldout(async_msgr->cct, 20) << __func__ << " " << *m << " local" << dendl;
     Mutex::Locker l(write_lock);
-    if (can_write != CLOSED) {
+    if (can_write != WriteStatus::CLOSED) {
       local_messages.push_back(m);
       center->dispatch_event_external(local_deliver_handler);
     } else {
@@ -2037,14 +2037,14 @@ int AsyncConnection::send_message(Message *m)
 
   Mutex::Locker l(write_lock);
   // "features" changes will change the payload encoding
-  if (can_fast_prepare && (can_write == NOWRITE || get_features() != f)) {
+  if (can_fast_prepare && (can_write == WriteStatus::NOWRITE || get_features() != f)) {
     // ensure the correctness of message encoding
     bl.clear();
     m->get_payload().clear();
-    ldout(async_msgr->cct, 5) << __func__ << " clear encoded buffer, can_write=" << can_write << " previous "
+    ldout(async_msgr->cct, 5) << __func__ << " clear encoded buffer previous "
                               << f << " != " << get_features() << dendl;
   }
-  if (!is_queued() && can_write == CANWRITE && async_msgr->cct->_conf->ms_async_send_inline) {
+  if (!is_queued() && can_write == WriteStatus::CANWRITE && async_msgr->cct->_conf->ms_async_send_inline) {
     if (!can_fast_prepare)
       prepare_send_message(get_features(), m, bl);
     logger->inc(l_msgr_send_messages_inline);
@@ -2053,7 +2053,7 @@ int AsyncConnection::send_message(Message *m)
       // we want to handle fault within internal thread
       center->dispatch_event_external(write_handler);
     }
-  } else if (can_write == CLOSED) {
+  } else if (can_write == WriteStatus::CLOSED) {
     ldout(async_msgr->cct, 10) << __func__ << " connection closed."
                                << " Drop message " << m << dendl;
     m->put();
@@ -2163,7 +2163,7 @@ void AsyncConnection::fault()
     ::close(sd);
     sd = -1;
   }
-  can_write = NOWRITE;
+  can_write = WriteStatus::NOWRITE;
   open_write = false;
 
   // requeue sent items
@@ -2238,7 +2238,7 @@ void AsyncConnection::was_session_reset()
   // it's safe to directly set 0, double locked
   ack_left.set(0);
   once_ready = false;
-  can_write = NOWRITE;
+  can_write = WriteStatus::NOWRITE;
 }
 
 void AsyncConnection::_stop()
@@ -2257,7 +2257,7 @@ void AsyncConnection::_stop()
 
   state = STATE_CLOSED;
   open_write = false;
-  can_write = CLOSED;
+  can_write = WriteStatus::CLOSED;
   state_offset = 0;
   if (sd >= 0) {
     shutdown_socket();
@@ -2293,7 +2293,7 @@ void AsyncConnection::prepare_send_message(uint64_t features, Message *m, buffer
 
 ssize_t AsyncConnection::write_message(Message *m, bufferlist& bl, bool more)
 {
-  assert(can_write == CANWRITE);
+  assert(can_write == WriteStatus::CANWRITE);
   m->set_seq(out_seq.inc());
 
   if (!policy.lossy) {
@@ -2412,7 +2412,7 @@ void AsyncConnection::send_keepalive()
 {
   ldout(async_msgr->cct, 10) << __func__ << " started." << dendl;
   Mutex::Locker l(write_lock);
-  if (can_write != CLOSED) {
+  if (can_write != WriteStatus::CLOSED) {
     keepalive = true;
     center->dispatch_event_external(write_handler);
   }
@@ -2454,7 +2454,7 @@ void AsyncConnection::handle_write()
   ssize_t r = 0;
 
   write_lock.Lock();
-  if (can_write == CANWRITE) {
+  if (can_write == WriteStatus::CANWRITE) {
     if (keepalive) {
       _send_keepalive_or_ack();
       keepalive = false;
diff --git a/src/msg/async/AsyncConnection.h b/src/msg/async/AsyncConnection.h
index 4a76a7c..608ccce 100644
--- a/src/msg/async/AsyncConnection.h
+++ b/src/msg/async/AsyncConnection.h
@@ -17,6 +17,7 @@
 #ifndef CEPH_MSG_ASYNCCONNECTION_H
 #define CEPH_MSG_ASYNCCONNECTION_H
 
+#include <atomic>
 #include <pthread.h>
 #include <signal.h>
 #include <climits>
@@ -130,8 +131,7 @@ class AsyncConnection : public Connection {
   ostream& _conn_prefix(std::ostream *_dout);
 
   bool is_connected() override {
-    Mutex::Locker l(lock);
-    return state >= STATE_OPEN && state <= STATE_OPEN_TAG_CLOSE;
+    return can_write.load() == WriteStatus::CANWRITE;
   }
 
   // Only call when AsyncConnection first construct
@@ -240,11 +240,12 @@ class AsyncConnection : public Connection {
   Messenger::Policy policy;
 
   Mutex write_lock;
-  enum {
+  enum class WriteStatus {
     NOWRITE,
     CANWRITE,
     CLOSED
-  } can_write;
+  };
+  std::atomic<WriteStatus> can_write;
   bool open_write;
   map<int, list<pair<bufferlist, Message*> > > out_q;  // priority queue for outbound msgs
   list<Message*> sent; // the first bufferlist need to inject seq
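
In the AsyncConnection change above, can_write moves from a bare enum guarded by the connection lock to a scoped enum held in a std::atomic, which is what lets is_connected() become a lock-free load. The pattern in isolation, as an illustrative class rather than the AsyncConnection API:

    #include <atomic>

    // Scoped enum + std::atomic: status can be read without taking the write lock.
    class Conn {
      enum class WriteStatus { NOWRITE, CANWRITE, CLOSED };
      std::atomic<WriteStatus> can_write{WriteStatus::NOWRITE};

    public:
      void mark_open()   { can_write.store(WriteStatus::CANWRITE); }
      void mark_closed() { can_write.store(WriteStatus::CLOSED); }
      bool is_connected() const {
        return can_write.load() == WriteStatus::CANWRITE;   // no mutex needed
      }
    };
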
diff --git a/src/msg/simple/DispatchQueue.cc b/src/msg/simple/DispatchQueue.cc
index fed198e..be46c36 100644
--- a/src/msg/simple/DispatchQueue.cc
+++ b/src/msg/simple/DispatchQueue.cc
@@ -150,6 +150,15 @@ void DispatchQueue::entry()
       lock.Unlock();
 
       if (qitem.is_code()) {
+	if (cct->_conf->ms_inject_internal_delays &&
+	    cct->_conf->ms_inject_delay_probability &&
+	    (rand() % 10000)/10000.0 < cct->_conf->ms_inject_delay_probability) {
+	  utime_t t;
+	  t.set_from_double(cct->_conf->ms_inject_internal_delays);
+	  ldout(cct, 1) << "DispatchQueue::entry  inject delay of " << t
+			<< dendl;
+	  t.sleep();
+	}
 	switch (qitem.get_code()) {
 	case D_BAD_REMOTE_RESET:
 	  msgr->ms_deliver_handle_remote_reset(qitem.get_connection());
diff --git a/src/msg/xio/XioConnection.cc b/src/msg/xio/XioConnection.cc
index 40069fa..fac8cde 100644
--- a/src/msg/xio/XioConnection.cc
+++ b/src/msg/xio/XioConnection.cc
@@ -196,40 +196,40 @@ static inline XioDispatchHook* pool_alloc_xio_dispatch_hook(
   return xhook;
 }
 
-int XioConnection::on_msg_req(struct xio_session *session,
-			      struct xio_msg *req,
+int XioConnection::on_msg(struct xio_session *session,
+			      struct xio_msg *msg,
 			      int more_in_batch,
 			      void *cb_user_context)
 {
-  struct xio_msg *treq = req;
+  struct xio_msg *tmsg = msg;
 
   /* XXX Accelio guarantees message ordering at
    * xio_session */
 
   if (! in_seq.p()) {
-    if (!treq->in.header.iov_len) {
+    if (!tmsg->in.header.iov_len) {
 	ldout(msgr->cct,0) << __func__ << " empty header: packet out of sequence?" << dendl;
-	xio_release_msg(req);
+	xio_release_msg(msg);
 	return 0;
     }
     XioMsgCnt msg_cnt(
-      buffer::create_static(treq->in.header.iov_len,
-			    (char*) treq->in.header.iov_base));
-    ldout(msgr->cct,10) << __func__ << " receive req " << "treq " << treq
+      buffer::create_static(tmsg->in.header.iov_len,
+			    (char*) tmsg->in.header.iov_base));
+    ldout(msgr->cct,10) << __func__ << " receive msg " << "tmsg " << tmsg
       << " msg_cnt " << msg_cnt.msg_cnt
-      << " iov_base " << treq->in.header.iov_base
-      << " iov_len " << (int) treq->in.header.iov_len
-      << " nents " << treq->in.pdata_iov.nents
+      << " iov_base " << tmsg->in.header.iov_base
+      << " iov_len " << (int) tmsg->in.header.iov_len
+      << " nents " << tmsg->in.pdata_iov.nents
       << " conn " << conn << " sess " << session
-      << " sn " << treq->sn << dendl;
+      << " sn " << tmsg->sn << dendl;
     assert(session == this->session);
     in_seq.set_count(msg_cnt.msg_cnt);
   } else {
     /* XXX major sequence error */
-    assert(! treq->in.header.iov_len);
+    assert(! tmsg->in.header.iov_len);
   }
 
-  in_seq.append(req);
+  in_seq.append(msg);
   if (in_seq.count() > 0) {
     return 0;
   }
@@ -250,14 +250,14 @@ int XioConnection::on_msg_req(struct xio_session *session,
     dendl;
 
   struct xio_msg* msg_iter = msg_seq.begin();
-  treq = msg_iter;
+  tmsg = msg_iter;
   XioMsgHdr hdr(header, footer,
-		buffer::create_static(treq->in.header.iov_len,
-				      (char*) treq->in.header.iov_base));
+		buffer::create_static(tmsg->in.header.iov_len,
+				      (char*) tmsg->in.header.iov_base));
 
   if (magic & (MSG_MAGIC_TRACE_XCON)) {
     if (hdr.hdr->type == 43) {
-      print_xio_msg_hdr(msgr->cct, "on_msg_req", hdr, NULL);
+      print_xio_msg_hdr(msgr->cct, "on_msg", hdr, NULL);
     }
   }
 
@@ -270,9 +270,9 @@ int XioConnection::on_msg_req(struct xio_session *session,
   blen = header.front_len;
 
   while (blen && (msg_iter != msg_seq.end())) {
-    treq = msg_iter;
-    iov_len = vmsg_sglist_nents(&treq->in);
-    iovs = vmsg_sglist(&treq->in);
+    tmsg = msg_iter;
+    iov_len = vmsg_sglist_nents(&tmsg->in);
+    iovs = vmsg_sglist(&tmsg->in);
     for (; blen && (ix < iov_len); ++ix) {
       msg_iov = &iovs[ix];
 
@@ -317,9 +317,9 @@ int XioConnection::on_msg_req(struct xio_session *session,
   }
 
   while (blen && (msg_iter != msg_seq.end())) {
-    treq = msg_iter;
-    iov_len = vmsg_sglist_nents(&treq->in);
-    iovs = vmsg_sglist(&treq->in);
+    tmsg = msg_iter;
+    iov_len = vmsg_sglist_nents(&tmsg->in);
+    iovs = vmsg_sglist(&tmsg->in);
     for (; blen && (ix < iov_len); ++ix) {
       msg_iov = &iovs[ix];
       take_len = MIN(blen, msg_iov->iov_len);
@@ -349,9 +349,9 @@ int XioConnection::on_msg_req(struct xio_session *session,
   }
 
   while (blen && (msg_iter != msg_seq.end())) {
-    treq = msg_iter;
-    iov_len = vmsg_sglist_nents(&treq->in);
-    iovs = vmsg_sglist(&treq->in);
+    tmsg = msg_iter;
+    iov_len = vmsg_sglist_nents(&tmsg->in);
+    iovs = vmsg_sglist(&tmsg->in);
     for (; blen && (ix < iov_len); ++ix) {
       msg_iov = &iovs[ix];
       data.append(
@@ -366,7 +366,7 @@ int XioConnection::on_msg_req(struct xio_session *session,
   }
 
   /* update connection timestamp */
-  recv.set(treq->timestamp);
+  recv.set(tmsg->timestamp);
 
   Message *m =
     decode_message(msgr->cct, msgr->crcflags, header, footer, payload, middle,
@@ -475,7 +475,7 @@ void XioConnection::msg_release_fail(struct xio_msg *msg, int code)
     " (" << xio_strerror(code) << ")" << dendl;
 } /* msg_release_fail */
 
-int XioConnection::flush_input_queue(uint32_t flags) {
+int XioConnection::flush_out_queues(uint32_t flags) {
   XioMessenger* msgr = static_cast<XioMessenger*>(get_messenger());
   if (! (flags & CState::OP_FLAG_LOCKED))
     pthread_spin_lock(&sp);
@@ -540,7 +540,7 @@ int XioConnection::discard_input_queue(uint32_t flags)
 	break;
       case XioSubmit::INCOMING_MSG_RELEASE:
 	deferred_q.erase(q_iter);
-	portal->release_xio_rsp(static_cast<XioRsp*>(xs));
+	portal->release_xio_msg(static_cast<XioRsp*>(xs));
 	break;
       default:
 	ldout(msgr->cct,0) << __func__ << ": Unknown Msg type " << xs->type << dendl;
@@ -638,7 +638,7 @@ int XioConnection::CState::state_up_ready(uint32_t flags)
   if (! (flags & CState::OP_FLAG_LOCKED))
     pthread_spin_lock(&xcon->sp);
 
-  xcon->flush_input_queue(flags|CState::OP_FLAG_LOCKED);
+  xcon->flush_out_queues(flags|CState::OP_FLAG_LOCKED);
 
   session_state.set(UP);
   startup_state.set(READY);
diff --git a/src/msg/xio/XioConnection.h b/src/msg/xio/XioConnection.h
index 383f57f..e55ea98 100644
--- a/src/msg/xio/XioConnection.h
+++ b/src/msg/xio/XioConnection.h
@@ -311,7 +311,7 @@ public:
 
   int passive_setup(); /* XXX */
 
-  int on_msg_req(struct xio_session *session, struct xio_msg *req,
+  int on_msg(struct xio_session *session, struct xio_msg *msg,
 		 int more_in_batch, void *cb_user_context);
   int on_ow_msg_send_complete(struct xio_session *session, struct xio_msg *msg,
 			      void *conn_user_context);
@@ -319,7 +319,7 @@ public:
 		   struct xio_msg  *msg, void *conn_user_context);
   void msg_send_fail(XioMsg *xmsg, int code);
   void msg_release_fail(struct xio_msg *msg, int code);
-  int flush_input_queue(uint32_t flags);
+  int flush_out_queues(uint32_t flags);
   int discard_input_queue(uint32_t flags);
   int adjust_clru(uint32_t flags);
 };
diff --git a/src/msg/xio/XioMessenger.cc b/src/msg/xio/XioMessenger.cc
index e3afde0..f1752b6 100644
--- a/src/msg/xio/XioMessenger.cc
+++ b/src/msg/xio/XioMessenger.cc
@@ -145,7 +145,7 @@ static int on_msg(struct xio_session *session,
     }
   }
 
-  return xcon->on_msg_req(session, req, more_in_batch,
+  return xcon->on_msg(session, req, more_in_batch,
 			  cb_user_context);
 }
 
@@ -583,7 +583,7 @@ enum bl_type
 #define MAX_XIO_BUF_SIZE 1044480
 
 static inline int
-xio_count_buffers(buffer::list& bl, int& req_size, int& msg_off, int& req_off)
+xio_count_buffers(const buffer::list& bl, int& req_size, int& msg_off, int& req_off)
 {
 
   const std::list<buffer::ptr>& buffers = bl.buffers();
@@ -628,7 +628,7 @@ xio_count_buffers(buffer::list& bl, int& req_size, int& msg_off, int& req_off)
 }
 
 static inline void
-xio_place_buffers(buffer::list& bl, XioMsg *xmsg, struct xio_msg*& req,
+xio_place_buffers(const buffer::list& bl, XioMsg *xmsg, struct xio_msg*& req,
 		  struct xio_iovec_ex*& msg_iov, int& req_size,
 		  int ex_cnt, int& msg_off, int& req_off, bl_type type)
 {
diff --git a/src/msg/xio/XioPortal.h b/src/msg/xio/XioPortal.h
index 04e16d2..344e96d 100644
--- a/src/msg/xio/XioPortal.h
+++ b/src/msg/xio/XioPortal.h
@@ -157,7 +157,7 @@ public:
   int bind(struct xio_session_ops *ops, const string &base_uri,
 	   uint16_t port, uint16_t *assigned_port);
 
-  inline void release_xio_rsp(XioRsp* xrsp) {
+  inline void release_xio_msg(XioRsp* xrsp) {
     struct xio_msg *msg = xrsp->dequeue();
     struct xio_msg *next_msg = NULL;
     int code;
@@ -193,7 +193,7 @@ public:
 	break;
       default:
 	/* INCOMING_MSG_RELEASE */
-	release_xio_rsp(static_cast<XioRsp*>(xs));
+	release_xio_msg(static_cast<XioRsp*>(xs));
       break;
       };
     }
@@ -321,7 +321,7 @@ public:
 	    default:
 	      /* INCOMING_MSG_RELEASE */
 	      q_iter = send_q.erase(q_iter);
-	      release_xio_rsp(static_cast<XioRsp*>(xs));
+	      release_xio_msg(static_cast<XioRsp*>(xs));
 	      continue;
 	    } /* switch (xs->type) */
 	    q_iter = send_q.erase(q_iter);
diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc
index 451262d..553bf10 100644
--- a/src/os/bluestore/BlueStore.cc
+++ b/src/os/bluestore/BlueStore.cc
@@ -1215,24 +1215,6 @@ int BlueStore::_open_db(bool create)
       bluefs_extents.insert(BLUEFS_START, initial);
     }
 
-    // use a short, relative path, if it's bluefs.
-    strcpy(fn, "db");
-
-    if (bluefs_shared_bdev == BlueFS::BDEV_SLOW) {
-      // we have both block.db and block; tell rocksdb!
-      // note: the second (last) size value doesn't really matter
-      char db_paths[PATH_MAX*3];
-      snprintf(
-	db_paths, sizeof(db_paths), "db,%lld db.slow,%lld",
-	(unsigned long long)bluefs->get_block_device_size(BlueFS::BDEV_DB) *
-	 95 / 100,
-	(unsigned long long)bluefs->get_block_device_size(BlueFS::BDEV_SLOW) *
-	 95 / 100);
-      g_conf->set_val("rocksdb_db_paths", db_paths, false, false);
-      dout(10) << __func__ << " set rocksdb_db_paths to "
-	       << g_conf->rocksdb_db_paths << dendl;
-    }
-
     snprintf(bfn, sizeof(bfn), "%s/block.wal", path.c_str());
     if (::stat(bfn, &st) == 0) {
       r = bluefs->add_block_device(BlueFS::BDEV_WAL, bfn);
@@ -1288,6 +1270,23 @@ int BlueStore::_open_db(bool create)
       strcpy(fn, "db");
     }
 
+    if (bluefs_shared_bdev == BlueFS::BDEV_SLOW) {
+      // we have both block.db and block; tell rocksdb!
+      // note: the second (last) size value doesn't really matter
+      char db_paths[PATH_MAX*3];
+      snprintf(
+	db_paths, sizeof(db_paths), "%s,%lld %s.slow,%lld",
+	fn,
+	(unsigned long long)bluefs->get_block_device_size(BlueFS::BDEV_DB) *
+	 95 / 100,
+	fn,
+	(unsigned long long)bluefs->get_block_device_size(BlueFS::BDEV_SLOW) *
+	 95 / 100);
+      g_conf->set_val("rocksdb_db_paths", db_paths, false, false);
+      dout(10) << __func__ << " set rocksdb_db_paths to "
+	       << g_conf->rocksdb_db_paths << dendl;
+    }
+
     if (create) {
       env->CreateDir(fn);
       if (g_conf->rocksdb_separate_wal_dir)
diff --git a/src/os/filestore/LFNIndex.cc b/src/os/filestore/LFNIndex.cc
index 1994d5a..3b33513 100644
--- a/src/os/filestore/LFNIndex.cc
+++ b/src/os/filestore/LFNIndex.cc
@@ -1041,13 +1041,13 @@ static int parse_object(const char *s, ghobject_t& o)
 
 int LFNIndex::lfn_parse_object_name_keyless(const string &long_name, ghobject_t *out)
 {
-  bool r = parse_object(long_name.c_str(), *out);
+  int r = parse_object(long_name.c_str(), *out);
   int64_t pool = -1;
   spg_t pg;
   if (coll().is_pg_prefix(&pg))
     pool = (int64_t)pg.pgid.pool();
   out->hobj.pool = pool;
-  if (!r) return r;
+  if (!r) return -EINVAL;
   string temp = lfn_generate_object_name(*out);
   return r ? 0 : -EINVAL;
 }
diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index 7c260ff..66aebb7 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -631,7 +631,8 @@ void OSDService::promote_throttle_recalibrate()
     unsigned po = (double)target_obj_sec * dur * 1000.0 / (double)attempts;
     unsigned pb = (double)target_bytes_sec / (double)avg_size * dur * 1000.0
       / (double)attempts;
-    derr << __func__ << "  po " << po << " pb " << pb << " avg_size " << avg_size << dendl;
+    dout(20) << __func__ << "  po " << po << " pb " << pb << " avg_size "
+	     << avg_size << dendl;
     if (target_obj_sec && target_bytes_sec)
       new_prob = MIN(po, pb);
     else if (target_obj_sec)
@@ -2022,6 +2023,31 @@ int OSD::init()
   int rotating_auth_attempts = 0;
   const int max_rotating_auth_attempts = 10;
 
+  // sanity check long object name handling
+  {
+    hobject_t l;
+    l.oid.name = string(g_conf->osd_max_object_name_len, 'n');
+    l.set_key(string(g_conf->osd_max_object_name_len, 'k'));
+    l.nspace = string(g_conf->osd_max_object_namespace_len, 's');
+    r = store->validate_hobject_key(l);
+    if (r < 0) {
+      derr << "backend (" << store->get_type() << ") is unable to support max "
+	   << "object name[space] len" << dendl;
+      derr << "   osd max object name len = "
+	   << g_conf->osd_max_object_name_len << dendl;
+      derr << "   osd max object namespace len = "
+	   << g_conf->osd_max_object_namespace_len << dendl;
+      derr << cpp_strerror(r) << dendl;
+      if (g_conf->osd_check_max_object_name_len_on_startup) {
+	goto out;
+      }
+      derr << "osd_check_max_object_name_len_on_startup = false, starting anyway"
+	   << dendl;
+    } else {
+      dout(20) << "configured osd_max_object_name[space]_len looks ok" << dendl;
+    }
+  }
+
   // read superblock
   r = read_superblock();
   if (r < 0) {
@@ -4662,7 +4688,7 @@ bool OSD::ms_handle_reset(Connection *con)
   dout(1) << "ms_handle_reset con " << con << " session " << session << dendl;
   if (!session)
     return false;
-  session->wstate.reset();
+  session->wstate.reset(con);
   session->con.reset(NULL);  // break con <-> session ref cycle
   session_handle_reset(session);
   session->put();
@@ -6692,9 +6718,11 @@ void OSD::handle_osd_map(MOSDMap *m)
 
   if (superblock.oldest_map) {
     int num = 0;
-    epoch_t min(
-      MIN(m->oldest_map,
-	  service.map_cache.cached_key_lower_bound()));
+    epoch_t cache_lb = service.map_cache.cached_key_lower_bound();
+    epoch_t min = MIN(m->oldest_map, cache_lb);
+    dout(20) << __func__ << " oldest_map " << m->oldest_map
+	     << " cache lb " << cache_lb
+	     << " min " << min << dendl;
     for (epoch_t e = superblock.oldest_map; e < min; ++e) {
       dout(20) << " removing old osdmap epoch " << e << dendl;
       t.remove(coll_t::meta(), get_osdmap_pobject_name(e));
diff --git a/src/osd/ReplicatedPG.cc b/src/osd/ReplicatedPG.cc
index 052d6c7..be65e6e 100644
--- a/src/osd/ReplicatedPG.cc
+++ b/src/osd/ReplicatedPG.cc
@@ -8804,6 +8804,10 @@ void ReplicatedPG::handle_watch_timeout(WatchRef watch)
   ObjectContextRef obc = watch->get_obc(); // handle_watch_timeout owns this ref
   dout(10) << "handle_watch_timeout obc " << obc << dendl;
 
+  if (!is_active()) {
+    dout(10) << "handle_watch_timeout not active, no-op" << dendl;
+    return;
+  }
   if (is_degraded_or_backfilling_object(obc->obs.oi.soid)) {
     callbacks_for_degraded_object[obc->obs.oi.soid].push_back(
       watch->get_delayed_cb()
@@ -9288,8 +9292,10 @@ void ReplicatedPG::kick_object_context_blocked(ObjectContextRef obc)
     objects_blocked_on_snap_promotion.erase(i);
   }
 
-  if (obc->requeue_scrub_on_unblock)
+  if (obc->requeue_scrub_on_unblock) {
+    obc->requeue_scrub_on_unblock = false;
     requeue_scrub();
+  }
 }
 
 SnapSetContext *ReplicatedPG::create_snapset_context(const hobject_t& oid)
diff --git a/src/osd/ReplicatedPG.h b/src/osd/ReplicatedPG.h
index 5384f2d..47a6a16 100644
--- a/src/osd/ReplicatedPG.h
+++ b/src/osd/ReplicatedPG.h
@@ -567,7 +567,7 @@ public:
     }
     template <typename F>
     void register_on_success(F &&f) {
-      on_finish.emplace_back(std::move(f));
+      on_success.emplace_back(std::move(f));
     }
     template <typename F>
     void register_on_applied(F &&f) {
diff --git a/src/osd/Watch.cc b/src/osd/Watch.cc
index dc665e0..43846c0 100644
--- a/src/osd/Watch.cc
+++ b/src/osd/Watch.cc
@@ -360,10 +360,10 @@ void Watch::got_ping(utime_t t)
 void Watch::connect(ConnectionRef con, bool _will_ping)
 {
   if (conn == con) {
-    dout(10) << "connecting - already connected" << dendl;
+    dout(10) << __func__ << " con " << con << " - already connected" << dendl;
     return;
   }
-  dout(10) << "connecting" << dendl;
+  dout(10) << __func__ << " con " << con << dendl;
   conn = con;
   will_ping = _will_ping;
   OSD::Session* sessionref(static_cast<OSD::Session*>(con->get_priv()));
@@ -386,7 +386,7 @@ void Watch::connect(ConnectionRef con, bool _will_ping)
 
 void Watch::disconnect()
 {
-  dout(10) << "disconnect" << dendl;
+  dout(10) << "disconnect (con was " << conn << ")" << dendl;
   conn = ConnectionRef();
   if (!will_ping)
     register_cb();
@@ -513,7 +513,7 @@ void WatchConState::removeWatch(WatchRef watch)
   watches.erase(watch);
 }
 
-void WatchConState::reset()
+void WatchConState::reset(Connection *con)
 {
   set<WatchRef> _watches;
   {
@@ -526,7 +526,11 @@ void WatchConState::reset()
     boost::intrusive_ptr<ReplicatedPG> pg((*i)->get_pg());
     pg->lock();
     if (!(*i)->is_discarded()) {
-      (*i)->disconnect();
+      if ((*i)->is_connected(con)) {
+	(*i)->disconnect();
+      } else {
+	generic_derr << __func__ << " not still connected to " << (*i) << dendl;
+      }
     }
     pg->unlock();
   }
diff --git a/src/osd/Watch.h b/src/osd/Watch.h
index 6e4ec37..c6843ee 100644
--- a/src/osd/Watch.h
+++ b/src/osd/Watch.h
@@ -202,6 +202,9 @@ public:
   bool is_connected() {
     return conn.get() != NULL;
   }
+  bool is_connected(Connection *con) {
+    return conn.get() == con;
+  }
 
   /// NOTE: must be called with pg lock held
   ~Watch();
@@ -290,7 +293,7 @@ public:
     );
 
   /// Called on session reset, disconnects watchers
-  void reset();
+  void reset(Connection *con);
 };
 
 #endif
diff --git a/src/pybind/Makefile.am b/src/pybind/Makefile.am
index cfef3ae..9f779dd 100644
--- a/src/pybind/Makefile.am
+++ b/src/pybind/Makefile.am
@@ -6,6 +6,9 @@ CYTHON_BUILD_DIR="$(shell readlink -f $(builddir))/build"
 
 PY_DISTUTILS = \
 	mkdir -p $(CYTHON_BUILD_DIR); \
+	CC="${CC}" \
+	CXX="${CXX}" \
+	LDSHARED="${CC} -shared" \
 	CPPFLAGS="-iquote \${abs_srcdir}/include ${AM_CPPFLAGS} ${CPPFLAGS}" \
 	CFLAGS="-iquote \${abs_srcdir}/include ${AM_CFLAGS} ${PYTHON_CFLAGS}" \
 	LDFLAGS="-L\${abs_builddir}/.libs $(subst -pie,,${AM_LDFLAGS}) ${PYTHON_LDFLAGS}" \
diff --git a/src/pybind/cephfs/setup.py b/src/pybind/cephfs/setup.py
index 73488eb..371358b 100755
--- a/src/pybind/cephfs/setup.py
+++ b/src/pybind/cephfs/setup.py
@@ -43,7 +43,6 @@ setup(
         Extension("cephfs",
             ["cephfs.pyx"],
             libraries=["cephfs"],
-            language="c++"
             )
     ], build_dir=os.environ.get("CYTHON_BUILD_DIR", None), include_path=[
         os.path.join(os.path.dirname(__file__), "..", "rados")]
diff --git a/src/pybind/rados/rados.pyx b/src/pybind/rados/rados.pyx
index 1f2a049..84f0e33 100644
--- a/src/pybind/rados/rados.pyx
+++ b/src/pybind/rados/rados.pyx
@@ -2799,7 +2799,9 @@ returned %d, but should return zero on success." % (self.name, ret))
             int _flags = flags
 
         with nogil:
-            rados_write_op_operate(_write_op.write_op, self.io, _oid, &_mtime, _flags)
+            ret = rados_write_op_operate(_write_op.write_op, self.io, _oid, &_mtime, _flags)
+        if ret != 0:
+            raise make_ex(ret, "Failed to operate write op for oid %s" % oid)
 
     @requires(('read_op', ReadOp), ('oid', str_type), ('flag', opt(int)))
     def operate_read_op(self, read_op, oid, flag=LIBRADOS_OPERATION_NOFLAG):
@@ -2819,7 +2821,9 @@ returned %d, but should return zero on success." % (self.name, ret))
             int _flag = flag
 
         with nogil:
-            rados_read_op_operate(_read_op.read_op, self.io, _oid, _flag)
+            ret = rados_read_op_operate(_read_op.read_op, self.io, _oid, _flag)
+        if ret != 0:
+            raise make_ex(ret, "Failed to operate read op for oid %s" % oid)
 
     @requires(('read_op', ReadOp), ('start_after', str_type), ('filter_prefix', str_type), ('max_return', int))
     def get_omap_vals(self, read_op, start_after, filter_prefix, max_return):
@@ -2871,7 +2875,7 @@ returned %d, but should return zero on success." % (self.name, ret))
             ReadOp _read_op = read_op
             rados_omap_iter_t iter_addr = NULL
             int _max_return = max_return
-            int prval
+            int prval = 0
 
         with nogil:
             rados_read_op_omap_get_keys(_read_op.read_op, _start_after,
@@ -2896,7 +2900,7 @@ returned %d, but should return zero on success." % (self.name, ret))
             rados_omap_iter_t iter_addr
             char **_keys = to_bytes_array(keys)
             size_t key_num = len(keys)
-            int prval
+            int prval = 0
 
         try:
             with nogil:
diff --git a/src/pybind/rados/setup.py b/src/pybind/rados/setup.py
index 2b0f0b6..14d9da5 100755
--- a/src/pybind/rados/setup.py
+++ b/src/pybind/rados/setup.py
@@ -43,7 +43,6 @@ setup(
         Extension("rados",
             ["rados.pyx"],
             libraries=["rados"],
-            language="c++"
             )
     ], build_dir=os.environ.get("CYTHON_BUILD_DIR", None)),
     cmdclass={
diff --git a/src/pybind/rbd/setup.py b/src/pybind/rbd/setup.py
index 7ca00b4..79d4d58 100755
--- a/src/pybind/rbd/setup.py
+++ b/src/pybind/rbd/setup.py
@@ -43,7 +43,6 @@ setup(
         Extension("rbd",
             ["rbd.pyx"],
             libraries=["rbd"],
-            language="c++"
             )
     ], build_dir=os.environ.get("CYTHON_BUILD_DIR", None), include_path=[
         os.path.join(os.path.dirname(__file__), "..", "rados")]
diff --git a/src/rgw/rgw_acl_s3.cc b/src/rgw/rgw_acl_s3.cc
index aecb7a3..64da1a8 100644
--- a/src/rgw/rgw_acl_s3.cc
+++ b/src/rgw/rgw_acl_s3.cc
@@ -435,7 +435,7 @@ bool RGWAccessControlPolicy_S3::xml_end(const char *el) {
 }
 
 void  RGWAccessControlPolicy_S3::to_xml(ostream& out) {
-  out << "<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">";
+  out << "<AccessControlPolicy xmlns=\"" << XMLNS_AWS_S3 << "\">";
   ACLOwner_S3& _owner = static_cast<ACLOwner_S3 &>(owner);
   RGWAccessControlList_S3& _acl = static_cast<RGWAccessControlList_S3 &>(acl);
   _owner.to_xml(out);
diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc
index f95afc6..3e4bcba 100644
--- a/src/rgw/rgw_admin.cc
+++ b/src/rgw/rgw_admin.cc
@@ -1333,9 +1333,18 @@ static int send_to_remote_gateway(const string& remote, req_info& info,
   return ret;
 }
 
-static int send_to_url(const string& url, RGWAccessKey& key, req_info& info,
+static int send_to_url(const string& url, const string& access,
+                       const string& secret, req_info& info,
                        bufferlist& in_data, JSONParser& parser)
 {
+  if (access.empty() || secret.empty()) {
+    cerr << "An --access-key and --secret must be provided with --url." << std::endl;
+    return -EINVAL;
+  }
+  RGWAccessKey key;
+  key.id = access;
+  key.key = secret;
+
   list<pair<string, string> > params;
   RGWRESTSimpleRequest req(g_ceph_context, url, NULL, &params);
 
@@ -1358,15 +1367,7 @@ static int send_to_remote_or_url(const string& remote, const string& url,
   if (url.empty()) {
     return send_to_remote_gateway(remote, info, in_data, parser);
   }
-
-  if (access.empty() || secret.empty()) {
-    cerr << "An --access-key and --secret must be provided with --url." << std::endl;
-    return -EINVAL;
-  }
-  RGWAccessKey key;
-  key.id = access;
-  key.key = secret;
-  return send_to_url(url, key, info, in_data, parser);
+  return send_to_url(url, access, secret, info, in_data, parser);
 }
 
 static int commit_period(RGWRealm& realm, RGWPeriod& period,
@@ -1481,7 +1482,8 @@ static int update_period(const string& realm_id, const string& realm_name,
   period.fork();
   ret = period.update();
   if(ret < 0) {
-    cerr << "failed to update period: " << cpp_strerror(-ret) << std::endl;
+    // Dropping the error message here, as both the ret codes were handled in
+    // period.update()
     return ret;
   }
   ret = period.store_info(false);
@@ -2383,6 +2385,11 @@ int main(int argc, char **argv)
 
   RGWStreamFlusher f(formatter, cout);
 
+  // not a raw op if 'period update' needs to commit to master
+  bool raw_period_update = opt_cmd == OPT_PERIOD_UPDATE && !commit;
+  // not a raw op if 'period pull' needs to look up remotes
+  bool raw_period_pull = opt_cmd == OPT_PERIOD_PULL && remote.empty() && !url.empty();
+
   bool raw_storage_op = (opt_cmd == OPT_ZONEGROUP_ADD || opt_cmd == OPT_ZONEGROUP_CREATE || opt_cmd == OPT_ZONEGROUP_DELETE ||
 			 opt_cmd == OPT_ZONEGROUP_GET || opt_cmd == OPT_ZONEGROUP_LIST ||  
                          opt_cmd == OPT_ZONEGROUP_SET || opt_cmd == OPT_ZONEGROUP_DEFAULT ||
@@ -2395,7 +2402,7 @@ int main(int argc, char **argv)
 			 opt_cmd == OPT_REALM_CREATE || opt_cmd == OPT_PERIOD_PREPARE ||
 			 opt_cmd == OPT_PERIOD_DELETE || opt_cmd == OPT_PERIOD_GET ||
 			 opt_cmd == OPT_PERIOD_GET_CURRENT || opt_cmd == OPT_PERIOD_LIST ||
-			 (opt_cmd == OPT_PERIOD_UPDATE && !commit) ||
+                         raw_period_update || raw_period_pull ||
 			 opt_cmd == OPT_REALM_DELETE || opt_cmd == OPT_REALM_GET || opt_cmd == OPT_REALM_LIST ||
 			 opt_cmd == OPT_REALM_LIST_PERIODS ||
 			 opt_cmd == OPT_REALM_GET_DEFAULT || opt_cmd == OPT_REALM_REMOVE ||
@@ -2531,11 +2538,30 @@ int main(int argc, char **argv)
                                 commit, remote, url, access_key, secret_key,
                                 formatter);
 	if (ret < 0) {
-          cerr << "period update failed: " << cpp_strerror(-ret) << std::endl;
 	  return ret;
 	}
       }
       break;
+    case OPT_PERIOD_PULL: // period pull --url
+      {
+        if (url.empty()) {
+          cerr << "A --url or --remote must be provided." << std::endl;
+          return -EINVAL;
+        }
+        RGWPeriod period;
+        int ret = do_period_pull(remote, url, access_key, secret_key,
+                                 realm_id, realm_name, period_id, period_epoch,
+                                 &period);
+        if (ret < 0) {
+          cerr << "period pull failed: " << cpp_strerror(-ret) << std::endl;
+          return ret;
+        }
+
+        encode_json("period", period, formatter);
+        formatter->flush(cout);
+        cout << std::endl;
+      }
+      break;
     case OPT_REALM_CREATE:
       {
 	if (realm_name.empty()) {
@@ -2734,6 +2760,10 @@ int main(int argc, char **argv)
       break;
     case OPT_REALM_PULL:
       {
+        if (url.empty()) {
+          cerr << "A --url must be provided." << std::endl;
+          return EINVAL;
+        }
         RGWEnv env;
         req_info info(g_ceph_context, &env);
         info.method = "GET";
@@ -2747,8 +2777,7 @@ int main(int argc, char **argv)
 
         bufferlist bl;
         JSONParser p;
-        int ret = send_to_remote_or_url(remote, url, access_key, secret_key,
-                                        info, bl, p);
+        int ret = send_to_url(url, access_key, secret_key, info, bl, p);
         if (ret < 0) {
           cerr << "request failed: " << cpp_strerror(-ret) << std::endl;
           if (ret == -EACCES) {
@@ -3732,9 +3761,9 @@ int main(int argc, char **argv)
       }
     }
     return 0;
-  case OPT_PERIOD_PULL:
+  case OPT_PERIOD_PULL: // period pull --remote
     {
-      if (remote.empty() && url.empty() ) {
+      if (remote.empty()) {
 	/* use realm master zonegroup as remote */
 	RGWRealm realm(realm_id, realm_name);
 	int ret = realm.init(g_ceph_context, store);
@@ -3756,6 +3785,7 @@ int main(int argc, char **argv)
                                &period);
       if (ret < 0) {
         cerr << "period pull failed: " << cpp_strerror(-ret) << std::endl;
+        return ret;
       }
 
       encode_json("period", period, formatter);
@@ -3769,8 +3799,7 @@ int main(int argc, char **argv)
                               commit, remote, url, access_key, secret_key,
                               formatter);
       if (ret < 0) {
-        cerr << "period update failed: " << cpp_strerror(-ret) << std::endl;
-        return ret;
+	return ret;
       }
     }
     return 0;
diff --git a/src/rgw/rgw_common.h b/src/rgw/rgw_common.h
index 037703e..39e0d4a 100644
--- a/src/rgw/rgw_common.h
+++ b/src/rgw/rgw_common.h
@@ -95,6 +95,10 @@ using ceph::crypto::MD5;
 #define RGW_ATTR_OLH_ID_TAG     RGW_ATTR_OLH_PREFIX "idtag"
 #define RGW_ATTR_OLH_PENDING_PREFIX RGW_ATTR_OLH_PREFIX "pending."
 
+/* RGW File Attributes */
+#define RGW_ATTR_UNIX_KEY1      RGW_ATTR_PREFIX "unix-key1"
+#define RGW_ATTR_UNIX1          RGW_ATTR_PREFIX "unix1"
+
 #define RGW_BUCKETS_OBJ_SUFFIX ".buckets"
 
 #define RGW_MAX_PENDING_CHUNKS  16
diff --git a/src/rgw/rgw_coroutine.cc b/src/rgw/rgw_coroutine.cc
index ef28ae2..a09afc8 100644
--- a/src/rgw/rgw_coroutine.cc
+++ b/src/rgw/rgw_coroutine.cc
@@ -803,6 +803,19 @@ void RGWCoroutine::dump(Formatter *f) const {
   }
 }
 
+RGWSimpleCoroutine::~RGWSimpleCoroutine()
+{
+  if (!called_cleanup) {
+    request_cleanup();
+  }
+}
+
+void RGWSimpleCoroutine::call_cleanup()
+{
+  called_cleanup = true;
+  request_cleanup();
+}
+
 int RGWSimpleCoroutine::operate()
 {
   int ret = 0;
@@ -812,6 +825,7 @@ int RGWSimpleCoroutine::operate()
     yield return state_request_complete();
     yield return state_all_complete();
     drain_all();
+    call_cleanup();
     return set_state(RGWCoroutine_Done, ret);
   }
   return 0;
@@ -821,6 +835,7 @@ int RGWSimpleCoroutine::state_init()
 {
   int ret = init();
   if (ret < 0) {
+    call_cleanup();
     return set_state(RGWCoroutine_Error, ret);
   }
   return 0;
@@ -830,6 +845,7 @@ int RGWSimpleCoroutine::state_send_request()
 {
   int ret = send_request();
   if (ret < 0) {
+    call_cleanup();
     return set_state(RGWCoroutine_Error, ret);
   }
   return io_block(0);
@@ -839,6 +855,7 @@ int RGWSimpleCoroutine::state_request_complete()
 {
   int ret = request_complete();
   if (ret < 0) {
+    call_cleanup();
     return set_state(RGWCoroutine_Error, ret);
   }
   return 0;
@@ -848,6 +865,7 @@ int RGWSimpleCoroutine::state_all_complete()
 {
   int ret = finish();
   if (ret < 0) {
+    call_cleanup();
     return set_state(RGWCoroutine_Error, ret);
   }
   return 0;
diff --git a/src/rgw/rgw_coroutine.h b/src/rgw/rgw_coroutine.h
index 0912857..11addf6 100644
--- a/src/rgw/rgw_coroutine.h
+++ b/src/rgw/rgw_coroutine.h
@@ -536,6 +536,7 @@ public:
     }
   }
   virtual ~RGWCoroutinesManager() {
+    stop();
     completion_mgr->put();
     if (cr_registry) {
       cr_registry->remove(this);
@@ -545,8 +546,9 @@ public:
   int run(list<RGWCoroutinesStack *>& ops);
   int run(RGWCoroutine *op);
   void stop() {
-    going_down.set(1);
-    completion_mgr->go_down();
+    if (going_down.inc() == 1) {
+      completion_mgr->go_down();
+    }
   }
 
   virtual void report_error(RGWCoroutinesStack *op);
@@ -562,6 +564,8 @@ public:
 };
 
 class RGWSimpleCoroutine : public RGWCoroutine {
+  bool called_cleanup;
+
   int operate();
 
   int state_init();
@@ -569,14 +573,17 @@ class RGWSimpleCoroutine : public RGWCoroutine {
   int state_request_complete();
   int state_all_complete();
 
+  void call_cleanup();
+
 public:
-  RGWSimpleCoroutine(CephContext *_cct) : RGWCoroutine(_cct) {}
+  RGWSimpleCoroutine(CephContext *_cct) : RGWCoroutine(_cct), called_cleanup(false) {}
+  ~RGWSimpleCoroutine();
 
   virtual int init() { return 0; }
   virtual int send_request() = 0;
   virtual int request_complete() = 0;
   virtual int finish() { return 0; }
-
+  virtual void request_cleanup() {}
 };
 
 #endif
diff --git a/src/rgw/rgw_cors_s3.cc b/src/rgw/rgw_cors_s3.cc
index 03a4523..f00c65e 100644
--- a/src/rgw/rgw_cors_s3.cc
+++ b/src/rgw/rgw_cors_s3.cc
@@ -158,7 +158,7 @@ bool RGWCORSRule_S3::xml_end(const char *el) {
 
 void RGWCORSConfiguration_S3::to_xml(ostream& out) {
   XMLFormatter f;
-  f.open_object_section("CORSConfiguration");
+  f.open_object_section_in_ns("CORSConfiguration", XMLNS_AWS_S3);
   for(list<RGWCORSRule>::iterator it = rules.begin();
       it != rules.end(); ++it) {
     (static_cast<RGWCORSRule_S3 &>(*it)).to_xml(f);
diff --git a/src/rgw/rgw_cr_rados.cc b/src/rgw/rgw_cr_rados.cc
index 4b91acf..398433c 100644
--- a/src/rgw/rgw_cr_rados.cc
+++ b/src/rgw/rgw_cr_rados.cc
@@ -305,7 +305,7 @@ RGWSimpleRadosLockCR::RGWSimpleRadosLockCR(RGWAsyncRadosProcessor *_async_rados,
   s << "rados lock dest=" << pool << "/" << oid << " lock=" << lock_name << " cookie=" << cookie << " duration=" << duration;
 }
 
-RGWSimpleRadosLockCR::~RGWSimpleRadosLockCR()
+void RGWSimpleRadosLockCR::request_cleanup()
 {
   if (req) {
     req->finish();
@@ -341,7 +341,7 @@ RGWSimpleRadosUnlockCR::RGWSimpleRadosUnlockCR(RGWAsyncRadosProcessor *_async_ra
   set_description() << "rados unlock dest=" << pool << "/" << oid << " lock=" << lock_name << " cookie=" << cookie;
 }
 
-RGWSimpleRadosUnlockCR::~RGWSimpleRadosUnlockCR()
+void RGWSimpleRadosUnlockCR::request_cleanup()
 {
   if (req) {
     req->finish();
@@ -635,7 +635,7 @@ RGWStatObjCR::RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, RGWRados *store,
 {
 }
 
-RGWStatObjCR::~RGWStatObjCR()
+void RGWStatObjCR::request_cleanup()
 {
   if (req) {
     req->finish();
diff --git a/src/rgw/rgw_cr_rados.h b/src/rgw/rgw_cr_rados.h
index f320076..2459e78 100644
--- a/src/rgw/rgw_cr_rados.h
+++ b/src/rgw/rgw_cr_rados.h
@@ -9,7 +9,6 @@ class RGWAsyncRadosRequest : public RefCountedObject {
   RGWCoroutine *caller;
   RGWAioCompletionNotifier *notifier;
 
-  void *user_info;
   int retcode;
 
   bool done;
@@ -19,15 +18,18 @@ class RGWAsyncRadosRequest : public RefCountedObject {
 protected:
   virtual int _send_request() = 0;
 public:
-  RGWAsyncRadosRequest(RGWCoroutine *_caller, RGWAioCompletionNotifier *_cn) : caller(_caller), notifier(_cn),
+  RGWAsyncRadosRequest(RGWCoroutine *_caller, RGWAioCompletionNotifier *_cn) : caller(_caller), notifier(_cn), retcode(0),
                                                                                done(false), lock("RGWAsyncRadosRequest::lock") {
+    notifier->get();
     caller->get();
   }
   virtual ~RGWAsyncRadosRequest() {
+    notifier->put();
     caller->put();
   }
 
   void send_request() {
+    get();
     retcode = _send_request();
     {
       Mutex::Locker l(lock);
@@ -35,6 +37,7 @@ public:
         notifier->cb();
       }
     }
+    put();
   }
 
   int get_ret_status() { return retcode; }
@@ -195,7 +198,7 @@ public:
 						result(_result),
                                                 req(NULL) { }
                                                          
-  ~RGWSimpleRadosReadCR() {
+  void request_cleanup() {
     if (req) {
       req->finish();
     }
@@ -270,7 +273,7 @@ public:
                                                 pattrs(_pattrs),
                                                 req(NULL) { }
                                                          
-  ~RGWSimpleRadosReadAttrsCR() {
+  void request_cleanup() {
     if (req) {
       req->finish();
     }
@@ -302,7 +305,7 @@ public:
     ::encode(_data, bl);
   }
 
-  ~RGWSimpleRadosWriteCR() {
+  void request_cleanup() {
     if (req) {
       req->finish();
     }
@@ -342,7 +345,7 @@ public:
                                                 attrs(_attrs), req(NULL) {
   }
 
-  ~RGWSimpleRadosWriteAttrsCR() {
+  void request_cleanup() {
     if (req) {
       req->finish();
     }
@@ -374,6 +377,7 @@ public:
   RGWRadosSetOmapKeysCR(RGWRados *_store,
 		      rgw_bucket& _pool, const string& _oid,
 		      map<string, bufferlist>& _entries);
+
   ~RGWRadosSetOmapKeysCR();
 
   int send_request();
@@ -400,6 +404,7 @@ public:
 		      const rgw_bucket& _pool, const string& _oid,
 		      const string& _marker,
 		      map<string, bufferlist> *_entries, int _max_entries);
+
   ~RGWRadosGetOmapKeysCR();
 
   int send_request();
@@ -426,7 +431,7 @@ public:
 		      const rgw_bucket& _pool, const string& _oid, const string& _lock_name,
 		      const string& _cookie,
 		      uint32_t _duration);
-  ~RGWSimpleRadosLockCR();
+  void request_cleanup();
 
   int send_request();
   int request_complete();
@@ -447,7 +452,7 @@ public:
   RGWSimpleRadosUnlockCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
 		      const rgw_bucket& _pool, const string& _oid, const string& _lock_name,
 		      const string& _cookie);
-  ~RGWSimpleRadosUnlockCR();
+  void request_cleanup();
 
   int send_request();
   int request_complete();
@@ -518,7 +523,7 @@ public:
                          async_rados(_async_rados), lock(_lock), cond(_cond), secs(_secs), req(NULL) {
   }
 
-  ~RGWWaitCR() {
+  void request_cleanup() {
     wakeup();
     if (req) {
       req->finish();
@@ -616,7 +621,7 @@ public:
                         RGWBucketInfo *_bucket_info) : RGWSimpleCoroutine(_store->ctx()), async_rados(_async_rados), store(_store),
                                                        bucket_name(_bucket_name), bucket_id(_bucket_id),
                                                        bucket_info(_bucket_info), req(NULL) {}
-  ~RGWGetBucketInstanceInfoCR() {
+  void request_cleanup() {
     if (req) {
       req->finish();
     }
@@ -693,7 +698,7 @@ public:
                                        copy_if_newer(_if_newer), req(NULL) {}
 
 
-  ~RGWFetchRemoteObjCR() {
+  void request_cleanup() {
     if (req) {
       req->finish();
     }
@@ -808,7 +813,7 @@ public:
     }
   }
 
-  ~RGWRemoveObjCR() {
+  void request_cleanup() {
     if (req) {
       req->finish();
     }
@@ -931,7 +936,7 @@ class RGWStatObjCR : public RGWSimpleCoroutine {
 	  const rgw_obj& obj, uint64_t *psize = nullptr,
 	  real_time* pmtime = nullptr, uint64_t *pepoch = nullptr,
 	  RGWObjVersionTracker *objv_tracker = nullptr);
-  ~RGWStatObjCR();
+  void request_cleanup();
 
   int send_request() override;
   int request_complete() override;
diff --git a/src/rgw/rgw_cr_rest.h b/src/rgw/rgw_cr_rest.h
index 88a051a..147784f 100644
--- a/src/rgw/rgw_cr_rest.h
+++ b/src/rgw/rgw_cr_rest.h
@@ -51,6 +51,12 @@ public:
     }
     return 0;
   }
+
+  void request_cleanup() {
+    if (http_op) {
+      http_op->put();
+    }
+  }
 };
 
 template <class S, class T>
@@ -89,6 +95,7 @@ public:
     int ret = op->aio_send(bl);
     if (ret < 0) {
       lsubdout(cct, rgw, 0) << "ERROR: failed to send post request" << dendl;
+      op->put();
       return ret;
     }
     std::swap(http_op, op); // store reference in http_op on success
@@ -109,10 +116,17 @@ public:
           << " status=" << op->get_http_status() << std::endl;
       lsubdout(cct, rgw, 0) << "ERROR: failed to wait for op, ret=" << ret
           << ": " << op->to_str() << dendl;
+      op->put();
       return ret;
     }
     return 0;
   }
+
+  void request_cleanup() {
+    if (http_op) {
+      http_op->put();
+    }
+  }
 };
 
 #endif
diff --git a/src/rgw/rgw_file.cc b/src/rgw/rgw_file.cc
index 4c28867..1bc6b1f 100644
--- a/src/rgw/rgw_file.cc
+++ b/src/rgw/rgw_file.cc
@@ -54,7 +54,12 @@ namespace rgw {
 		      RGWFileHandle::FLAG_BUCKET);
       if (get<0>(fhr)) {
 	RGWFileHandle* rgw_fh = get<0>(fhr);
-	rgw_fh->set_times(req.get_ctime());
+	/* restore attributes */
+	auto ux_key = req.get_attr(RGW_ATTR_UNIX_KEY1);
+	auto ux_attrs = req.get_attr(RGW_ATTR_UNIX1);
+	if (ux_key && ux_attrs) {
+	  rgw_fh->decode_attrs(ux_key, ux_attrs);
+	}
       }
     }
     return fhr;
@@ -85,18 +90,25 @@ namespace rgw {
       }
     }
 #endif
-    std::string object_name{path};
-    for (auto ix : { 0, 1 }) {
+
+    /* XXX the need for two round-trip operations to identify file or
+     * directory leaf objects is unnecessary--the current proposed
+     * mechanism to avoid this is to store leaf object names with an
+     * object locator w/o trailing slash */
+
+    /* mutating path */
+    std::string obj_path{parent->relative_object_name()};
+    if ((obj_path.length() > 0) &&
+	(obj_path.back() != '/'))
+      obj_path += "/";
+    obj_path += path;
+
+    for (auto ix : { 0, 1, 2 }) {
       switch (ix) {
       case 0:
       {
-	std::string obj_name{parent->relative_object_name()};
-	if ((obj_name.length() > 0) &&
-	    (obj_name.back() != '/'))
-	  obj_name += "/";
-	obj_name += path;
 	RGWStatObjRequest req(cct, get_user(),
-			      parent->bucket_name(), obj_name,
+			      parent->bucket_name(), obj_path,
 			      RGWStatObjRequest::FLAG_NONE);
 	int rc = rgwlib.get_fe()->execute_req(&req);
 	if ((rc == 0) &&
@@ -106,6 +118,12 @@ namespace rgw {
 	    RGWFileHandle* rgw_fh = get<0>(fhr);
 	    rgw_fh->set_size(req.get_size());
 	    rgw_fh->set_mtime(real_clock::to_timespec(req.get_mtime()));
+	    /* restore attributes */
+	    auto ux_key = req.get_attr(RGW_ATTR_UNIX_KEY1);
+	    auto ux_attrs = req.get_attr(RGW_ATTR_UNIX1);
+	    if (ux_key && ux_attrs) {
+	      rgw_fh->decode_attrs(ux_key, ux_attrs);
+	    }
 	  }
 	  goto done;
 	}
@@ -113,6 +131,33 @@ namespace rgw {
       break;
       case 1:
       {
+	/* try dir form */
+	obj_path += "/";
+	RGWStatObjRequest req(cct, get_user(),
+			      parent->bucket_name(), obj_path,
+			      RGWStatObjRequest::FLAG_NONE);
+	int rc = rgwlib.get_fe()->execute_req(&req);
+	if ((rc == 0) &&
+	    (req.get_ret() == 0)) {
+	  fhr = lookup_fh(parent, path, RGWFileHandle::FLAG_DIRECTORY);
+	  if (get<0>(fhr)) {
+	    RGWFileHandle* rgw_fh = get<0>(fhr);
+	    rgw_fh->set_size(req.get_size());
+	    rgw_fh->set_mtime(real_clock::to_timespec(req.get_mtime()));
+	    /* restore attributes */
+	    auto ux_key = req.get_attr(RGW_ATTR_UNIX_KEY1);
+	    auto ux_attrs = req.get_attr(RGW_ATTR_UNIX1);
+	    if (ux_key && ux_attrs) {
+	      rgw_fh->decode_attrs(ux_key, ux_attrs);
+	    }
+	  }
+	  goto done;
+	}
+      }
+      break;
+      case 2:
+      {
+	std::string object_name{path};
 	RGWStatLeafRequest req(cct, get_user(), parent, object_name);
 	int rc = rgwlib.get_fe()->execute_req(&req);
 	if ((rc == 0) &&
@@ -145,6 +190,24 @@ namespace rgw {
     return fhr;
   } /* RGWLibFS::stat_leaf */
 
+  int RGWLibFS::read(RGWFileHandle* rgw_fh, uint64_t offset, size_t length,
+		     size_t* bytes_read, void* buffer, uint32_t flags)
+  {
+    if (! rgw_fh->is_file())
+      return -EINVAL;
+
+    RGWReadRequest req(get_context(), get_user(), rgw_fh, offset, length,
+		       buffer);
+
+    int rc = rgwlib.get_fe()->execute_req(&req);
+    if ((rc == 0) &&
+	(req.get_ret() == 0)) {
+      *bytes_read = req.nread;
+    }
+
+    return rc;
+  }
+
   int RGWLibFS::unlink(RGWFileHandle* parent, const char *name)
   {
     int rc = 0;
@@ -261,6 +324,24 @@ namespace rgw {
 
     LookupFHResult fhr;
     RGWFileHandle* rgw_fh = nullptr;
+    buffer::list ux_key, ux_attrs;
+
+    fhr = lookup_fh(parent, name,
+		    RGWFileHandle::FLAG_CREATE|
+		    RGWFileHandle::FLAG_DIRECTORY|
+		    RGWFileHandle::FLAG_LOCK);
+    rgw_fh = get<0>(fhr);
+    if (rgw_fh) {
+      rgw_fh->create_stat(st, mask);
+      rgw_fh->set_times(real_clock::now());
+      /* save attrs */
+      rgw_fh->encode_attrs(ux_key, ux_attrs);
+      rgw_fh->stat(st);
+      get<0>(mkr) = rgw_fh;
+    } else {
+      get<1>(mkr) = -EIO;
+      return mkr;
+    }
 
     if (parent->is_root()) {
       /* bucket */
@@ -268,12 +349,20 @@ namespace rgw {
       /* enforce S3 name restrictions */
       rc = valid_s3_bucket_name(bname, false /* relaxed */);
       if (rc != 0) {
-	rc = -EINVAL;
-	goto out;
+	rgw_fh->flags |= RGWFileHandle::FLAG_DELETED;
+	rgw_fh->mtx.unlock();
+	unref(rgw_fh);
+	get<0>(mkr) = nullptr;
+	return mkr;
       }
 
       string uri = "/" + bname; /* XXX get rid of URI some day soon */
       RGWCreateBucketRequest req(get_context(), get_user(), uri);
+
+      /* save attrs */
+      req.emplace_attr(RGW_ATTR_UNIX_KEY1, std::move(ux_key));
+      req.emplace_attr(RGW_ATTR_UNIX1, std::move(ux_attrs));
+
       rc = rgwlib.get_fe()->execute_req(&req);
       rc2 = req.get_ret();
     } else {
@@ -287,29 +376,32 @@ namespace rgw {
 	dir_name += "/";
       dir_name += name;
       dir_name += "/";
+
       RGWPutObjRequest req(get_context(), get_user(), parent->bucket_name(),
 			  dir_name, bl);
+
+      /* save attrs */
+      req.emplace_attr(RGW_ATTR_UNIX_KEY1, std::move(ux_key));
+      req.emplace_attr(RGW_ATTR_UNIX1, std::move(ux_attrs));
+
       rc = rgwlib.get_fe()->execute_req(&req);
       rc2 = req.get_ret();
     }
 
-    if ((rc == 0) &&
-	(rc2 == 0)) {
-      fhr = lookup_fh(parent, name,
-		      RGWFileHandle::FLAG_CREATE|
-		      RGWFileHandle::FLAG_DIRECTORY);
-      rgw_fh = get<0>(fhr);
-      if (rgw_fh) {
-	/* XXX unify timestamps */
-	rgw_fh->create_stat(st, mask);
-	rgw_fh->set_times(real_clock::now());
-	rgw_fh->stat(st);
-	get<0>(mkr) = rgw_fh;
-      } else
-	rc = -EIO;
+    if (! ((rc == 0) &&
+	   (rc2 == 0))) {
+      /* op failed */
+      rgw_fh->flags |= RGWFileHandle::FLAG_DELETED;
+      rgw_fh->mtx.unlock(); /* !LOCKED */
+      unref(rgw_fh);
+      get<0>(mkr) = nullptr;
+      /* fixup rc */
+      if (!rc)
+	rc = rc2;
+    } else {
+      rgw_fh->mtx.unlock(); /* !LOCKED */
     }
 
-  out:
     get<1>(mkr) = rc;
 
     return mkr;
@@ -469,6 +561,26 @@ namespace rgw {
     } while (! stop);
   } /* RGWLibFS::gc */
 
+  void RGWFileHandle::encode_attrs(ceph::buffer::list& ux_key1,
+				   ceph::buffer::list& ux_attrs1)
+  {
+    fh_key fhk(this->fh.fh_hk);
+    rgw::encode(fhk, ux_key1);
+    rgw::encode(*this, ux_attrs1);
+  } /* RGWFileHandle::encode_attrs */
+
+  void RGWFileHandle::decode_attrs(const ceph::buffer::list* ux_key1,
+				   const ceph::buffer::list* ux_attrs1)
+  {
+    fh_key fhk;
+    auto bl_iter_key1  = const_cast<buffer::list*>(ux_key1)->begin();
+    rgw::decode(fhk, bl_iter_key1);
+    assert(this->fh.fh_hk == fhk.fh_hk);
+
+    auto bl_iter_unix1 = const_cast<buffer::list*>(ux_attrs1)->begin();
+    rgw::decode(*this, bl_iter_unix1);
+  } /* RGWFileHandle::decode_attrs */
+
   bool RGWFileHandle::reclaim() {
     fs->fh_cache.remove(fh.fh_hk.object, this, cohort::lru::FLAG_NONE);
     return true;
@@ -688,8 +800,7 @@ namespace rgw {
 
   int RGWWriteRequest::exec_finish()
   {
-    bufferlist bl, aclbl;
-    map<string, bufferlist> attrs;
+    buffer::list bl, aclbl, ux_key, ux_attrs;
     map<string, string>::iterator iter;
     char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
     unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
@@ -710,15 +821,19 @@ namespace rgw {
     buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
     etag = calc_md5;
 
+    bl.append(etag.c_str(), etag.size() + 1);
+    emplace_attr(RGW_ATTR_ETAG, std::move(bl));
+
     policy.encode(aclbl);
-    attrs[RGW_ATTR_ACL] = aclbl;
+    emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
 
-    bl.append(etag.c_str(), etag.size() + 1);
-    attrs[RGW_ATTR_ETAG] = bl;
+    rgw_fh->encode_attrs(ux_key, ux_attrs);
+    emplace_attr(RGW_ATTR_UNIX_KEY1, std::move(ux_key));
+    emplace_attr(RGW_ATTR_UNIX1, std::move(ux_attrs));
 
     for (iter = s->generic_attrs.begin(); iter != s->generic_attrs.end();
 	 ++iter) {
-      bufferlist& attrbl = attrs[iter->first];
+      buffer::list& attrbl = attrs[iter->first];
       const string& val = iter->second;
       attrbl.append(val.c_str(), val.size() + 1);
     }
@@ -730,13 +845,13 @@ namespace rgw {
      * is an SLO or not. Appending the attribute must be performed AFTER
      * processing any input from user in order to prohibit overwriting. */
     if (unlikely(!! slo_info)) {
-      bufferlist slo_userindicator_bl;
+      buffer::list slo_userindicator_bl;
       ::encode("True", slo_userindicator_bl);
-      attrs[RGW_ATTR_SLO_UINDICATOR] = slo_userindicator_bl;
+      emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
     }
 
-    op_ret = processor->complete(etag, &mtime, real_time(), attrs, delete_at, if_match,
-				 if_nomatch);
+    op_ret = processor->complete(etag, &mtime, real_time(), attrs, delete_at,
+				 if_match, if_nomatch);
     if (! op_ret) {
       /* update stats */
       rgw_fh->set_mtime(real_clock::to_timespec(mtime));
@@ -1083,22 +1198,10 @@ int rgw_read(struct rgw_fs *rgw_fs,
 	     size_t length, size_t *bytes_read, void *buffer,
 	     uint32_t flags)
 {
-  CephContext* cct = static_cast<CephContext*>(rgw_fs->rgw);
   RGWLibFS *fs = static_cast<RGWLibFS*>(rgw_fs->fs_private);
   RGWFileHandle* rgw_fh = get_rgwfh(fh);
 
-  if (! rgw_fh->is_file())
-    return -EINVAL;
-
-  RGWReadRequest req(cct, fs->get_user(), rgw_fh, offset, length, buffer);
-
-  int rc = rgwlib.get_fe()->execute_req(&req);
-  if ((rc == 0) &&
-      (req.get_ret() == 0)) {
-    *bytes_read = req.nread;
-  }
-
-  return rc;
+  return fs->read(rgw_fh, offset, length, bytes_read, buffer, flags);
 }
 
 /*
diff --git a/src/rgw/rgw_file.h b/src/rgw/rgw_file.h
index 26b97b4..42b6649 100644
--- a/src/rgw/rgw_file.h
+++ b/src/rgw/rgw_file.h
@@ -100,8 +100,24 @@ namespace rgw {
       fh_hk.bucket = XXH64(_b.c_str(), _o.length(), seed);
       fh_hk.object = XXH64(_o.c_str(), _o.length(), seed);
     }
+
+    void encode(buffer::list& bl) const {
+      ENCODE_START(1, 1, bl);
+      ::encode(fh_hk.bucket, bl);
+      ::encode(fh_hk.object, bl);
+      ENCODE_FINISH(bl);
+    }
+
+    void decode(bufferlist::iterator& bl) {
+      DECODE_START(1, bl);
+      ::decode(fh_hk.bucket, bl);
+      ::decode(fh_hk.object, bl);
+      DECODE_FINISH(bl);
+    }
   }; /* fh_key */
 
+  WRITE_CLASS_ENCODER(fh_key);
+
   inline bool operator<(const fh_key& lhs, const fh_key& rhs)
   {
     return ((lhs.fh_hk.bucket < rhs.fh_hk.bucket) ||
@@ -153,7 +169,7 @@ namespace rgw {
     using marker_cache_t = flat_map<uint64_t, dirent_string>;
     using name_cache_t = flat_map<dirent_string, uint8_t>;
 
-    struct state {
+    struct State {
       uint64_t dev;
       size_t size;
       uint64_t nlink;
@@ -163,7 +179,7 @@ namespace rgw {
       struct timespec ctime;
       struct timespec mtime;
       struct timespec atime;
-      state() : dev(0), size(0), nlink(1), owner_uid(0), owner_gid(0),
+      State() : dev(0), size(0), nlink(1), owner_uid(0), owner_gid(0),
 		ctime{0,0}, mtime{0,0}, atime{0,0} {}
     } state;
 
@@ -216,6 +232,9 @@ namespace rgw {
     static constexpr uint32_t FLAG_LOCK =   0x0040;
     static constexpr uint32_t FLAG_DELETED = 0x0080;
 
+#define CREATE_FLAGS(x) \
+    ((x) & ~(RGWFileHandle::FLAG_CREATE|RGWFileHandle::FLAG_LOCK))
+
     friend class RGWLibFS;
 
   private:
@@ -323,10 +342,10 @@ namespace rgw {
       if (mask & RGW_SETATTR_MODE)  {
 	switch (fh.fh_type) {
 	case RGW_FS_TYPE_DIRECTORY:
-	  st->st_mode = state.unix_mode|S_IFDIR;
+	  state.unix_mode = st->st_mode|S_IFDIR;
 	  break;
 	case RGW_FS_TYPE_FILE:
-	  st->st_mode = state.unix_mode|S_IFREG;
+	  state.unix_mode = st->st_mode|S_IFREG;
       default:
 	break;
 	}
@@ -350,11 +369,11 @@ namespace rgw {
 
       switch (fh.fh_type) {
       case RGW_FS_TYPE_DIRECTORY:
-	st->st_mode = RGW_RWXMODE|S_IFDIR;
+	st->st_mode = RGW_RWXMODE|S_IFDIR /* state.unix_mode|S_IFDIR */;
 	st->st_nlink = 3;
 	break;
       case RGW_FS_TYPE_FILE:
-	st->st_mode = RGW_RWMODE|S_IFREG;
+	st->st_mode = RGW_RWMODE|S_IFREG /* state.unix_mode|S_IFREG */;
 	st->st_nlink = 1;
 	st->st_blksize = 4096;
 	st->st_size = state.size;
@@ -427,7 +446,7 @@ namespace rgw {
 	return fh_key(fhk.fh_hk.object, name.c_str());
       else {
 	std::string key_name = make_key_name(name.c_str());
-	return fh_key(fhk.fh_hk.object, key_name.c_str());
+	return fh_key(fhk.fh_hk.bucket, key_name.c_str());
       }
     }
 
@@ -522,6 +541,46 @@ namespace rgw {
       state.atime = ts;
     }
 
+    void encode(buffer::list& bl) const {
+      ENCODE_START(1, 1, bl);
+      ::encode(uint32_t(fh.fh_type), bl);
+      ::encode(state.dev, bl);
+      ::encode(state.size, bl);
+      ::encode(state.nlink, bl);
+      ::encode(state.owner_uid, bl);
+      ::encode(state.owner_gid, bl);
+      ::encode(state.unix_mode, bl);
+      for (const auto& t : { state.ctime, state.mtime, state.atime }) {
+	::encode(real_clock::from_timespec(t), bl);
+      }
+      ENCODE_FINISH(bl);
+    }
+
+    void decode(bufferlist::iterator& bl) {
+      DECODE_START(1, bl);
+      uint32_t fh_type;
+      ::decode(fh_type, bl);
+      assert(fh.fh_type == fh_type);
+      ::decode(state.dev, bl);
+      ::decode(state.size, bl);
+      ::decode(state.nlink, bl);
+      ::decode(state.owner_uid, bl);
+      ::decode(state.owner_gid, bl);
+      ::decode(state.unix_mode, bl);
+      ceph::real_time enc_time;
+      for (auto t : { &(state.ctime), &(state.mtime), &(state.atime) }) {
+	::decode(enc_time, bl);
+	*t = real_clock::to_timespec(enc_time);
+      }
+      DECODE_FINISH(bl);
+    }
+
+    void encode_attrs(ceph::buffer::list& ux_key1,
+		      ceph::buffer::list& ux_attrs1);
+
+    void decode_attrs(const ceph::buffer::list* ux_key1,
+		      const ceph::buffer::list* ux_attrs1);
+
     virtual bool reclaim();
 
     typedef cohort::lru::LRU<std::mutex> FhLRU;
@@ -605,6 +664,8 @@ namespace rgw {
 
   }; /* RGWFileHandle */
 
+  WRITE_CLASS_ENCODER(RGWFileHandle);
+
   static inline RGWFileHandle* get_rgwfh(struct rgw_file_handle* fh) {
     return static_cast<RGWFileHandle*>(fh->fh_private);
   }
@@ -738,7 +799,7 @@ namespace rgw {
 
     /* find or create an RGWFileHandle */
     LookupFHResult lookup_fh(RGWFileHandle* parent, const char *name,
-			     const uint32_t cflags = RGWFileHandle::FLAG_NONE) {
+			     const uint32_t flags = RGWFileHandle::FLAG_NONE) {
       using std::get;
 
       LookupFHResult fhr { nullptr, RGWFileHandle::FLAG_NONE };
@@ -748,11 +809,16 @@ namespace rgw {
 	return fhr;
 
       RGWFileHandle::FHCache::Latch lat;
-      memset(&lat, 0, sizeof(lat)); // XXXX testing
 
       std::string obj_name{name};
       std::string key_name{parent->make_key_name(name)};
 
+      lsubdout(get_context(), rgw, 10)
+	<< __func__ << " lookup called on "
+	<< parent->object_name() << " for " << key_name
+	<< " (" << obj_name << ")"
+	<< dendl;
+
       fh_key fhk = parent->make_fhk(key_name);
 
     retry:
@@ -777,21 +843,23 @@ namespace rgw {
 	  goto retry; /* !LATCHED */
 	}
 	/* LATCHED, LOCKED */
-	if (! (fh->flags & RGWFileHandle::FLAG_LOCK))
+	if (! (flags & RGWFileHandle::FLAG_LOCK))
 	  fh->mtx.unlock(); /* ! LOCKED */
       } else {
 	/* make or re-use handle */
 	RGWFileHandle::Factory prototype(this, get_inst(), parent, fhk,
-					 obj_name, cflags);
+					 obj_name, CREATE_FLAGS(flags));
 	fh = static_cast<RGWFileHandle*>(
 	  fh_lru.insert(&prototype,
 			cohort::lru::Edge::MRU,
 			cohort::lru::FLAG_INITIAL));
 	if (fh) {
+	  /* lock fh (LATCHED) */
+	  if (flags & RGWFileHandle::FLAG_LOCK)
+	    fh->mtx.lock();
+	  /* inserts, releasing latch */
 	  fh_cache.insert_latched(fh, lat, RGWFileHandle::FHCache::FLAG_UNLOCK);
 	  get<1>(fhr) |= RGWFileHandle::FLAG_CREATE;
-	  if (fh->flags & RGWFileHandle::FLAG_LOCK)
-	    fh->mtx.lock();
 	  goto out; /* !LATCHED */
 	} else {
 	  lat.lock->unlock();
@@ -821,6 +889,9 @@ namespace rgw {
     LookupFHResult stat_leaf(RGWFileHandle* parent, const char *path,
 			     uint32_t flags);
 
+    int read(RGWFileHandle* rgw_fh, uint64_t offset, size_t length,
+	     size_t* bytes_read, void* buffer, uint32_t flags);
+
     int rename(RGWFileHandle* old_fh, RGWFileHandle* new_fh,
 	       const char *old_name, const char *new_name);
 
@@ -829,6 +900,8 @@ namespace rgw {
 
     MkObjResult mkdir(RGWFileHandle* parent, const char *name, struct stat *st,
 		      uint32_t mask, uint32_t flags);
+    MkObjResult mkdir2(RGWFileHandle* parent, const char *name, struct stat *st,
+		      uint32_t mask, uint32_t flags);
 
     int unlink(RGWFileHandle* parent, const char *name);
 
@@ -1561,7 +1634,12 @@ public:
   uint64_t get_size() { return _size; }
   real_time ctime() { return mod_time; } // XXX
   real_time mtime() { return mod_time; }
-  map<string, bufferlist>& get_attrs() { return attrs; }
+  std::map<string, bufferlist>& get_attrs() { return attrs; }
+
+  buffer::list* get_attr(const std::string& k) {
+    auto iter = attrs.find(k);
+    return (iter != attrs.end()) ? &(iter->second) : nullptr;
+  }
 
   virtual bool only_bucket() { return false; }
 
@@ -1602,6 +1680,7 @@ public:
   virtual int send_response_data(ceph::buffer::list& _bl, off_t s_off,
 				off_t e_off) {
     /* NOP */
+    /* XXX save attrs? */
     return 0;
   }
 
@@ -1622,6 +1701,7 @@ class RGWStatBucketRequest : public RGWLibRequest,
 {
 public:
   std::string uri;
+  std::map<std::string, buffer::list> attrs;
 
   RGWStatBucketRequest(CephContext* _cct, RGWUserInfo *_user,
 		       const std::string& _path)
@@ -1630,6 +1710,11 @@ public:
     op = this;
   }
 
+  buffer::list* get_attr(const std::string& k) {
+    auto iter = attrs.find(k);
+    return (iter != attrs.end()) ? &(iter->second) : nullptr;
+  }
+
   virtual bool only_bucket() { return false; }
 
   virtual int op_init() {
@@ -1672,6 +1757,7 @@ public:
 
   virtual void send_response() {
     bucket.creation_time = get_state()->bucket_info.creation_time;
+    std::swap(attrs, get_state()->bucket_attrs);
   }
 
   bool matched() {
@@ -1904,7 +1990,11 @@ public:
 		    const std::string& _src_name, const std::string& _dst_name)
     : RGWLibRequest(_cct, _user), src_parent(_src_parent),
       dst_parent(_dst_parent), src_name(_src_name), dst_name(_dst_name) {
+    /* all requests have this */
     op = this;
+
+    /* allow this request to replace selected attrs */
+    attrs_mod = RGWRados::ATTRSMOD_MERGE;
   }
 
   virtual bool only_bucket() { return true; }
@@ -1940,6 +2030,14 @@ public:
     if (! valid_s3_object_name(dest_object))
       return -ERR_INVALID_OBJECT_NAME;
 
+    /* XXX and fixup key attr (could optimize w/string ref and
+     * dest_object) */
+    buffer::list ux_key;
+    std::string key_name{dst_parent->make_key_name(dst_name.c_str())};
+    fh_key fhk = dst_parent->make_fhk(key_name);
+    rgw::encode(fhk, ux_key);
+    emplace_attr(RGW_ATTR_UNIX_KEY1, std::move(ux_key));
+
 #if 0 /* XXX needed? */
     s->relative_uri = uri;
     s->info.request_uri = uri; // XXX
diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc
index c781eb9..d050821 100644
--- a/src/rgw/rgw_op.cc
+++ b/src/rgw/rgw_op.cc
@@ -1879,9 +1879,8 @@ static void populate_with_generic_attrs(const req_state * const s,
 void RGWCreateBucket::execute()
 {
   RGWAccessControlPolicy old_policy(s->cct);
-  map<string, bufferlist> attrs;
-  bufferlist aclbl;
-  bufferlist corsbl;
+  buffer::list aclbl;
+  buffer::list corsbl;
   bool existed;
   string bucket_name;
   rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
@@ -1974,17 +1973,17 @@ void RGWCreateBucket::execute()
   }
 
   policy.encode(aclbl);
-  attrs[RGW_ATTR_ACL] = aclbl;
+  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
 
   if (has_cors) {
     cors_config.encode(corsbl);
-    attrs[RGW_ATTR_CORS] = corsbl;
+    emplace_attr(RGW_ATTR_CORS, std::move(corsbl));
   }
   s->bucket.tenant = s->bucket_tenant; /* ignored if bucket exists */
   s->bucket.name = s->bucket_name;
-  op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id, placement_rule,
-                                swift_ver_location,
-				attrs, info, pobjv, &ep_objv, creation_time,
+  op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id,
+				placement_rule, swift_ver_location, attrs,
+				info, pobjv, &ep_objv, creation_time,
 				pmaster_bucket, true);
   /* continue if EEXIST and create_bucket will fail below.  this way we can
    * recover from a partial create by retrying it. */
@@ -2332,14 +2331,12 @@ void RGWPutObj::execute()
   unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
   MD5 hash;
   bufferlist bl, aclbl;
-  map<string, bufferlist> attrs;
   int len;
   map<string, string>::iterator iter;
   bool multipart;
 
   bool need_calc_md5 = (dlo_manifest == NULL) && (slo_info == NULL);
 
-
   perfcounter->inc(l_rgw_put);
   op_ret = -EINVAL;
   if (s->object.empty()) {
@@ -2519,8 +2516,7 @@ void RGWPutObj::execute()
   }
 
   policy.encode(aclbl);
-
-  attrs[RGW_ATTR_ACL] = aclbl;
+  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
 
   if (dlo_manifest) {
     op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
@@ -2535,7 +2531,7 @@ void RGWPutObj::execute()
   if (slo_info) {
     bufferlist manifest_bl;
     ::encode(*slo_info, manifest_bl);
-    attrs[RGW_ATTR_SLO_MANIFEST] = manifest_bl;
+    emplace_attr(RGW_ATTR_SLO_MANIFEST, std::move(manifest_bl));
 
     hash.Update((byte *)slo_info->raw_data, slo_info->raw_data_len);
     complete_etag(hash, &etag);
@@ -2547,7 +2543,7 @@ void RGWPutObj::execute()
     goto done;
   }
   bl.append(etag.c_str(), etag.size() + 1);
-  attrs[RGW_ATTR_ETAG] = bl;
+  emplace_attr(RGW_ATTR_ETAG, std::move(bl));
 
   for (iter = s->generic_attrs.begin(); iter != s->generic_attrs.end();
        ++iter) {
@@ -2565,11 +2561,11 @@ void RGWPutObj::execute()
   if (slo_info) {
     bufferlist slo_userindicator_bl;
     ::encode("True", slo_userindicator_bl);
-    attrs[RGW_ATTR_SLO_UINDICATOR] = slo_userindicator_bl;
+    emplace_attr(RGW_ATTR_SLO_UINDICATOR, std::move(slo_userindicator_bl));
   }
 
-  op_ret = processor->complete(etag, &mtime, real_time(), attrs, delete_at, if_match,
-			       if_nomatch);
+  op_ret = processor->complete(etag, &mtime, real_time(), attrs, delete_at,
+			      if_match, if_nomatch);
 
 done:
   dispose_processor(processor);
@@ -2609,7 +2605,7 @@ void RGWPostObj::execute()
   char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
   unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE];
   MD5 hash;
-  bufferlist bl, aclbl;
+  buffer::list bl, aclbl;
   int len = 0;
 
   // read in the data from the POST form
@@ -2677,17 +2673,17 @@ void RGWPostObj::execute()
   hash.Final(m);
   buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5);
 
-  policy.encode(aclbl);
   etag = calc_md5;
-
   bl.append(etag.c_str(), etag.size() + 1);
-  attrs[RGW_ATTR_ETAG] = bl;
-  attrs[RGW_ATTR_ACL] = aclbl;
+  emplace_attr(RGW_ATTR_ETAG, std::move(bl));
+
+  policy.encode(aclbl);
+  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
 
   if (content_type.size()) {
     bufferlist ct_bl;
     ct_bl.append(content_type.c_str(), content_type.size() + 1);
-    attrs[RGW_ATTR_CONTENT_TYPE] = ct_bl;
+    emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl));
   }
 
   op_ret = processor->complete(etag, NULL, real_time(), attrs, delete_at);
@@ -2822,7 +2818,7 @@ void RGWPutMetadataBucket::pre_exec()
 
 void RGWPutMetadataBucket::execute()
 {
-  map<string, bufferlist> attrs, orig_attrs;
+  map<string, buffer::list> orig_attrs;
 
   op_ret = get_params();
   if (op_ret < 0) {
@@ -2837,26 +2833,27 @@ void RGWPutMetadataBucket::execute()
     return;
   }
 
-  orig_attrs = s->bucket_attrs;
+  orig_attrs = s->bucket_attrs; /* XXX map copy */
   prepare_add_del_attrs(orig_attrs, rmattr_names, attrs);
   populate_with_generic_attrs(s, attrs);
 
   if (has_policy) {
-    bufferlist bl;
+    buffer::list bl;
     policy.encode(bl);
-    attrs[RGW_ATTR_ACL] = bl;
+    emplace_attr(RGW_ATTR_ACL, std::move(bl));
   }
 
   if (has_cors) {
-    bufferlist bl;
+    buffer::list bl;
     cors_config.encode(bl);
-    attrs[RGW_ATTR_CORS] = bl;
+    emplace_attr(RGW_ATTR_CORS, std::move(bl));
   }
 
   s->bucket_info.swift_ver_location = swift_ver_location;
   s->bucket_info.swift_versioning = (!swift_ver_location.empty());
 
-  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker);
+  op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
+				&s->bucket_info.objv_tracker);
 }
 
 int RGWPutMetadataObject::verify_permission()
@@ -3221,8 +3218,8 @@ int RGWCopyObj::init_common()
 
   bufferlist aclbl;
   dest_policy.encode(aclbl);
+  emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
 
-  attrs[RGW_ATTR_ACL] = aclbl;
   rgw_get_request_metadata(s->cct, s->info, attrs);
 
   map<string, string>::iterator iter;
diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h
index 825dd93..c8b1cd9 100644
--- a/src/rgw/rgw_op.h
+++ b/src/rgw/rgw_op.h
@@ -536,6 +536,7 @@ protected:
   bool has_cors;
   RGWCORSConfiguration cors_config;
   string swift_ver_location;
+  map<string, buffer::list> attrs;
   set<string> rmattr_names;
 
   bufferlist in_data;
@@ -545,6 +546,10 @@ protected:
 public:
   RGWCreateBucket() : has_cors(false) {}
 
+  void emplace_attr(std::string&& key, buffer::list&& bl) {
+    attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */
+  }
+
   int verify_permission();
   void pre_exec();
   void execute();
@@ -647,7 +652,7 @@ protected:
   RGWAccessControlPolicy policy;
   const char *dlo_manifest;
   RGWSLOInfo *slo_info;
-
+  map<string, bufferlist> attrs;
   ceph::real_time mtime;
   uint64_t olh_epoch;
   string version_id;
@@ -674,6 +679,10 @@ public:
     policy.set_ctx(s->cct);
   }
 
+  void emplace_attr(std::string&& key, buffer::list&& bl) {
+    attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */
+  }
+
   virtual RGWPutObjProcessor *select_processor(RGWObjectCtx& obj_ctx, bool *is_multipart);
   void dispose_processor(RGWPutObjProcessor *processor);
 
@@ -713,6 +722,10 @@ public:
 		 supplied_md5_b64(NULL), supplied_etag(NULL),
 		 data_pending(false) {}
 
+  void emplace_attr(std::string&& key, buffer::list&& bl) {
+    attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */
+  }
+
   virtual void init(RGWRados *store, struct req_state *s, RGWHandler *h) {
     RGWOp::init(store, s, h);
     policy.set_ctx(s->cct);
@@ -762,6 +775,7 @@ public:
 
 class RGWPutMetadataBucket : public RGWOp {
 protected:
+  map<string, buffer::list> attrs;
   set<string> rmattr_names;
   bool has_policy, has_cors;
   RGWAccessControlPolicy policy;
@@ -774,10 +788,15 @@ public:
     : has_policy(false), has_cors(false)
   {}
 
+  void emplace_attr(std::string&& key, buffer::list&& bl) {
+    attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */
+  }
+
   virtual void init(RGWRados *store, struct req_state *s, RGWHandler *h) {
     RGWOp::init(store, s, h);
     policy.set_ctx(s->cct);
   }
+
   int verify_permission();
   void pre_exec();
   void execute();
@@ -861,7 +880,7 @@ protected:
   ceph::real_time unmod_time;
   ceph::real_time *mod_ptr;
   ceph::real_time *unmod_ptr;
-  map<string, bufferlist> attrs;
+  map<string, buffer::list> attrs;
   string src_tenant_name, src_bucket_name;
   rgw_bucket src_bucket;
   rgw_obj_key src_object;
@@ -909,6 +928,10 @@ public:
                                   string& bucket_name,
                                   rgw_obj_key& object);
 
+  void emplace_attr(std::string&& key, buffer::list&& bl) {
+    attrs.emplace(std::move(key), std::move(bl));
+  }
+
   virtual void init(RGWRados *store, struct req_state *s, RGWHandler *h) {
     RGWOp::init(store, s, h);
     dest_policy.set_ctx(s->cct);
diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc
index 2bcc7b6..47ba962 100644
--- a/src/rgw/rgw_rados.cc
+++ b/src/rgw/rgw_rados.cc
@@ -106,14 +106,6 @@ static string RGW_DEFAULT_PERIOD_ROOT_POOL = "rgw.root";
 
 #define dout_subsys ceph_subsys_rgw
 
-struct bucket_info_entry {
-  RGWBucketInfo info;
-  real_time mtime;
-  map<string, bufferlist> attrs;
-};
-
-static RGWChainedCacheImpl<bucket_info_entry> binfo_cache;
-
 void RGWDefaultZoneGroupInfo::dump(Formatter *f) const {
   encode_json("default_zonegroup", default_zonegroup, f);
 }
@@ -1147,7 +1139,7 @@ int RGWPeriod::add_zonegroup(const RGWZoneGroup& zonegroup)
   if (zonegroup.realm_id != realm_id) {
     return 0;
   }
-  int ret = period_map.update(zonegroup);
+  int ret = period_map.update(zonegroup, cct);
   if (ret < 0) {
     ldout(cct, 0) << "ERROR: updating period map: " << cpp_strerror(-ret) << dendl;
     return ret;
@@ -1184,9 +1176,8 @@ int RGWPeriod::update()
       master_zone = zg.master_zone;
     }
 
-    int ret = period_map.update(zg);
+    int ret = period_map.update(zg, cct);
     if (ret < 0) {
-      ldout(cct, 0) << "ERROR: updating period map: " << cpp_strerror(-ret) << dendl;
       return ret;
     }
   }
@@ -1204,6 +1195,14 @@ int RGWPeriod::reflect()
       ldout(cct, 0) << "ERROR: failed to store zonegroup info for zonegroup=" << iter.first << ": " << cpp_strerror(-r) << dendl;
       return r;
     }
+    if (zg.is_master_zonegroup()) {
+      // set master as default if no default exists
+      r = zg.set_as_default(true);
+      if (r == 0) {
+        ldout(cct, 1) << "Set the period's master zonegroup " << zg.get_id()
+            << " as the default" << dendl;
+      }
+    }
   }
   return 0;
 }
@@ -1549,12 +1548,7 @@ int RGWZoneParams::init(CephContext *cct, RGWRados *store, bool setup_obj, bool
     name = cct->_conf->rgw_zone;
   }
 
-  int ret = RGWSystemMetaObj::init(cct, store, setup_obj, old_format);
-  if (ret < 0) {
-    return ret;
-  }
-
-  return ret;
+  return RGWSystemMetaObj::init(cct, store, setup_obj, old_format);
 }
 
 int RGWZoneParams::read_default_id(string& default_id, bool old_format)
@@ -1620,9 +1614,11 @@ void RGWPeriodMap::decode(bufferlist::iterator& bl) {
   }
 }
 
-int RGWPeriodMap::update(const RGWZoneGroup& zonegroup)
+int RGWPeriodMap::update(const RGWZoneGroup& zonegroup, CephContext *cct)
 {
   if (zonegroup.is_master && (!master_zonegroup.empty() && zonegroup.get_id() != master_zonegroup)) {
+    ldout(cct,0) << "Error updating periodmap, multiple master zonegroups configured "<< dendl;
+    ldout(cct,0) << "master zonegroup: " << master_zonegroup << " and  " << zonegroup.get_id() <<dendl;
     return -EINVAL;
   }
   map<string, RGWZoneGroup>::iterator iter = zonegroups.find(zonegroup.get_id());
@@ -3123,6 +3119,7 @@ void RGWRados::finalize()
   if (cr_registry) {
     cr_registry->put();
   }
+  delete binfo_cache;
 }
 
 /** 
@@ -3719,7 +3716,8 @@ int RGWRados::init_complete()
   }
   ldout(cct, 20) << __func__ << " bucket index max shards: " << bucket_index_max_shards << dendl;
 
-  binfo_cache.init(this);
+  binfo_cache = new RGWChainedCacheImpl<bucket_info_entry>;
+  binfo_cache->init(this);
 
   return ret;
 }
@@ -6975,6 +6973,12 @@ bool RGWRados::is_meta_master()
   */
 bool RGWRados::is_syncing_bucket_meta(rgw_bucket& bucket)
 {
+
+  /* no current period */
+  if (current_period.get_id().empty()) {
+    return false;
+  }
+
   /* zonegroup is not master zonegroup */
   if (!get_zonegroup().is_master) {
     return false;
@@ -7130,7 +7134,7 @@ int RGWRados::Object::complete_atomic_modification()
   cls_rgw_obj_chain chain;
   store->update_gc_chain(obj, state->manifest, &chain);
 
-  string tag = (state->obj_tag.c_str() ? state->obj_tag.c_str() : "");
+  string tag = state->obj_tag.to_str();
   int ret = store->gc->send_chain(chain, tag, false);  // do it async
 
   return ret;
@@ -10366,7 +10370,7 @@ int RGWRados::get_bucket_info(RGWObjectCtx& obj_ctx,
   string bucket_entry;
   rgw_make_bucket_entry_name(tenant, bucket_name, bucket_entry);
 
-  if (binfo_cache.find(bucket_entry, &e)) {
+  if (binfo_cache->find(bucket_entry, &e)) {
     info = e.info;
     if (pattrs)
       *pattrs = e.attrs;
@@ -10436,7 +10440,7 @@ int RGWRados::get_bucket_info(RGWObjectCtx& obj_ctx,
 
 
   /* chain to both bucket entry point and bucket instance */
-  if (!binfo_cache.put(this, bucket_entry, &e, cache_info_entries)) {
+  if (!binfo_cache->put(this, bucket_entry, &e, cache_info_entries)) {
     ldout(cct, 20) << "couldn't put binfo cache entry, might have raced with data changes" << dendl;
   }
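
The bucket-info cache also changes lifetime here: instead of a file-scope static RGWChainedCacheImpl<bucket_info_entry>, RGWRados now owns a pointer that is allocated in init_complete() and freed in finalize(). A rough sketch of that ownership pattern, using hypothetical names:

    // header: only a forward declaration is needed for the pointer member
    template <class T> class ChainedCache;
    struct Entry { /* cached bucket info */ };

    class Store {
      ChainedCache<Entry> *cache = nullptr;   // previously a file-scope static
    public:
      int init_complete();
      void finalize();
    };

    // implementation file: the full template definition is visible here
    template <class T>
    class ChainedCache {
    public:
      void init(Store *owner) { (void)owner; }
    };

    int Store::init_complete() {
      cache = new ChainedCache<Entry>;   // created once the store is usable
      cache->init(this);
      return 0;
    }

    void Store::finalize() {
      delete cache;                      // torn down together with the store
      cache = nullptr;
    }
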
 
diff --git a/src/rgw/rgw_rados.h b/src/rgw/rgw_rados.h
index 1195aa3..612a523 100644
--- a/src/rgw/rgw_rados.h
+++ b/src/rgw/rgw_rados.h
@@ -1181,7 +1181,7 @@ struct RGWPeriodMap
   void encode(bufferlist& bl) const;
   void decode(bufferlist::iterator& bl);
 
-  int update(const RGWZoneGroup& zonegroup);
+  int update(const RGWZoneGroup& zonegroup, CephContext *cct);
 
   void dump(Formatter *f) const;
   void decode_json(JSONObj *obj);
@@ -1690,6 +1690,15 @@ struct RGWObjectCtx {
 class Finisher;
 class RGWAsyncRadosProcessor;
 
+template <class T>
+class RGWChainedCacheImpl;
+
+struct bucket_info_entry {
+  RGWBucketInfo info;
+  real_time mtime;
+  map<string, bufferlist> attrs;
+};
+
 class RGWRados
 {
   friend class RGWGC;
@@ -1797,6 +1806,9 @@ protected:
   RWLock handle_lock;
   std::map<pthread_t, int> rados_map;
 
+  using RGWChainedCacheImpl_bucket_info_entry = RGWChainedCacheImpl<bucket_info_entry>;
+  RGWChainedCacheImpl_bucket_info_entry *binfo_cache;
+
   librados::IoCtx gc_pool_ctx;        // .rgw.gc
   librados::IoCtx objexp_pool_ctx;
 
@@ -1831,6 +1843,7 @@ public:
                max_bucket_id(0), cct(NULL),
                rados(NULL), next_rados_handle(0),
                num_rados_handles(0), handle_lock("rados_handle_lock"),
+               binfo_cache(NULL),
                pools_initialized(false),
                quota_handler(NULL),
                finisher(NULL),
@@ -1904,7 +1917,7 @@ public:
     if (id == get_zonegroup().get_id()) {
       zonegroup = get_zonegroup();
     } else if (!current_period.get_id().empty()) {
-      ret = current_period.get_zonegroup(zonegroup, zonegroup_id);
+      ret = current_period.get_zonegroup(zonegroup, id);
     }
     return ret;
   }
diff --git a/src/rgw/rgw_realm_reloader.cc b/src/rgw/rgw_realm_reloader.cc
index a0c31fe..8f38e98 100644
--- a/src/rgw/rgw_realm_reloader.cc
+++ b/src/rgw/rgw_realm_reloader.cc
@@ -80,6 +80,10 @@ void RGWRealmReloader::reload()
   frontends->pause();
 
   ldout(cct, 1) << "Frontends paused" << dendl;
+
+  // TODO: make RGWRados responsible for rgw_log_usage lifetime
+  rgw_log_usage_finalize();
+
   // destroy the existing store
   RGWStoreManager::close_storage(store);
   store = nullptr;
diff --git a/src/rgw/rgw_realm_watcher.cc b/src/rgw/rgw_realm_watcher.cc
index 6e47c48..d9383d6 100644
--- a/src/rgw/rgw_realm_watcher.cc
+++ b/src/rgw/rgw_realm_watcher.cc
@@ -124,7 +124,12 @@ int RGWRealmWatcher::watch_start(RGWRealm& realm)
 int RGWRealmWatcher::watch_restart()
 {
   assert(!watch_oid.empty());
-  int r = pool_ctx.watch2(watch_oid, &watch_handle, this);
+  int r = pool_ctx.unwatch2(watch_handle);
+  if (r < 0) {
+    lderr(cct) << "Failed to unwatch on " << watch_oid
+        << " with " << cpp_strerror(-r) << dendl;
+  }
+  r = pool_ctx.watch2(watch_oid, &watch_handle, this);
   if (r < 0)
     lderr(cct) << "Failed to restart watch on " << watch_oid
         << " with " << cpp_strerror(-r) << dendl;
diff --git a/src/rgw/rgw_rest_conn.cc b/src/rgw/rgw_rest_conn.cc
index 78fe0ae..88bfbc6 100644
--- a/src/rgw/rgw_rest_conn.cc
+++ b/src/rgw/rgw_rest_conn.cc
@@ -54,7 +54,7 @@ int RGWRESTConn::forward(const rgw_user& uid, req_info& info, obj_version *objv,
   param_list_t params;
   if (!uid.empty())
     params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "uid", uid_str));
-  params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "region", self_zone_group));
+  params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "zonegroup", self_zone_group));
   if (objv) {
     params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "tag", objv->tag));
     char buf[16];
@@ -82,7 +82,7 @@ int RGWRESTConn::put_obj_init(const rgw_user& uid, rgw_obj& obj, uint64_t obj_si
   string uid_str = uid.to_str();
   param_list_t params;
   params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "uid", uid_str));
-  params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "region", self_zone_group));
+  params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "zonegroup", self_zone_group));
   *req = new RGWRESTStreamWriteRequest(cct, url, NULL, &params);
   return (*req)->put_obj_init(key, obj, obj_size, attrs);
 }
@@ -129,7 +129,7 @@ int RGWRESTConn::get_obj(const rgw_user& uid, req_info *info /* optional */, rgw
   if (!uid.empty()) {
     params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "uid", uid.to_str()));
   }
-  params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "region", self_zone_group));
+  params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "zonegroup", self_zone_group));
   if (prepend_metadata) {
     params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "prepend-metadata", self_zone_group));
   }
diff --git a/src/rgw/rgw_rest_log.cc b/src/rgw/rgw_rest_log.cc
index 854da2e..6f8bc64 100644
--- a/src/rgw/rgw_rest_log.cc
+++ b/src/rgw/rgw_rest_log.cc
@@ -163,9 +163,14 @@ void RGWOp_MDLog_ShardInfo::execute() {
   }
 
   if (period.empty()) {
-    ldout(s->cct, 5) << "Missing period id" << dendl;
-    http_ret = -EINVAL;
-    return;
+    ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
+    period = store->get_current_period_id();
+
+    if (period.empty()) {
+      ldout(s->cct, 5) << "Missing period id" << dendl;
+      http_ret = -EINVAL;
+      return;
+    }
   }
   RGWMetadataLog meta_log{s->cct, store, period};
 
@@ -217,9 +222,14 @@ void RGWOp_MDLog_Delete::execute() {
   }
 
   if (period.empty()) {
-    ldout(s->cct, 5) << "Missing period id" << dendl;
-    http_ret = -EINVAL;
-    return;
+    ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
+    period = store->get_current_period_id();
+
+    if (period.empty()) {
+      ldout(s->cct, 5) << "Missing period id" << dendl;
+      http_ret = -EINVAL;
+      return;
+    }
   }
   RGWMetadataLog meta_log{s->cct, store, period};
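
Both RGWOp_MDLog_ShardInfo and RGWOp_MDLog_Delete now fall back to the store's current period when the request does not name one, and only fail with -EINVAL if no current period exists either. The resolution step, condensed from the hunks above:

    // sketch: resolve the period id, preferring the one supplied in the request
    if (period.empty()) {
      period = store->get_current_period_id();   // fall back to the active period
      if (period.empty()) {
        http_ret = -EINVAL;                       // nothing to fall back to
        return;
      }
    }
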
 
diff --git a/src/rgw/rgw_rest_s3.cc b/src/rgw/rgw_rest_s3.cc
index c912fc2..2ef10a5 100644
--- a/src/rgw/rgw_rest_s3.cc
+++ b/src/rgw/rgw_rest_s3.cc
@@ -40,8 +40,7 @@ using std::get;
 
 void list_all_buckets_start(struct req_state *s)
 {
-  s->formatter->open_array_section_in_ns("ListAllMyBucketsResult",
-			      "http://s3.amazonaws.com/doc/2006-03-01/");
+  s->formatter->open_array_section_in_ns("ListAllMyBucketsResult", XMLNS_AWS_S3);
 }
 
 void list_all_buckets_end(struct req_state *s)
@@ -466,8 +465,7 @@ int RGWListBucket_ObjStore_S3::get_params()
 
 void RGWListBucket_ObjStore_S3::send_versioned_response()
 {
-  s->formatter->open_object_section_in_ns("ListVersionsResult",
-					  "http://s3.amazonaws.com/doc/2006-03-01/");
+  s->formatter->open_object_section_in_ns("ListVersionsResult", XMLNS_AWS_S3);
   if (!s->bucket_tenant.empty())
     s->formatter->dump_string("Tenant", s->bucket_tenant);
   s->formatter->dump_string("Name", s->bucket_name);
@@ -565,8 +563,7 @@ void RGWListBucket_ObjStore_S3::send_response()
     return;
   }
 
-  s->formatter->open_object_section_in_ns("ListBucketResult",
-					  "http://s3.amazonaws.com/doc/2006-03-01/");
+  s->formatter->open_object_section_in_ns("ListBucketResult", XMLNS_AWS_S3);
   if (!s->bucket_tenant.empty())
     s->formatter->dump_string("Tenant", s->bucket_tenant);
   s->formatter->dump_string("Name", s->bucket_name);
@@ -628,8 +625,7 @@ void RGWGetBucketLogging_ObjStore_S3::send_response()
   end_header(s, this, "application/xml");
   dump_start(s);
 
-  s->formatter->open_object_section_in_ns("BucketLoggingStatus",
-					  "http://doc.s3.amazonaws.com/doc/2006-03-01/");
+  s->formatter->open_object_section_in_ns("BucketLoggingStatus", XMLNS_AWS_S3);
   s->formatter->close_section();
   rgw_flush_formatter_and_reset(s, s->formatter);
 }
@@ -652,9 +648,8 @@ void RGWGetBucketLocation_ObjStore_S3::send_response()
     }
   }
 
-  s->formatter->dump_format_ns("LocationConstraint",
-			       "http://doc.s3.amazonaws.com/doc/2006-03-01/",
-			       "%s",api_name.c_str());
+  s->formatter->dump_format_ns("LocationConstraint", XMLNS_AWS_S3,
+			       "%s", api_name.c_str());
   rgw_flush_formatter_and_reset(s, s->formatter);
 }
 
@@ -664,8 +659,7 @@ void RGWGetBucketVersioning_ObjStore_S3::send_response()
   end_header(s, this, "application/xml");
   dump_start(s);
 
-  s->formatter->open_object_section_in_ns("VersioningConfiguration",
-					  "http://doc.s3.amazonaws.com/doc/2006-03-01/");
+  s->formatter->open_object_section_in_ns("VersioningConfiguration", XMLNS_AWS_S3);
   if (versioned) {
     const char *status = (versioning_enabled ? "Enabled" : "Suspended");
     s->formatter->dump_string("Status", status);
@@ -822,8 +816,7 @@ void RGWGetBucketWebsite_ObjStore_S3::send_response()
 
   RGWBucketWebsiteConf& conf = s->bucket_info.website_conf;
 
-  s->formatter->open_object_section_in_ns("WebsiteConfiguration",
-					  "http://doc.s3.amazonaws.com/doc/2006-03-01/");
+  s->formatter->open_object_section_in_ns("WebsiteConfiguration", XMLNS_AWS_S3);
   conf.dump_xml(s->formatter);
   s->formatter->close_section(); // WebsiteConfiguration
   rgw_flush_formatter_and_reset(s, s->formatter);
@@ -1993,7 +1986,7 @@ void RGWCopyObj_ObjStore_S3::send_partial_response(off_t ofs)
 
     end_header(s, this, "application/xml");
     if (op_ret == 0) {
-      s->formatter->open_object_section("CopyObjectResult");
+      s->formatter->open_object_section_in_ns("CopyObjectResult", XMLNS_AWS_S3);
     }
     sent_header = true;
   } else {
@@ -2213,8 +2206,7 @@ void RGWGetRequestPayment_ObjStore_S3::send_response()
   end_header(s, this, "application/xml");
   dump_start(s);
 
-  s->formatter->open_object_section_in_ns("RequestPaymentConfiguration",
-					  "http://s3.amazonaws.com/doc/2006-03-01/");
+  s->formatter->open_object_section_in_ns("RequestPaymentConfiguration", XMLNS_AWS_S3);
   const char *payer = requester_pays ? "Requester" :  "BucketOwner";
   s->formatter->dump_string("Payer", payer);
   s->formatter->close_section();
@@ -2314,8 +2306,7 @@ void RGWInitMultipart_ObjStore_S3::send_response()
   end_header(s, this, "application/xml");
   if (op_ret == 0) {
     dump_start(s);
-    s->formatter->open_object_section_in_ns("InitiateMultipartUploadResult",
-		  "http://s3.amazonaws.com/doc/2006-03-01/");
+    s->formatter->open_object_section_in_ns("InitiateMultipartUploadResult", XMLNS_AWS_S3);
     if (!s->bucket_tenant.empty())
       s->formatter->dump_string("Tenant", s->bucket_tenant);
     s->formatter->dump_string("Bucket", s->bucket_name);
@@ -2350,8 +2341,7 @@ void RGWCompleteMultipart_ObjStore_S3::send_response()
   end_header(s, this, "application/xml");
   if (op_ret == 0) { 
     dump_start(s);
-    s->formatter->open_object_section_in_ns("CompleteMultipartUploadResult",
-			  "http://s3.amazonaws.com/doc/2006-03-01/");
+    s->formatter->open_object_section_in_ns("CompleteMultipartUploadResult", XMLNS_AWS_S3);
     if (!s->bucket_tenant.empty()) {
       if (s->info.domain.length()) {
         s->formatter->dump_format("Location", "%s.%s.%s",
@@ -2395,8 +2385,7 @@ void RGWListMultipart_ObjStore_S3::send_response()
 
   if (op_ret == 0) {
     dump_start(s);
-    s->formatter->open_object_section_in_ns("ListPartsResult",
-		    "http://s3.amazonaws.com/doc/2006-03-01/");
+    s->formatter->open_object_section_in_ns("ListPartsResult", XMLNS_AWS_S3);
     map<uint32_t, RGWUploadPartInfo>::iterator iter;
     map<uint32_t, RGWUploadPartInfo>::reverse_iterator test_iter;
     int cur_max = 0;
@@ -2448,7 +2437,7 @@ void RGWListBucketMultiparts_ObjStore_S3::send_response()
   if (op_ret < 0)
     return;
 
-  s->formatter->open_object_section("ListMultipartUploadsResult");
+  s->formatter->open_object_section_in_ns("ListMultipartUploadsResult", XMLNS_AWS_S3);
   if (!s->bucket_tenant.empty())
     s->formatter->dump_string("Tenant", s->bucket_tenant);
   s->formatter->dump_string("Bucket", s->bucket_name);
@@ -2533,8 +2522,7 @@ void RGWDeleteMultiObj_ObjStore_S3::begin_response()
 
   dump_start(s);
   end_header(s, this, "application/xml");
-  s->formatter->open_object_section_in_ns("DeleteResult",
-					  "http://s3.amazonaws.com/doc/2006-03-01/");
+  s->formatter->open_object_section_in_ns("DeleteResult", XMLNS_AWS_S3);
 
   rgw_flush_formatter(s, s->formatter);
 }
@@ -3523,24 +3511,33 @@ int RGW_Auth_S3::authorize_v4(RGWRados *store, struct req_state *s)
 
   /* handle request payload */
 
-  /* from rfc2616 - 4.3 Message Body
-   *
-   * "The presence of a message-body in a request is signaled by the inclusion of a
-   *  Content-Length or Transfer-Encoding header field in the request's message-headers."
-   */
-
   s->aws4_auth->payload_hash = "";
 
   string request_payload;
 
   bool unsigned_payload = false;
+
   if (using_qs) {
+    /* query parameters auth */
     unsigned_payload = true;
+  } else {
+    /* header auth */
+    const char *request_payload_hash = s->info.env->get("HTTP_X_AMZ_CONTENT_SHA256");
+    if (request_payload_hash && string("UNSIGNED-PAYLOAD").compare(request_payload_hash) == 0) {
+      unsigned_payload = true;
+    }
   }
 
-  if (using_qs || ((s->content_length == 0) && s->info.env->get("HTTP_TRANSFER_ENCODING") == NULL)) {
+  /* from rfc2616 - 4.3 Message Body
+   *
+   * "The presence of a message-body in a request is signaled by the inclusion of a
+   *  Content-Length or Transfer-Encoding header field in the request's message-headers."
+   */
+  bool body_available = s->content_length != 0 || s->info.env->get("HTTP_TRANSFER_ENCODING") != NULL;
+
+  if (unsigned_payload || !body_available) {
 
-    /* requests lacking of body are authenticated now */
+    /* requests without a body, or sent with 'UNSIGNED-PAYLOAD', are authenticated now */
 
     /* complete aws4 auth */
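
The AWS4 change above separates two questions that the old test conflated: whether the payload is unsigned (query-string auth, or an x-amz-content-sha256 header of UNSIGNED-PAYLOAD) and whether a body is present at all (per RFC 2616 section 4.3, signalled by Content-Length or Transfer-Encoding). Restated as a small sketch over the same inputs as the hunk above:

    // sketch of the decision made before completing aws4 auth
    bool unsigned_payload = using_qs;                     // query-string auth never signs the body
    if (!unsigned_payload) {
      const char *h = s->info.env->get("HTTP_X_AMZ_CONTENT_SHA256");
      unsigned_payload = h && strcmp(h, "UNSIGNED-PAYLOAD") == 0;
    }
    bool body_available = s->content_length != 0 ||
                          s->info.env->get("HTTP_TRANSFER_ENCODING") != nullptr;
    if (unsigned_payload || !body_available) {
      // no payload hash to compute: complete the aws4 auth immediately
    }
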
 
diff --git a/src/rgw/rgw_sync.h b/src/rgw/rgw_sync.h
index dcb3a4e..228728b 100644
--- a/src/rgw/rgw_sync.h
+++ b/src/rgw/rgw_sync.h
@@ -429,8 +429,6 @@ public:
 };
 
 class RGWShardCollectCR : public RGWCoroutine {
-  CephContext *cct;
-
   int cur_shard;
   int current_running;
   int max_concurrent;
diff --git a/src/rgw/rgw_user.h b/src/rgw/rgw_user.h
index da40d85..6335fb6 100644
--- a/src/rgw/rgw_user.h
+++ b/src/rgw/rgw_user.h
@@ -25,6 +25,8 @@ using namespace std;
 #define PUBLIC_ID_LEN 20
 #define RAND_SUBUSER_LEN 5
 
+#define XMLNS_AWS_S3 "http://s3.amazonaws.com/doc/2006-03-01/"
+
 /**
  * A string wrapper that includes encode/decode functions
  * for easily accessing a UID in all forms
diff --git a/src/test/bufferlist.cc b/src/test/bufferlist.cc
index 008b8cd..b257407 100644
--- a/src/test/bufferlist.cc
+++ b/src/test/bufferlist.cc
@@ -1390,6 +1390,24 @@ TEST(BufferList, buffers) {
   ASSERT_EQ((unsigned)1, bl.get_num_buffers());
 }
 
+TEST(BufferList, to_str) {
+  {
+    bufferlist bl;
+    bl.append("foo");
+    ASSERT_EQ(bl.to_str(), string("foo"));
+  }
+  {
+    bufferptr a("foobarbaz", 9);
+    bufferptr b("123456789", 9);
+    bufferptr c("ABCDEFGHI", 9);
+    bufferlist bl;
+    bl.append(a);
+    bl.append(b);
+    bl.append(c);
+    ASSERT_EQ(bl.to_str(), string("foobarbaz123456789ABCDEFGHI"));
+  }
+}
+
 TEST(BufferList, get_contiguous) {
   {
     bufferptr a("foobarbaz", 9);
diff --git a/src/test/centos-6/ceph.spec.in b/src/test/centos-6/ceph.spec.in
index 3a5a6f7..26928f7 100644
--- a/src/test/centos-6/ceph.spec.in
+++ b/src/test/centos-6/ceph.spec.in
@@ -1040,6 +1040,7 @@ if ! getent passwd ceph >/dev/null ; then
     CEPH_USER_ID_OPTION=""
     getent passwd $CEPH_USER_ID >/dev/null || CEPH_USER_ID_OPTION="-u $CEPH_USER_ID"
     useradd ceph $CEPH_USER_ID_OPTION -r -g ceph -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/ceph 2>/dev/null || :
+fi
 %endif
 exit 0
 
diff --git a/src/test/centos-7/ceph.spec.in b/src/test/centos-7/ceph.spec.in
index 3a5a6f7..26928f7 100644
--- a/src/test/centos-7/ceph.spec.in
+++ b/src/test/centos-7/ceph.spec.in
@@ -1040,6 +1040,7 @@ if ! getent passwd ceph >/dev/null ; then
     CEPH_USER_ID_OPTION=""
     getent passwd $CEPH_USER_ID >/dev/null || CEPH_USER_ID_OPTION="-u $CEPH_USER_ID"
     useradd ceph $CEPH_USER_ID_OPTION -r -g ceph -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/ceph 2>/dev/null || :
+fi
 %endif
 exit 0
 
diff --git a/src/test/ceph_objectstore_tool.py b/src/test/ceph_objectstore_tool.py
index 81af00a..4182103 100755
--- a/src/test/ceph_objectstore_tool.py
+++ b/src/test/ceph_objectstore_tool.py
@@ -52,7 +52,7 @@ def wait_for_health():
 
 
 def get_pool_id(name, nullfd):
-    cmd = "./ceph osd pool stats {pool}".format(pool=name).split()
+    cmd = "{path}/ceph osd pool stats {pool}".format(pool=name, path=CEPH_BIN).split()
     # pool {pool} id # .... grab the 4 field
     return check_output(cmd, stderr=nullfd).split()[3]
 
@@ -139,7 +139,7 @@ def cat_file(level, filename):
 def vstart(new, opt=""):
     print "vstarting....",
     NEW = new and "-n" or ""
-    call("MON=1 OSD=4 CEPH_PORT=7400 ./vstart.sh -l {new} -d mon osd {opt} > /dev/null 2>&1".format(new=NEW, opt=opt), shell=True)
+    call("MON=1 OSD=4 CEPH_PORT=7400 {path}/src/vstart.sh -l {new} -d mon osd {opt} > /dev/null 2>&1".format(new=NEW, opt=opt, path=CEPH_ROOT), shell=True)
     print "DONE"
 
 
@@ -198,7 +198,7 @@ def verify(DATADIR, POOL, NAME_PREFIX, db):
             os.unlink(TMPFILE)
         except:
             pass
-        cmd = "./rados -p {pool} -N '{nspace}' get {file} {out}".format(pool=POOL, file=file, out=TMPFILE, nspace=nspace)
+        cmd = "{path}/rados -p {pool} -N '{nspace}' get {file} {out}".format(pool=POOL, file=file, out=TMPFILE, nspace=nspace, path=CEPH_BIN)
         logging.debug(cmd)
         call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
         cmd = "diff -q {src} {result}".format(src=path, result=TMPFILE)
@@ -212,7 +212,7 @@ def verify(DATADIR, POOL, NAME_PREFIX, db):
         except:
             pass
         for key, val in db[nspace][file]["xattr"].iteritems():
-            cmd = "./rados -p {pool} -N '{nspace}' getxattr {name} {key}".format(pool=POOL, name=file, key=key, nspace=nspace)
+            cmd = "{path}/rados -p {pool} -N '{nspace}' getxattr {name} {key}".format(pool=POOL, name=file, key=key, nspace=nspace, path=CEPH_BIN)
             logging.debug(cmd)
             getval = check_output(cmd, shell=True, stderr=nullfd)
             logging.debug("getxattr {key} {val}".format(key=key, val=getval))
@@ -221,7 +221,7 @@ def verify(DATADIR, POOL, NAME_PREFIX, db):
                 ERRORS += 1
                 continue
         hdr = db[nspace][file].get("omapheader", "")
-        cmd = "./rados -p {pool} -N '{nspace}' getomapheader {name} {file}".format(pool=POOL, name=file, nspace=nspace, file=TMPFILE)
+        cmd = "{path}/rados -p {pool} -N '{nspace}' getomapheader {name} {file}".format(pool=POOL, name=file, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
         logging.debug(cmd)
         ret = call(cmd, shell=True, stderr=nullfd)
         if ret != 0:
@@ -239,7 +239,7 @@ def verify(DATADIR, POOL, NAME_PREFIX, db):
                 logging.error("getomapheader returned wrong val: {get} instead of {orig}".format(get=gethdr, orig=hdr))
                 ERRORS += 1
         for key, val in db[nspace][file]["omap"].iteritems():
-            cmd = "./rados -p {pool} -N '{nspace}' getomapval {name} {key} {file}".format(pool=POOL, name=file, key=key, nspace=nspace, file=TMPFILE)
+            cmd = "{path}/rados -p {pool} -N '{nspace}' getomapval {name} {key} {file}".format(pool=POOL, name=file, key=key, nspace=nspace, file=TMPFILE, path=CEPH_BIN)
             logging.debug(cmd)
             ret = call(cmd, shell=True, stderr=nullfd)
             if ret != 0:
@@ -368,12 +368,13 @@ def test_dump_journal(CFSD_PREFIX, osds):
     return ERRORS
 
 
-CEPH_DIR = "ceph_objectstore_tool_dir"
+CEPH_DIR = os.environ['CEPH_BUILD_DIR'] + "/ceph_objectstore_tool_dir"
 CEPH_CONF = os.path.join(CEPH_DIR, 'ceph.conf')
-
+CEPH_BIN = os.environ['CEPH_BIN']
+CEPH_ROOT = os.environ['CEPH_ROOT']
 
 def kill_daemons():
-    call("./init-ceph -c {conf} stop osd mon > /dev/null 2>&1".format(conf=CEPH_CONF), shell=True)
+    call("{path}/init-ceph -c {conf} stop osd mon > /dev/null 2>&1".format(conf=CEPH_CONF, path=CEPH_BIN), shell=True)
 
 
 def check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME):
@@ -424,18 +425,18 @@ def set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
 
     new_crush_file = tempfile.NamedTemporaryFile(delete=False)
     old_crush_file = tempfile.NamedTemporaryFile(delete=False)
-    ret = call("./osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
-                                                                          crush_file=old_crush_file.name),
+    ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
+                                                                          crush_file=old_crush_file.name, path=CEPH_BIN),
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                shell=True)
     assert(ret == 0)
 
     for osd_id in osd_ids:
-        cmd = "./crushtool -i {crush_file} --reweight-item osd.{osd} {weight} -o {new_crush_file}".format(osd=osd_id,
+        cmd = "{path}/crushtool -i {crush_file} --reweight-item osd.{osd} {weight} -o {new_crush_file}".format(osd=osd_id,
                                                                                                           crush_file=old_crush_file.name,
                                                                                                           weight=weight,
-                                                                                                          new_crush_file=new_crush_file.name)
+                                                                                                          new_crush_file=new_crush_file.name, path=CEPH_BIN)
         ret = call(cmd, stdout=subprocess.DEVNULL, shell=True)
         assert(ret == 0)
         old_crush_file, new_crush_file = new_crush_file, old_crush_file
@@ -444,8 +445,8 @@ def set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight):
     old_crush_file, new_crush_file = new_crush_file, old_crush_file
     old_crush_file.close()
 
-    ret = call("./osdmaptool --import-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
-                                                                               crush_file=new_crush_file.name),
+    ret = call("{path}/osdmaptool --import-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
+                                                                               crush_file=new_crush_file.name, path=CEPH_BIN),
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                shell=True)
@@ -475,13 +476,13 @@ def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path):
     # osdmaptool, but please keep in mind, they are different:
     #    item weights in crush map versus weight associated with each osd in osdmap
     crush_file = tempfile.NamedTemporaryFile(delete=False)
-    ret = call("./osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
-                                                                               crush_file=crush_file.name),
+    ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name,
+                                                                               crush_file=crush_file.name, path=CEPH_BIN),
                stdout=subprocess.DEVNULL,
                shell=True)
     assert(ret == 0)
-    output = check_output("./crushtool --tree -i {crush_file} | tail -n {num_osd}".format(crush_file=crush_file.name,
-                                                                                          num_osd=len(osd_ids)),
+    output = check_output("{path}/crushtool --tree -i {crush_file} | tail -n {num_osd}".format(crush_file=crush_file.name,
+                                                                                          num_osd=len(osd_ids), path=CEPH_BIN),
                           stderr=subprocess.DEVNULL,
                           shell=True)
     weights = []
@@ -596,27 +597,27 @@ def main(argv):
     pid = os.getpid()
     TESTDIR = "/tmp/test.{pid}".format(pid=pid)
     DATADIR = "/tmp/data.{pid}".format(pid=pid)
-    CFSD_PREFIX = "./ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} "
+    CFSD_PREFIX = CEPH_BIN + "/ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} "
     PROFNAME = "testecprofile"
 
     os.environ['CEPH_CONF'] = CEPH_CONF
     vstart(new=True)
     wait_for_health()
 
-    cmd = "./ceph osd pool create {pool} {pg} {pg} replicated".format(pool=REP_POOL, pg=PG_COUNT)
+    cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=REP_POOL, pg=PG_COUNT, path=CEPH_BIN)
     logging.debug(cmd)
     call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
     REPID = get_pool_id(REP_POOL, nullfd)
 
     print "Created Replicated pool #{repid}".format(repid=REPID)
 
-    cmd = "./ceph osd erasure-code-profile set {prof} ruleset-failure-domain=osd".format(prof=PROFNAME)
+    cmd = "{path}/ceph osd erasure-code-profile set {prof} ruleset-failure-domain=osd".format(prof=PROFNAME, path=CEPH_BIN)
     logging.debug(cmd)
     call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
-    cmd = "./ceph osd erasure-code-profile get {prof}".format(prof=PROFNAME)
+    cmd = "{path}/ceph osd erasure-code-profile get {prof}".format(prof=PROFNAME, path=CEPH_BIN)
     logging.debug(cmd)
     call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
-    cmd = "./ceph osd pool create {pool} {pg} {pg} erasure {prof}".format(pool=EC_POOL, prof=PROFNAME, pg=PG_COUNT)
+    cmd = "{path}/ceph osd pool create {pool} {pg} {pg} erasure {prof}".format(pool=EC_POOL, prof=PROFNAME, pg=PG_COUNT, path=CEPH_BIN)
     logging.debug(cmd)
     call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
     ECID = get_pool_id(EC_POOL, nullfd)
@@ -657,7 +658,7 @@ def main(argv):
                 fd.write(data)
             fd.close()
 
-            cmd = "./rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace)
+            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
             logging.debug(cmd)
             ret = call(cmd, shell=True, stderr=nullfd)
             if ret != 0:
@@ -676,7 +677,7 @@ def main(argv):
                     continue
                 mykey = "key{i}-{k}".format(i=i, k=k)
                 myval = "val{i}-{k}".format(i=i, k=k)
-                cmd = "./rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace)
+                cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
                 logging.debug(cmd)
                 ret = call(cmd, shell=True)
                 if ret != 0:
@@ -687,7 +688,7 @@ def main(argv):
             # Create omap header in all objects but REPobject1
             if i < ATTR_OBJS + 1 and i != 1:
                 myhdr = "hdr{i}".format(i=i)
-                cmd = "./rados -p {pool} -N '{nspace}' setomapheader {name} {hdr}".format(pool=REP_POOL, name=NAME, hdr=myhdr, nspace=nspace)
+                cmd = "{path}/rados -p {pool} -N '{nspace}' setomapheader {name} {hdr}".format(pool=REP_POOL, name=NAME, hdr=myhdr, nspace=nspace, path=CEPH_BIN)
                 logging.debug(cmd)
                 ret = call(cmd, shell=True)
                 if ret != 0:
@@ -701,7 +702,7 @@ def main(argv):
                     continue
                 mykey = "okey{i}-{k}".format(i=i, k=k)
                 myval = "oval{i}-{k}".format(i=i, k=k)
-                cmd = "./rados -p {pool} -N '{nspace}' setomapval {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace)
+                cmd = "{path}/rados -p {pool} -N '{nspace}' setomapval {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
                 logging.debug(cmd)
                 ret = call(cmd, shell=True)
                 if ret != 0:
@@ -709,7 +710,7 @@ def main(argv):
                 db[nspace][NAME]["omap"][mykey] = myval
 
     # Create some clones
-    cmd = "./rados -p {pool} mksnap snap1".format(pool=REP_POOL)
+    cmd = "{path}/rados -p {pool} mksnap snap1".format(pool=REP_POOL, path=CEPH_BIN)
     logging.debug(cmd)
     call(cmd, shell=True)
 
@@ -740,7 +741,7 @@ def main(argv):
                 fd.write(data)
             fd.close()
 
-            cmd = "./rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace)
+            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
             logging.debug(cmd)
             ret = call(cmd, shell=True, stderr=nullfd)
             if ret != 0:
@@ -774,7 +775,7 @@ def main(argv):
                 fd.write(data)
             fd.close()
 
-            cmd = "./rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=EC_POOL, name=NAME, ddname=DDNAME, nspace=nspace)
+            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=EC_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
             logging.debug(cmd)
             ret = call(cmd, shell=True, stderr=nullfd)
             if ret != 0:
@@ -793,7 +794,7 @@ def main(argv):
                     continue
                 mykey = "key{i}-{k}".format(i=i, k=k)
                 myval = "val{i}-{k}".format(i=i, k=k)
-                cmd = "./rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=EC_POOL, name=NAME, key=mykey, val=myval, nspace=nspace)
+                cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=EC_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN)
                 logging.debug(cmd)
                 ret = call(cmd, shell=True)
                 if ret != 0:
@@ -876,10 +877,10 @@ def main(argv):
     cmd = (CFSD_PREFIX + "--op import --file {FOO}").format(osd=ONEOSD, FOO=OTHERFILE)
     ERRORS += test_failure(cmd, "file: {FOO}: No such file or directory".format(FOO=OTHERFILE))
 
-    cmd = "./ceph-objectstore-tool --data-path BAD_DATA_PATH --op list".format(osd=ONEOSD)
+    cmd = "{path}/ceph-objectstore-tool --data-path BAD_DATA_PATH --op list".format(osd=ONEOSD, path=CEPH_BIN)
     ERRORS += test_failure(cmd, "data-path: BAD_DATA_PATH: No such file or directory")
 
-    cmd = "./ceph-objectstore-tool --journal-path BAD_JOURNAL_PATH --op dump-journal"
+    cmd = "{path}/ceph-objectstore-tool --journal-path BAD_JOURNAL_PATH --op dump-journal".format(path=CEPH_BIN)
     ERRORS += test_failure(cmd, "journal-path: BAD_JOURNAL_PATH: (2) No such file or directory")
 
     # On import can't use stdin from a terminal
@@ -892,11 +893,11 @@ def main(argv):
 
     # Specify a bad --type
     os.mkdir(OSDDIR + "/fakeosd")
-    cmd = ("./ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} --type foobar --op list --pgid {pg}").format(osd="fakeosd", pg=ONEPG)
+    cmd = ("{path}/ceph-objectstore-tool --data-path " + OSDDIR + "/{osd} --type foobar --op list --pgid {pg}").format(osd="fakeosd", pg=ONEPG, path=CEPH_BIN)
     ERRORS += test_failure(cmd, "Unable to create store of type foobar")
 
     # Don't specify a data-path
-    cmd = "./ceph-objectstore-tool --type memstore --op list --pgid {pg}".format(dir=OSDDIR, osd=ONEOSD, pg=ONEPG)
+    cmd = "{path}/ceph-objectstore-tool --type memstore --op list --pgid {pg}".format(dir=OSDDIR, osd=ONEOSD, pg=ONEPG, path=CEPH_BIN)
     ERRORS += test_failure(cmd, "Must provide --data-path")
 
     cmd = (CFSD_PREFIX + "--op remove").format(osd=ONEOSD)
@@ -1675,7 +1676,7 @@ def main(argv):
 
     if EXP_ERRORS == 0:
         NEWPOOL = "rados-import-pool"
-        cmd = "./rados mkpool {pool}".format(pool=NEWPOOL)
+        cmd = "{path}/rados mkpool {pool}".format(pool=NEWPOOL, path=CEPH_BIN)
         logging.debug(cmd)
         ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
 
@@ -1690,26 +1691,26 @@ def main(argv):
                 if first:
                     first = False
                     # This should do nothing
-                    cmd = "./rados import -p {pool} --dry-run {file}".format(pool=NEWPOOL, file=file)
+                    cmd = "{path}/rados import -p {pool} --dry-run {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
                     logging.debug(cmd)
                     ret = call(cmd, shell=True, stdout=nullfd)
                     if ret != 0:
                         logging.error("Rados import --dry-run failed from {file} with {ret}".format(file=file, ret=ret))
                         ERRORS += 1
-                    cmd = "./rados -p {pool} ls".format(pool=NEWPOOL)
+                    cmd = "{path}/rados -p {pool} ls".format(pool=NEWPOOL, path=CEPH_BIN)
                     logging.debug(cmd)
                     data = check_output(cmd, shell=True)
                     if data:
                         logging.error("'{data}'".format(data=data))
                         logging.error("Found objects after dry-run")
                         ERRORS += 1
-                cmd = "./rados import -p {pool} {file}".format(pool=NEWPOOL, file=file)
+                cmd = "{path}/rados import -p {pool} {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
                 logging.debug(cmd)
                 ret = call(cmd, shell=True, stdout=nullfd)
                 if ret != 0:
                     logging.error("Rados import failed from {file} with {ret}".format(file=file, ret=ret))
                     ERRORS += 1
-                cmd = "./rados import -p {pool} --no-overwrite {file}".format(pool=NEWPOOL, file=file)
+                cmd = "{path}/rados import -p {pool} --no-overwrite {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN)
                 logging.debug(cmd)
                 ret = call(cmd, shell=True, stdout=nullfd)
                 if ret != 0:
@@ -1733,11 +1734,11 @@ def main(argv):
     SPLIT_OBJ_COUNT = 5
     SPLIT_NSPACE_COUNT = 2
     SPLIT_NAME = "split"
-    cmd = "./ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT)
+    cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT, path=CEPH_BIN)
     logging.debug(cmd)
     call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
     SPLITID = get_pool_id(SPLIT_POOL, nullfd)
-    pool_size = int(check_output("./ceph osd pool get {pool} size".format(pool=SPLIT_POOL), shell=True, stderr=nullfd).split(" ")[1])
+    pool_size = int(check_output("{path}/ceph osd pool get {pool} size".format(pool=SPLIT_POOL, path=CEPH_BIN), shell=True, stderr=nullfd).split(" ")[1])
     EXP_ERRORS = 0
     RM_ERRORS = 0
     IMP_ERRORS = 0
@@ -1767,7 +1768,7 @@ def main(argv):
                 fd.write(data)
             fd.close()
 
-            cmd = "./rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace)
+            cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN)
             logging.debug(cmd)
             ret = call(cmd, shell=True, stderr=nullfd)
             if ret != 0:
@@ -1800,7 +1801,7 @@ def main(argv):
         vstart(new=False)
         wait_for_health()
 
-        cmd = "./ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL)
+        cmd = "{path}/ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL, path=CEPH_BIN)
         logging.debug(cmd)
         ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
         time.sleep(5)
diff --git a/src/test/cephtool-test-mds.sh b/src/test/cephtool-test-mds.sh
index 0fc2151..1c582ff 100755
--- a/src/test/cephtool-test-mds.sh
+++ b/src/test/cephtool-test-mds.sh
@@ -15,8 +15,10 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
+CEPH_ROOT=${CEPH_ROOT:-..}
+
 CEPH_CLI_TEST_DUP_COMMAND=1 \
-MDS=1 MON=1 OSD=3 CEPH_START='mon osd mds' CEPH_PORT=7200 test/vstart_wrapper.sh \
-    ../qa/workunits/cephtool/test.sh \
+MDS=1 MON=1 OSD=3 CEPH_START='mon osd mds' CEPH_PORT=7200 $CEPH_ROOT/src/test/vstart_wrapper.sh \
+    $CEPH_ROOT/qa/workunits/cephtool/test.sh \
     --test-mds \
     --asok-does-not-need-root
diff --git a/src/test/cephtool-test-mon.sh b/src/test/cephtool-test-mon.sh
index d4da5f2..34d3267 100755
--- a/src/test/cephtool-test-mon.sh
+++ b/src/test/cephtool-test-mon.sh
@@ -15,10 +15,12 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
+CEPH_ROOT=${CEPH_ROOT:-..}
+
 CEPH_CLI_TEST_DUP_COMMAND=1 \
 # uses CEPH_PORT going from 7202 7203 and 7204 because
 # it starts at 7202 and runs 3 mons (see vstart.sh)
-MON=3 OSD=3 CEPH_START='mon osd' CEPH_PORT=7202 test/vstart_wrapper.sh \
-    ../qa/workunits/cephtool/test.sh \
+MON=3 OSD=3 CEPH_START='mon osd' CEPH_PORT=7202 $CEPH_ROOT/src/test/vstart_wrapper.sh \
+    $CEPH_ROOT/qa/workunits/cephtool/test.sh \
     --test-mon \
     --asok-does-not-need-root
diff --git a/src/test/cephtool-test-osd.sh b/src/test/cephtool-test-osd.sh
index c016d24..247ad11 100755
--- a/src/test/cephtool-test-osd.sh
+++ b/src/test/cephtool-test-osd.sh
@@ -15,8 +15,10 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
+CEPH_ROOT=${CEPH_ROOT:-..}
+
 CEPH_CLI_TEST_DUP_COMMAND=1 \
-MON=1 OSD=3 CEPH_START='mon osd' CEPH_PORT=7201 test/vstart_wrapper.sh \
-    ../qa/workunits/cephtool/test.sh \
+MON=1 OSD=3 CEPH_START='mon osd' CEPH_PORT=7201 $CEPH_ROOT/src/test/vstart_wrapper.sh \
+    $CEPH_ROOT/qa/workunits/cephtool/test.sh \
     --test-osd \
     --asok-does-not-need-root
diff --git a/src/test/cephtool-test-rados.sh b/src/test/cephtool-test-rados.sh
index 8f9b551..402c1c5 100755
--- a/src/test/cephtool-test-rados.sh
+++ b/src/test/cephtool-test-rados.sh
@@ -15,5 +15,5 @@
 # GNU Library Public License for more details.
 #
 CEPH_CLI_TEST_DUP_COMMAND=1 \
-MON=1 OSD=3 CEPH_START='mon osd' CEPH_PORT=7205 test/vstart_wrapper.sh \
-    test/test_rados_tool.sh
+MON=1 OSD=3 CEPH_START='mon osd' CEPH_PORT=7205 $CEPH_ROOT/src/test/vstart_wrapper.sh \
+    $CEPH_ROOT/src/test/test_rados_tool.sh
diff --git a/src/test/common/test_async_compressor.cc b/src/test/common/test_async_compressor.cc
index e80a499..4ea8dc5 100644
--- a/src/test/common/test_async_compressor.cc
+++ b/src/test/common/test_async_compressor.cc
@@ -210,6 +210,10 @@ int main(int argc, char **argv) {
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("plugin_dir", directory, false, false);
+
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
 }
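
Like the compression and erasure-code tests that follow, this test stops hard-coding the autotools .libs directory and takes the plugin location from the CEPH_LIB environment variable, defaulting to "lib" (presumably the cmake build layout) when it is unset. The recurring idiom from these hunks is just:

    #include <cstdlib>
    #include <string>

    // sketch: choose the plugin directory from the environment, with a fallback
    const char *env = getenv("CEPH_LIB");
    std::string directory(env ? env : "lib");
    g_conf->set_val("plugin_dir", directory, false, false);
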
diff --git a/src/test/common/test_blkdev.cc b/src/test/common/test_blkdev.cc
index 349824b..b2379eb 100644
--- a/src/test/common/test_blkdev.cc
+++ b/src/test/common/test_blkdev.cc
@@ -4,6 +4,7 @@
 #include <iostream>
 #include <string.h>
 #include <errno.h>
+#include <stdlib.h>
 
 #include "include/types.h"
 #include "common/blkdev.h"
@@ -19,15 +20,18 @@ TEST(blkdev, get_block_device_base) {
   ASSERT_EQ(-EINVAL, get_block_device_base("/etc/notindev", buf, 100));
 
   for (int i=0; i<2; ++i) {
-    const char *root = "";
-    if (i == 0)
-      root = "test/common/test_blkdev_sys_block";
-    set_block_device_sandbox_dir(root);
+    string root;
+    if (i == 0) {
+      const char* env = getenv("CEPH_ROOT");
+      ASSERT_NE(env, nullptr) << "Environment Variable CEPH_ROOT not found!";
+      root = string(env) + "/src/test/common/test_blkdev_sys_block";
+    }
+    set_block_device_sandbox_dir(root.c_str());
 
     // work backwards
-    sprintf(buf, "%s/sys/block", root);
+    sprintf(buf, "%s/sys/block", root.c_str());
     DIR *dir = opendir(buf);
-    ASSERT_TRUE(dir);
+    ASSERT_NE(dir, nullptr);
     while (!::readdir_r(dir, reinterpret_cast<struct dirent*>(buf), &de)) {
       if (!de)
 	break;
@@ -50,7 +54,7 @@ TEST(blkdev, get_block_device_base) {
 	     (int)block_device_support_discard(base));
 
       char subdirfn[PATH_MAX];
-      sprintf(subdirfn, "%s/sys/block/%s", root, de->d_name);
+      sprintf(subdirfn, "%s/sys/block/%s", root.c_str(), de->d_name);
       DIR *subdir = opendir(subdirfn);
       ASSERT_TRUE(subdir);
       while (!::readdir_r(subdir, reinterpret_cast<struct dirent*>(buf2), &de2)) {
diff --git a/src/test/common/test_weighted_priority_queue.cc b/src/test/common/test_weighted_priority_queue.cc
index b851979..f94af71 100644
--- a/src/test/common/test_weighted_priority_queue.cc
+++ b/src/test/common/test_weighted_priority_queue.cc
@@ -114,6 +114,13 @@ protected:
         // in the strict queue.
         LQ::reverse_iterator ri = strictq.rbegin();
         EXPECT_EQ(std::get<0>(r), ri->first);
+        // Check that if there are multiple classes in a priority
+        // that it is not dequeueing the same class each time.
+        LastKlass::iterator si = last_strict.find(std::get<0>(r));
+        if (strictq[std::get<0>(r)].size() > 1 && si != last_strict.end()) {
+	  EXPECT_NE(std::get<1>(r), si->second);
+	}
+        last_strict[std::get<0>(r)] = std::get<1>(r);
 
 	Item t = strictq[std::get<0>(r)][std::get<1>(r)].front().second;
         EXPECT_EQ(std::get<2>(r), std::get<2>(t));
@@ -125,6 +132,14 @@ protected:
 	  strictq.erase(std::get<0>(r));
 	}
       } else {
+        // Check that if there are multiple classes in a priority
+        // that it is not dequeueing the same class each time.
+        LastKlass::iterator si = last_norm.find(std::get<0>(r));
+        if (normq[std::get<0>(r)].size() > 1 && si != last_norm.end()) {
+	  EXPECT_NE(std::get<1>(r), si->second);
+	}
+        last_norm[std::get<0>(r)] = std::get<1>(r);
+
 	Item t = normq[std::get<0>(r)][std::get<1>(r)].front().second;
         EXPECT_EQ(std::get<2>(r), std::get<2>(t));
         normq[std::get<0>(r)][std::get<1>(r)].pop_front();
@@ -191,6 +206,18 @@ struct Greater {
   }
 };
 
+TEST_F(WeightedPriorityQueueTest, wpq_test_remove_by_filter_null) {
+  WQ wq(0, 0);
+  LQ strictq, normq;
+  unsigned num_items = 100;
+  fill_queue(wq, strictq, normq, num_items);
+  // Pick a value that we didn't enqueue
+  const Greater<Item> pred(std::make_tuple(0, 0, 1 << 17));
+  Removed wq_removed;
+  wq.remove_by_filter(pred, &wq_removed);
+  EXPECT_EQ(0u, wq_removed.size());
+}
+
 TEST_F(WeightedPriorityQueueTest, wpq_test_remove_by_filter) {
   WQ wq(0, 0);
   LQ strictq, normq;
@@ -240,6 +267,17 @@ TEST_F(WeightedPriorityQueueTest, wpq_test_remove_by_filter) {
   }
 }
 
+TEST_F(WeightedPriorityQueueTest, wpq_test_remove_by_class_null) {
+  WQ wq(0, 0);
+  LQ strictq, normq;
+  unsigned num_items = 10;
+  fill_queue(wq, strictq, normq, num_items);
+  Removed wq_removed;
+  // Pick a klass that was not enqueued
+  wq.remove_by_class(klasses + 1, &wq_removed);
+  EXPECT_EQ(0u, wq_removed.size());
+}
+
 TEST_F(WeightedPriorityQueueTest, wpq_test_remove_by_class) {
   WQ wq(0, 0);
   LQ strictq, normq;
diff --git a/src/test/compressor/test_compression_plugin.cc b/src/test/compressor/test_compression_plugin.cc
index e898b80..45cffaf 100644
--- a/src/test/compressor/test_compression_plugin.cc
+++ b/src/test/compressor/test_compression_plugin.cc
@@ -17,6 +17,7 @@
 #include <errno.h>
 #include <signal.h>
 #include <stdlib.h>
+#include <string.h>
 #include <gtest/gtest.h>
 #include "global/global_init.h"
 #include "compressor/CompressionPlugin.h"
@@ -26,7 +27,8 @@
 
 TEST(CompressionPlugin, all)
 {
-  string directory(".libs");
+  const char* env = getenv("CEPH_LIB");
+  std::string directory(env ? env : "lib");
   CompressorRef compressor;
   PluginRegistry *reg = g_ceph_context->get_plugin_registry();
   EXPECT_TRUE(reg);
@@ -52,11 +54,17 @@ int main(int argc, char **argv) {
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  int r = system("mkdir -p .libs/compressor");
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  string mkdir_compressor = "mkdir -p " + directory + "/compressor";
+  int r = system(mkdir_compressor.c_str());
   (void)r;
-  r = system("cp .libs/libceph_example.so* .libs/compressor/");
+
+  string cp_libceph_example = "cp " + directory + "/libceph_example.so* " + directory + "/compressor/";
+  r = system(cp_libceph_example.c_str());
   (void)r;
-  g_conf->set_val("plugin_dir", ".libs", false, false);
+
+  g_conf->set_val("plugin_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/compressor/test_compression_plugin_snappy.cc b/src/test/compressor/test_compression_plugin_snappy.cc
index 2855f7a..9e733f4 100644
--- a/src/test/compressor/test_compression_plugin_snappy.cc
+++ b/src/test/compressor/test_compression_plugin_snappy.cc
@@ -38,12 +38,17 @@ int main(int argc, char **argv) {
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  int r = system("mkdir -p .libs/compressor");
+  const char* env = getenv("CEPH_LIB");
+  std::string directory(env ? env : "lib");
+  string mkdir_compressor = "mkdir -p " + directory + "/compressor";
+  int r = system(mkdir_compressor.c_str());
   (void)r;
-  r = system("cp .libs/libceph_snappy.so* .libs/compressor/");
+
+  string cp_libceph_snappy = "cp " + directory + "/libceph_snappy.so* " + directory + "/compressor/";
+  r = system(cp_libceph_snappy.c_str());
   (void)r;
 
-  g_conf->set_val("plugin_dir", ".libs", false, false);
+  g_conf->set_val("plugin_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/compressor/test_compression_plugin_zlib.cc b/src/test/compressor/test_compression_plugin_zlib.cc
index 0e956a1..5af5d14 100644
--- a/src/test/compressor/test_compression_plugin_zlib.cc
+++ b/src/test/compressor/test_compression_plugin_zlib.cc
@@ -37,12 +37,18 @@ int main(int argc, char **argv) {
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  int r = system("mkdir -p .libs/compressor");
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  string mkdir_compressor = "mkdir -p " + directory + "/compressor";
+  int r = system(mkdir_compressor.c_str());
   (void)r;
-  r = system("cp .libs/libceph_zlib.so* .libs/compressor/");
+
+  string cp_libceph_zlib = "cp " + directory + "/libceph_zlib.so* " + directory + "/compressor/";
+  r = system(cp_libceph_zlib.c_str());
   (void)r;
 
-  g_conf->set_val("plugin_dir", ".libs", false, false);
+  g_conf->set_val("plugin_dir", directory, false, false);
+
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/compressor/test_compression_snappy.cc b/src/test/compressor/test_compression_snappy.cc
index 556390b..bbd315c 100644
--- a/src/test/compressor/test_compression_snappy.cc
+++ b/src/test/compressor/test_compression_snappy.cc
@@ -77,7 +77,9 @@ int main(int argc, char **argv) {
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("compression_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("plugin_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/compressor/test_compression_zlib.cc b/src/test/compressor/test_compression_zlib.cc
index 70df87f..6ba6445 100644
--- a/src/test/compressor/test_compression_zlib.cc
+++ b/src/test/compressor/test_compression_zlib.cc
@@ -15,6 +15,7 @@
  */
 
 #include <errno.h>
+#include <stdlib.h>
 #include <string.h>
 #include <gtest/gtest.h>
 #include "global/global_init.h"
@@ -26,7 +27,7 @@
 TEST(CompressionZlib, compress_decompress)
 {
   CompressionZlib sp;
-  EXPECT_EQ(sp.get_method_name(), "zlib");
+  EXPECT_STREQ(sp.get_method_name(), "zlib");
   const char* test = "This is test text";
   int len = strlen(test);
   bufferlist in, out;
@@ -44,7 +45,7 @@ TEST(CompressionZlib, compress_decompress)
 TEST(CompressionZlib, compress_decompress_chunk)
 {
   CompressionZlib sp;
-  EXPECT_EQ(sp.get_method_name(), "zlib");
+  EXPECT_STREQ(sp.get_method_name(), "zlib");
   const char* test = "This is test text";
   buffer::ptr test2 ("1234567890", 10);
   int len = strlen(test);
@@ -68,7 +69,9 @@ int main(int argc, char **argv) {
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("compression_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("compression_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
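
The switch from EXPECT_EQ to EXPECT_STREQ above matters because get_method_name() evidently returns a raw C string: EXPECT_EQ on two C strings compares the pointer values, while EXPECT_STREQ compares their contents. In isolation:

    const char *name = "zlib";
    EXPECT_STREQ(name, "zlib");      // compares the characters -- passes
    // EXPECT_EQ(name, "zlib") would compare two pointers, which may or may not be equal
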
diff --git a/src/test/encoding/check-generated.sh b/src/test/encoding/check-generated.sh
index c34fce8..20d0c4f 100755
--- a/src/test/encoding/check-generated.sh
+++ b/src/test/encoding/check-generated.sh
@@ -1,6 +1,6 @@
 #!/bin/bash -e
 
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 dir=$1
 
@@ -15,8 +15,8 @@ failed=0
 numtests=0
 echo "checking ceph-dencoder generated test instances..."
 echo "numgen type"
-for type in `./ceph-dencoder list_types`; do
-    num=`./ceph-dencoder type $type count_tests`
+for type in `ceph-dencoder list_types`; do
+    num=`ceph-dencoder type $type count_tests`
     echo "$num $type"
     for n in `seq 1 1 $num 2>/dev/null`; do
 	safe_type=$type
@@ -26,10 +26,10 @@ for type in `./ceph-dencoder list_types`; do
 	fi
 
 	pids=""
-	run_in_background pids bash -c "./ceph-dencoder type $safe_type select_test $n dump_json > $tmp1"
-	run_in_background pids bash -c "./ceph-dencoder type $safe_type select_test $n encode decode dump_json > $tmp2"
-	run_in_background pids bash -c "./ceph-dencoder type $safe_type select_test $n copy dump_json > $tmp3"
-	run_in_background pids bash -c "./ceph-dencoder type $safe_type select_test $n copy_ctor dump_json > $tmp4"
+	run_in_background pids bash -c "ceph-dencoder type $safe_type select_test $n dump_json > $tmp1"
+	run_in_background pids bash -c "ceph-dencoder type $safe_type select_test $n encode decode dump_json > $tmp2"
+	run_in_background pids bash -c "ceph-dencoder type $safe_type select_test $n copy dump_json > $tmp3"
+	run_in_background pids bash -c "ceph-dencoder type $safe_type select_test $n copy_ctor dump_json > $tmp4"
 	wait_background pids
 
 	if [ $? -ne 0 ]; then
@@ -43,7 +43,7 @@ for type in `./ceph-dencoder list_types`; do
 	# the sorted json output.  this is a weaker test, but is better
 	# than nothing.
 	deterministic=0
-	if ./ceph-dencoder type $type is_deterministic; then
+	if ceph-dencoder type $type is_deterministic; then
 	    deterministic=1
 	fi
 
@@ -57,37 +57,37 @@ for type in `./ceph-dencoder list_types`; do
 
 	if ! cmp $tmp1 $tmp2; then
 	    echo "**** $type test $n dump_json check failed ****"
-	    echo "   ./ceph-dencoder type $type select_test $n dump_json > $tmp1"
-	    echo "   ./ceph-dencoder type $type select_test $n encode decode dump_json > $tmp2"
+	    echo "   ceph-dencoder type $type select_test $n dump_json > $tmp1"
+	    echo "   ceph-dencoder type $type select_test $n encode decode dump_json > $tmp2"
 	    echo "   diff $tmp1 $tmp2"
 	    failed=$(($failed + 1))
 	fi
 
 	if ! cmp $tmp1 $tmp3; then
 	    echo "**** $type test $n copy dump_json check failed ****"
-	    echo "   ./ceph-dencoder type $type select_test $n dump_json > $tmp1"
-	    echo "   ./ceph-dencoder type $type select_test $n copy dump_json > $tmp2"
+	    echo "   ceph-dencoder type $type select_test $n dump_json > $tmp1"
+	    echo "   ceph-dencoder type $type select_test $n copy dump_json > $tmp2"
 	    echo "   diff $tmp1 $tmp2"
 	    failed=$(($failed + 1))
 	fi
 
 	if ! cmp $tmp1 $tmp4; then
 	    echo "**** $type test $n copy_ctor dump_json check failed ****"
-	    echo "   ./ceph-dencoder type $type select_test $n dump_json > $tmp1"
-	    echo "   ./ceph-dencoder type $type select_test $n copy_ctor dump_json > $tmp2"
+	    echo "   ceph-dencoder type $type select_test $n dump_json > $tmp1"
+	    echo "   ceph-dencoder type $type select_test $n copy_ctor dump_json > $tmp2"
 	    echo "   diff $tmp1 $tmp2"
 	    failed=$(($failed + 1))
 	fi
 
 	if [ $deterministic -ne 0 ]; then
-	    run_in_background pids bash -c "./ceph-dencoder type $safe_type select_test $n encode export $tmp1"
-	    run_in_background pids bash -c "./ceph-dencoder type $safe_type select_test $n encode decode encode export $tmp2"
+	    run_in_background pids bash -c "ceph-dencoder type $safe_type select_test $n encode export $tmp1"
+	    run_in_background pids bash -c "ceph-dencoder type $safe_type select_test $n encode decode encode export $tmp2"
 	    wait_background pids
 
 	    if ! cmp $tmp1 $tmp2; then
 		echo "**** $type test $n binary reencode check failed ****"
-		echo "   ./ceph-dencoder type $type select_test $n encode export $tmp1"
-		echo "   ./ceph-dencoder type $type select_test $n encode decode encode export $tmp2"
+		echo "   ceph-dencoder type $type select_test $n encode export $tmp1"
+		echo "   ceph-dencoder type $type select_test $n encode decode encode export $tmp2"
 		echo "   cmp $tmp1 $tmp2"
 		failed=$(($failed + 1))
 	    fi
diff --git a/src/test/encoding/readable.sh b/src/test/encoding/readable.sh
index 42cacb4..e5cfd17 100755
--- a/src/test/encoding/readable.sh
+++ b/src/test/encoding/readable.sh
@@ -1,6 +1,6 @@
 #!/bin/sh -e
 
-dir=../ceph-object-corpus
+dir=$CEPH_ROOT/ceph-object-corpus
 
 set -e
 
@@ -8,7 +8,7 @@ failed=0
 numtests=0
 pids=""
 
-myversion=`./ceph-dencoder version`
+myversion=`ceph-dencoder version`
 DEBUG=0
 WAITALL_DELAY=.1
 debug() { if [ "$DEBUG" -gt 0 ]; then echo "DEBUG: $*" >&2; fi }
@@ -23,7 +23,7 @@ test_object() {
     tmp2=`mktemp /tmp/typ-XXXXXXXXX`
 
     rm -f $output_file
-    if ./ceph-dencoder type $type 2>/dev/null; then
+    if ceph-dencoder type $type 2>/dev/null; then
       #echo "type $type";
       echo "        $vdir/objects/$type"
 
@@ -108,7 +108,7 @@ test_object() {
         # nondeterministically.  compare the sorted json
         # output.  this is a weaker test, but is better than
         # nothing.
-        if ! ./ceph-dencoder type $type is_deterministic; then
+        if ! ceph-dencoder type $type is_deterministic; then
           echo "  sorting json output for nondeterministic object"
           for f in $tmp1 $tmp2; do
             sort $f | sed 's/,$//' > $f.new
@@ -149,7 +149,7 @@ waitall() { # PID...
          errors=$(($errors + 1))
        fi
      done
-     (("$#" > 0)) || break
+     [ $# -eq 0 ] && break
      sleep ${WAITALL_DELAY:-1}
     done
    [ $errors -eq 0 ]
diff --git a/src/test/erasure-code/TestErasureCode.cc b/src/test/erasure-code/TestErasureCode.cc
index 27023e1..56759c4 100644
--- a/src/test/erasure-code/TestErasureCode.cc
+++ b/src/test/erasure-code/TestErasureCode.cc
@@ -15,6 +15,7 @@
  */
 
 #include <errno.h>
+#include <stdlib.h>
 
 #include "global/global_init.h"
 #include "erasure-code/ErasureCode.h"
@@ -160,7 +161,9 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/erasure-code/TestErasureCodeExample.cc b/src/test/erasure-code/TestErasureCodeExample.cc
index 64ef598..eff590f 100644
--- a/src/test/erasure-code/TestErasureCodeExample.cc
+++ b/src/test/erasure-code/TestErasureCodeExample.cc
@@ -13,6 +13,7 @@
  *  version 2.1 of the License, or (at your option) any later version.
  * 
  */
+#include <stdlib.h>
 
 #include "include/stringify.h"
 #include "global/global_init.h"
@@ -240,7 +241,9 @@ int main(int argc, char **argv) {
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/erasure-code/TestErasureCodeIsa.cc b/src/test/erasure-code/TestErasureCodeIsa.cc
index 29318e0..9c01e95 100644
--- a/src/test/erasure-code/TestErasureCodeIsa.cc
+++ b/src/test/erasure-code/TestErasureCodeIsa.cc
@@ -16,6 +16,7 @@
  */
 
 #include <errno.h>
+#include <stdlib.h>
 
 #include "crush/CrushWrapper.h"
 #include "include/stringify.h"
@@ -962,7 +963,9 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/erasure-code/TestErasureCodeJerasure.cc b/src/test/erasure-code/TestErasureCodeJerasure.cc
index c028e32..52d7c68 100644
--- a/src/test/erasure-code/TestErasureCodeJerasure.cc
+++ b/src/test/erasure-code/TestErasureCodeJerasure.cc
@@ -16,6 +16,7 @@
  */
 
 #include <errno.h>
+#include <stdlib.h>
 
 #include "crush/CrushWrapper.h"
 #include "include/stringify.h"
@@ -364,7 +365,9 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/erasure-code/TestErasureCodeLrc.cc b/src/test/erasure-code/TestErasureCodeLrc.cc
index 772351c..b945231 100644
--- a/src/test/erasure-code/TestErasureCodeLrc.cc
+++ b/src/test/erasure-code/TestErasureCodeLrc.cc
@@ -16,6 +16,7 @@
  */
 
 #include <errno.h>
+#include <stdlib.h>
 
 #include "crush/CrushWrapper.h"
 #include "common/config.h"
@@ -401,9 +402,11 @@ TEST(ErasureCodeLrc, layers_init)
     ErasureCodeLrc lrc(g_conf->erasure_code_dir);
     ErasureCodeProfile profile;
 
-    const char *description_string =
-      "[ "
-      "  [ \"_cDDD_cDD_\", \"directory=.libs\" ],"
+    const char* env = getenv("CEPH_LIB");
+    string directory(env ? env : "lib");
+    string description_string = 
+      "[ " 
+      "  [ \"_cDDD_cDD_\", \"directory=" + directory + "\" ]," 
       "]";
     profile["layers"] = description_string;
     json_spirit::mArray description;
@@ -913,7 +916,9 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/erasure-code/TestErasureCodePlugin.cc b/src/test/erasure-code/TestErasureCodePlugin.cc
index 5b0518e..45f1b28 100644
--- a/src/test/erasure-code/TestErasureCodePlugin.cc
+++ b/src/test/erasure-code/TestErasureCodePlugin.cc
@@ -17,6 +17,7 @@
 
 #include <errno.h>
 #include <signal.h>
+#include <stdlib.h>
 #include "common/Thread.h"
 #include "global/global_init.h"
 #include "erasure-code/ErasureCodePlugin.h"
@@ -82,7 +83,8 @@ TEST_F(ErasureCodePluginRegistryTest, factory_mutex) {
 TEST_F(ErasureCodePluginRegistryTest, all)
 {
   ErasureCodeProfile profile;
-  string directory(".libs");
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
   ErasureCodeInterfaceRef erasure_code;
   ErasureCodePluginRegistry &instance = ErasureCodePluginRegistry::instance();
   EXPECT_FALSE(erasure_code);
@@ -131,7 +133,9 @@ int main(int argc, char **argv) {
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/erasure-code/TestErasureCodePluginIsa.cc b/src/test/erasure-code/TestErasureCodePluginIsa.cc
index f1b0884..1044017 100644
--- a/src/test/erasure-code/TestErasureCodePluginIsa.cc
+++ b/src/test/erasure-code/TestErasureCodePluginIsa.cc
@@ -13,6 +13,7 @@
  */
 
 #include <errno.h>
+#include <stdlib.h>
 #include "arch/probe.h"
 #include "arch/intel.h"
 #include "global/global_init.h"
@@ -59,7 +60,9 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/erasure-code/TestErasureCodePluginJerasure.cc b/src/test/erasure-code/TestErasureCodePluginJerasure.cc
index 1616ce6..f43f296 100644
--- a/src/test/erasure-code/TestErasureCodePluginJerasure.cc
+++ b/src/test/erasure-code/TestErasureCodePluginJerasure.cc
@@ -16,6 +16,7 @@
  */
 
 #include <errno.h>
+#include <stdlib.h>
 #include "arch/probe.h"
 #include "arch/intel.h"
 #include "arch/arm.h"
@@ -256,7 +257,9 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/erasure-code/TestErasureCodePluginLrc.cc b/src/test/erasure-code/TestErasureCodePluginLrc.cc
index 03abafc..f88d1cd 100644
--- a/src/test/erasure-code/TestErasureCodePluginLrc.cc
+++ b/src/test/erasure-code/TestErasureCodePluginLrc.cc
@@ -16,6 +16,7 @@
  */
 
 #include <errno.h>
+#include <stdlib.h>
 #include "arch/probe.h"
 #include "arch/intel.h"
 #include "global/global_init.h"
@@ -47,7 +48,9 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/erasure-code/TestErasureCodePluginShec.cc b/src/test/erasure-code/TestErasureCodePluginShec.cc
index 2708150..39d552b 100644
--- a/src/test/erasure-code/TestErasureCodePluginShec.cc
+++ b/src/test/erasure-code/TestErasureCodePluginShec.cc
@@ -17,6 +17,7 @@
  */
 
 #include <errno.h>
+#include <stdlib.h>
 #include "arch/probe.h"
 #include "arch/intel.h"
 #include "arch/arm.h"
@@ -252,7 +253,9 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/erasure-code/TestErasureCodeShec.cc b/src/test/erasure-code/TestErasureCodeShec.cc
index 1f87505..8da9e69 100644
--- a/src/test/erasure-code/TestErasureCodeShec.cc
+++ b/src/test/erasure-code/TestErasureCodeShec.cc
@@ -20,6 +20,7 @@
 
 #include <errno.h>
 #include <pthread.h>
+#include <stdlib.h>
 
 #include "crush/CrushWrapper.h"
 #include "osd/osd_types.h"
@@ -2674,7 +2675,9 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/erasure-code/TestErasureCodeShec_all.cc b/src/test/erasure-code/TestErasureCodeShec_all.cc
index 6e9a743..c8715eb 100644
--- a/src/test/erasure-code/TestErasureCodeShec_all.cc
+++ b/src/test/erasure-code/TestErasureCodeShec_all.cc
@@ -19,6 +19,7 @@
 // SUMMARY: TestErasureCodeShec combination of k,m,c by 301 patterns
 
 #include <errno.h>
+#include <stdlib.h>
 
 #include "crush/CrushWrapper.h"
 #include "osd/osd_types.h"
@@ -296,7 +297,9 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
 
diff --git a/src/test/erasure-code/TestErasureCodeShec_arguments.cc b/src/test/erasure-code/TestErasureCodeShec_arguments.cc
index 5d2f494..ea68ab2 100644
--- a/src/test/erasure-code/TestErasureCodeShec_arguments.cc
+++ b/src/test/erasure-code/TestErasureCodeShec_arguments.cc
@@ -19,6 +19,7 @@
 // SUMMARY: shec's gtest for each argument of minimum_to_decode()/decode()
 
 #include <errno.h>
+#include <stdlib.h>
 
 #include "crush/CrushWrapper.h"
 #include "osd/osd_types.h"
@@ -395,7 +396,9 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  std::string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
 
diff --git a/src/test/erasure-code/TestErasureCodeShec_thread.cc b/src/test/erasure-code/TestErasureCodeShec_thread.cc
index 529ec1b..3adab59 100644
--- a/src/test/erasure-code/TestErasureCodeShec_thread.cc
+++ b/src/test/erasure-code/TestErasureCodeShec_thread.cc
@@ -20,6 +20,7 @@
 
 #include <errno.h>
 #include <pthread.h>
+#include <stdlib.h>
 
 #include "crush/CrushWrapper.h"
 #include "osd/osd_types.h"
@@ -93,7 +94,9 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  std::string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
diff --git a/src/test/erasure-code/ceph_erasure_code.cc b/src/test/erasure-code/ceph_erasure_code.cc
index a488366..aaa4304 100644
--- a/src/test/erasure-code/ceph_erasure_code.cc
+++ b/src/test/erasure-code/ceph_erasure_code.cc
@@ -88,7 +88,9 @@ int ErasureCodeCommand::setup(int argc, char** argv) {
     CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
   common_init_finish(g_ceph_context);
   g_ceph_context->_conf->apply_changes(NULL);
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  string directory(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", directory, false, false);
 
   if (vm.count("help")) {
     cout << desc << std::endl;
diff --git a/src/test/erasure-code/ceph_erasure_code_non_regression.cc b/src/test/erasure-code/ceph_erasure_code_non_regression.cc
index bc65123..7b0fa2f 100644
--- a/src/test/erasure-code/ceph_erasure_code_non_regression.cc
+++ b/src/test/erasure-code/ceph_erasure_code_non_regression.cc
@@ -15,6 +15,7 @@
  */
 
 #include <errno.h>
+#include <stdlib.h>
 #include <boost/scoped_ptr.hpp>
 #include <boost/lexical_cast.hpp>
 #include <boost/program_options/option.hpp>
@@ -99,7 +100,9 @@ int ErasureCodeNonRegression::setup(int argc, char** argv) {
     CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
   common_init_finish(g_ceph_context);
   g_ceph_context->_conf->apply_changes(NULL);
-  g_conf->set_val("erasure_code_dir", ".libs", false, false);
+  const char* env = getenv("CEPH_LIB");
+  std::string libs_dir(env ? env : "lib");
+  g_conf->set_val("erasure_code_dir", libs_dir, false, false);
 
   if (vm.count("help")) {
     cout << desc << std::endl;
diff --git a/src/test/erasure-code/test-erasure-code.sh b/src/test/erasure-code/test-erasure-code.sh
index a8661f3..49f501f 100755
--- a/src/test/erasure-code/test-erasure-code.sh
+++ b/src/test/erasure-code/test-erasure-code.sh
@@ -16,7 +16,7 @@
 # GNU Library Public License for more details.
 #
 
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -30,14 +30,14 @@ function run() {
     setup $dir || return 1
     run_mon $dir a || return 1
     # check that erasure code plugins are preloaded
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
     grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
     for id in $(seq 0 10) ; do
         run_osd $dir $id || return 1
     done
     wait_for_clean || return 1
     # check that erasure code plugins are preloaded
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
     grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
     create_erasure_coded_pool ecpool || return 1
 
@@ -53,9 +53,9 @@ function run() {
 function create_erasure_coded_pool() {
     local poolname=$1
 
-    ./ceph osd erasure-code-profile set myprofile \
+    ceph osd erasure-code-profile set myprofile \
         ruleset-failure-domain=osd || return 1
-    ./ceph osd pool create $poolname 12 12 erasure myprofile \
+    ceph osd pool create $poolname 12 12 erasure myprofile \
         || return 1
     wait_for_clean || return 1
 }
@@ -63,7 +63,7 @@ function create_erasure_coded_pool() {
 function delete_pool() {
     local poolname=$1
 
-    ./ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
+    ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
 }
 
 function rados_put_get() {
@@ -79,8 +79,8 @@ function rados_put_get() {
     #
     # get and put an object, compare they are equal
     #
-    ./rados --pool $poolname put $objname $dir/ORIGINAL || return 1
-    ./rados --pool $poolname get $objname $dir/COPY || return 1
+    rados --pool $poolname put $objname $dir/ORIGINAL || return 1
+    rados --pool $poolname get $objname $dir/COPY || return 1
     diff $dir/ORIGINAL $dir/COPY || return 1
     rm $dir/COPY
 
@@ -91,11 +91,11 @@ function rados_put_get() {
     #
     local -a initial_osds=($(get_osds $poolname $objname))
     local last=$((${#initial_osds[@]} - 1))
-    ./ceph osd out ${initial_osds[$last]} || return 1
+    ceph osd out ${initial_osds[$last]} || return 1
     ! get_osds $poolname $objname | grep '\<'${initial_osds[$last]}'\>' || return 1
-    ./rados --pool $poolname get $objname $dir/COPY || return 1
+    rados --pool $poolname get $objname $dir/COPY || return 1
     diff $dir/ORIGINAL $dir/COPY || return 1
-    ./ceph osd in ${initial_osds[$last]} || return 1
+    ceph osd in ${initial_osds[$last]} || return 1
 
     rm $dir/ORIGINAL
 }
@@ -113,8 +113,8 @@ function rados_osds_out_in() {
     #
     # get and put an object, compare they are equal
     #
-    ./rados --pool $poolname put $objname $dir/ORIGINAL || return 1
-    ./rados --pool $poolname get $objname $dir/COPY || return 1
+    rados --pool $poolname put $objname $dir/ORIGINAL || return 1
+    rados --pool $poolname get $objname $dir/COPY || return 1
     diff $dir/ORIGINAL $dir/COPY || return 1
     rm $dir/COPY
 
@@ -128,7 +128,7 @@ function rados_osds_out_in() {
     local osds_list=$(get_osds $poolname $objname)
     local -a osds=($osds_list)
     for osd in 0 1 ; do
-        ./ceph osd out ${osds[$osd]} || return 1
+      ceph osd out ${osds[$osd]} || return 1
     done
     wait_for_clean || return 1
     #
@@ -137,7 +137,7 @@ function rados_osds_out_in() {
     for osd in 0 1 ; do
         ! get_osds $poolname $objname | grep '\<'${osds[$osd]}'\>' || return 1
     done
-    ./rados --pool $poolname get $objname $dir/COPY || return 1
+    rados --pool $poolname get $objname $dir/COPY || return 1
     diff $dir/ORIGINAL $dir/COPY || return 1
     #
     # bring the osds back in, , wait for the cluster
@@ -145,7 +145,7 @@ function rados_osds_out_in() {
     # implies the PG go back to using the same osds as before
     #
     for osd in 0 1 ; do
-        ./ceph osd in ${osds[$osd]} || return 1
+      ceph osd in ${osds[$osd]} || return 1
     done
     wait_for_clean || return 1
     test "$osds_list" = "$(get_osds $poolname $objname)" || return 1
@@ -157,18 +157,18 @@ function TEST_rados_put_get_lrc_advanced() {
     local poolname=pool-lrc-a
     local profile=profile-lrc-a
 
-    ./ceph osd erasure-code-profile set $profile \
+    ceph osd erasure-code-profile set $profile \
         plugin=lrc \
         mapping=DD_ \
         ruleset-steps='[ [ "chooseleaf", "osd", 0 ] ]' \
         layers='[ [ "DDc", "" ] ]'  || return 1
-    ./ceph osd pool create $poolname 12 12 erasure $profile \
+    ceph osd pool create $poolname 12 12 erasure $profile \
         || return 1
 
     rados_put_get $dir $poolname || return 1
 
     delete_pool $poolname
-    ./ceph osd erasure-code-profile rm $profile
+    ceph osd erasure-code-profile rm $profile
 }
 
 function TEST_rados_put_get_lrc_kml() {
@@ -176,17 +176,17 @@ function TEST_rados_put_get_lrc_kml() {
     local poolname=pool-lrc
     local profile=profile-lrc
 
-    ./ceph osd erasure-code-profile set $profile \
+    ceph osd erasure-code-profile set $profile \
         plugin=lrc \
         k=4 m=2 l=3 \
         ruleset-failure-domain=osd || return 1
-    ./ceph osd pool create $poolname 12 12 erasure $profile \
+    ceph osd pool create $poolname 12 12 erasure $profile \
         || return 1
 
     rados_put_get $dir $poolname || return 1
 
     delete_pool $poolname
-    ./ceph osd erasure-code-profile rm $profile
+    ceph osd erasure-code-profile rm $profile
 }
 
 function TEST_rados_put_get_isa() {
@@ -197,10 +197,10 @@ function TEST_rados_put_get_isa() {
     local dir=$1
     local poolname=pool-isa
 
-    ./ceph osd erasure-code-profile set profile-isa \
+    ceph osd erasure-code-profile set profile-isa \
         plugin=isa \
         ruleset-failure-domain=osd || return 1
-    ./ceph osd pool create $poolname 1 1 erasure profile-isa \
+    ceph osd pool create $poolname 1 1 erasure profile-isa \
         || return 1
 
     rados_put_get $dir $poolname || return 1
@@ -216,18 +216,18 @@ function TEST_rados_put_get_jerasure() {
     local poolname=pool-jerasure
     local profile=profile-jerasure
 
-    ./ceph osd erasure-code-profile set $profile \
+    ceph osd erasure-code-profile set $profile \
         plugin=jerasure \
         k=4 m=2 \
         ruleset-failure-domain=osd || return 1
-    ./ceph osd pool create $poolname 12 12 erasure $profile \
+    ceph osd pool create $poolname 12 12 erasure $profile \
         || return 1
 
     rados_put_get $dir $poolname || return 1
     rados_osds_out_in $dir $poolname || return 1
 
     delete_pool $poolname
-    ./ceph osd erasure-code-profile rm $profile
+    ceph osd erasure-code-profile rm $profile
 }
 
 function TEST_rados_put_get_shec() {
@@ -236,17 +236,17 @@ function TEST_rados_put_get_shec() {
     local poolname=pool-shec
     local profile=profile-shec
 
-    ./ceph osd erasure-code-profile set $profile \
+    ceph osd erasure-code-profile set $profile \
         plugin=shec \
         k=2 m=1 c=1 \
         ruleset-failure-domain=osd || return 1
-    ./ceph osd pool create $poolname 12 12 erasure $profile \
+    ceph osd pool create $poolname 12 12 erasure $profile \
         || return 1
 
     rados_put_get $dir $poolname || return 1
 
     delete_pool $poolname
-    ./ceph osd erasure-code-profile rm $profile
+    ceph osd erasure-code-profile rm $profile
 }
 
 function TEST_alignment_constraints() {
@@ -257,17 +257,17 @@ function TEST_alignment_constraints() {
     # imposed by the stripe width
     # See http://tracker.ceph.com/issues/8622
     #
-    local stripe_width=$(./ceph-conf --show-config-value osd_pool_erasure_code_stripe_width)
+    local stripe_width=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_width)
     local block_size=$((stripe_width - 1))
     dd if=/dev/zero of=$dir/ORIGINAL bs=$block_size count=2
-    ./rados --block-size=$block_size \
+    rados --block-size=$block_size \
         --pool ecpool put UNALIGNED $dir/ORIGINAL || return 1
     rm $dir/ORIGINAL
 }
 
 function chunk_size() {
-    local stripe_width=$(./ceph-conf --show-config-value osd_pool_erasure_code_stripe_width)
-    eval local $(./ceph osd erasure-code-profile get default | grep k=)
+    local stripe_width=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_width)
+    eval local $(ceph osd erasure-code-profile get default | grep k=)
     echo $(($stripe_width / $k))
 }
 
@@ -287,11 +287,11 @@ function verify_chunk_mapping() {
     local payload=$(printf '%*s' $(chunk_size) FIRST$poolname ; printf '%*s' $(chunk_size) SECOND$poolname)
     echo -n "$payload" > $dir/ORIGINAL
 
-    ./rados --pool $poolname put SOMETHING$poolname $dir/ORIGINAL || return 1
-    ./rados --pool $poolname get SOMETHING$poolname $dir/COPY || return 1
+    rados --pool $poolname put SOMETHING$poolname $dir/ORIGINAL || return 1
+    rados --pool $poolname get SOMETHING$poolname $dir/COPY || return 1
     local -a osds=($(get_osds $poolname SOMETHING$poolname))
     for (( i = 0; i < ${#osds[@]}; i++ )) ; do
-        ./ceph daemon osd.${osds[$i]} flush_journal
+        ceph daemon osd.${osds[$i]} flush_journal
     done
     diff $dir/ORIGINAL $dir/COPY || return 1
     rm $dir/COPY
@@ -311,13 +311,13 @@ function TEST_chunk_mapping() {
     #
     verify_chunk_mapping $dir ecpool 0 1 || return 1
 
-    ./ceph osd erasure-code-profile set remap-profile \
+    ceph osd erasure-code-profile set remap-profile \
         plugin=lrc \
         layers='[ [ "_DD", "" ] ]' \
         mapping='_DD' \
         ruleset-steps='[ [ "choose", "osd", 0 ] ]' || return 1
-    ./ceph osd erasure-code-profile get remap-profile
-    ./ceph osd pool create remap-pool 12 12 erasure remap-profile \
+    ceph osd erasure-code-profile get remap-profile
+    ceph osd pool create remap-pool 12 12 erasure remap-profile \
         || return 1
 
     #
@@ -328,7 +328,7 @@ function TEST_chunk_mapping() {
     verify_chunk_mapping $dir remap-pool 1 2 || return 1
 
     delete_pool remap-pool
-    ./ceph osd erasure-code-profile rm remap-profile
+    ceph osd erasure-code-profile rm remap-profile
 }
 
 main test-erasure-code "$@"
diff --git a/src/test/erasure-code/test-erasure-eio.sh b/src/test/erasure-code/test-erasure-eio.sh
index 32a6e17..7db60cd 100755
--- a/src/test/erasure-code/test-erasure-eio.sh
+++ b/src/test/erasure-code/test-erasure-eio.sh
@@ -16,7 +16,7 @@
 # GNU Library Public License for more details.
 #
 
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -32,7 +32,7 @@ function run() {
         setup $dir || return 1
         run_mon $dir a || return 1
         # check that erasure code plugins are preloaded
-        CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+        CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
         grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
         $func $dir || return 1
         teardown $dir || return 1
@@ -53,18 +53,18 @@ function setup_osds() {
     wait_for_clean || return 1
 
     # check that erasure code plugins are preloaded
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
     grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
 }
 
 function create_erasure_coded_pool() {
     local poolname=$1
 
-    ./ceph osd erasure-code-profile set myprofile \
+    ceph osd erasure-code-profile set myprofile \
         plugin=jerasure \
         k=2 m=1 \
         ruleset-failure-domain=osd || return 1
-    ./ceph osd pool create $poolname 1 1 erasure myprofile \
+    ceph osd pool create $poolname 1 1 erasure myprofile \
         || return 1
     wait_for_clean || return 1
 }
@@ -72,8 +72,8 @@ function create_erasure_coded_pool() {
 function delete_pool() {
     local poolname=$1
 
-    ./ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
-    ./ceph osd erasure-code-profile rm myprofile
+    ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
+    ceph osd erasure-code-profile rm myprofile
 }
 
 function rados_put() {
@@ -87,7 +87,7 @@ function rados_put() {
     #
     # get and put an object, compare they are equal
     #
-    ./rados --pool $poolname put $objname $dir/ORIGINAL || return 1
+    rados --pool $poolname put $objname $dir/ORIGINAL || return 1
 }
 
 function rados_get() {
@@ -101,13 +101,13 @@ function rados_get() {
     #
     if [ $expect = "1" ];
     then
-        ! ./rados --pool $poolname get $objname $dir/COPY
+        ! rados --pool $poolname get $objname $dir/COPY
         return
     fi
     #
     # get an object, compare with $dir/ORIGINAL
     #
-    ./rados --pool $poolname get $objname $dir/COPY || return 1
+    rados --pool $poolname get $objname $dir/COPY || return 1
     diff $dir/ORIGINAL $dir/COPY || return 1
     rm $dir/COPY
 }
@@ -134,10 +134,10 @@ function rados_put_get() {
         #
         local -a initial_osds=($(get_osds $poolname $objname))
         local last=$((${#initial_osds[@]} - 1))
-        ./ceph osd out ${initial_osds[$last]} || return 1
+        ceph osd out ${initial_osds[$last]} || return 1
         ! get_osds $poolname $objname | grep '\<'${initial_osds[$last]}'\>' || return 1
         rados_get $dir $poolname $objname $expect || return 1
-        ./ceph osd in ${initial_osds[$last]} || return 1
+        ceph osd in ${initial_osds[$last]} || return 1
     fi
 
     rm $dir/ORIGINAL
@@ -155,7 +155,7 @@ function inject_eio() {
     local -a initial_osds=($(get_osds $poolname $objname))
     local osd_id=${initial_osds[$shard_id]}
     set_config osd $osd_id filestore_debug_inject_read_err true || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-osd.$osd_id.asok \
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.$osd_id.asok \
              injectdataerr $poolname $objname $shard_id || return 1
 }
 
diff --git a/src/test/fedora-21/ceph.spec.in b/src/test/fedora-21/ceph.spec.in
index 3a5a6f7..26928f7 100644
--- a/src/test/fedora-21/ceph.spec.in
+++ b/src/test/fedora-21/ceph.spec.in
@@ -1040,6 +1040,7 @@ if ! getent passwd ceph >/dev/null ; then
     CEPH_USER_ID_OPTION=""
     getent passwd $CEPH_USER_ID >/dev/null || CEPH_USER_ID_OPTION="-u $CEPH_USER_ID"
     useradd ceph $CEPH_USER_ID_OPTION -r -g ceph -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/ceph 2>/dev/null || :
+fi
 %endif
 exit 0
 
diff --git a/src/test/librados_test_stub/TestClassHandler.cc b/src/test/librados_test_stub/TestClassHandler.cc
index 4f66e1e..ea595ab 100644
--- a/src/test/librados_test_stub/TestClassHandler.cc
+++ b/src/test/librados_test_stub/TestClassHandler.cc
@@ -6,6 +6,8 @@
 #include <boost/algorithm/string/predicate.hpp>
 #include <dlfcn.h>
 #include <errno.h>
+#include <stdlib.h>
+#include <string.h>
 #include "common/debug.h"
 #include "include/assert.h"
 
@@ -43,7 +45,9 @@ void TestClassHandler::open_class(const std::string& name,
 void TestClassHandler::open_all_classes() {
   assert(m_class_handles.empty());
 
-  DIR *dir = ::opendir(".libs");
+  const char* env = getenv("CEPH_LIB");
+  std::string CEPH_LIB(env ? env : "lib");
+  DIR *dir = ::opendir(CEPH_LIB.c_str());
   if (dir == NULL) {
     assert(false);;
   }
@@ -58,7 +62,7 @@ void TestClassHandler::open_all_classes() {
       continue;
     }
     std::string class_name = name.substr(7, name.size() - 10);
-    open_class(class_name, ".libs/" + name);
+    open_class(class_name, CEPH_LIB + "/" + name);
   }
   closedir(dir);
 }
diff --git a/src/test/libradosstriper/rados-striper.sh b/src/test/libradosstriper/rados-striper.sh
index ed0e892..a2de948 100755
--- a/src/test/libradosstriper/rados-striper.sh
+++ b/src/test/libradosstriper/rados-striper.sh
@@ -14,7 +14,7 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
diff --git a/src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc b/src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc
index eb42feb..2ed8b8e 100644
--- a/src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc
+++ b/src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc
@@ -93,8 +93,8 @@ TEST_F(TestMockExclusiveLockReleaseRequest, Success) {
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
-  expect_block_writes(mock_image_ctx, 0);
   expect_cancel_op_requests(mock_image_ctx, 0);
+  expect_block_writes(mock_image_ctx, 0);
   expect_flush_notifies(mock_image_ctx);
 
   MockJournal *mock_journal = new MockJournal();
@@ -183,6 +183,7 @@ TEST_F(TestMockExclusiveLockReleaseRequest, BlockWritesError) {
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
+  expect_cancel_op_requests(mock_image_ctx, 0);
   expect_block_writes(mock_image_ctx, -EINVAL);
   expect_unblock_writes(mock_image_ctx);
 
@@ -204,8 +205,8 @@ TEST_F(TestMockExclusiveLockReleaseRequest, UnlockError) {
   expect_op_work_queue(mock_image_ctx);
 
   InSequence seq;
-  expect_block_writes(mock_image_ctx, 0);
   expect_cancel_op_requests(mock_image_ctx, 0);
+  expect_block_writes(mock_image_ctx, 0);
   expect_flush_notifies(mock_image_ctx);
 
   expect_unlock(mock_image_ctx, -EINVAL);
diff --git a/src/test/librbd/image/test_mock_RefreshRequest.cc b/src/test/librbd/image/test_mock_RefreshRequest.cc
index e362986..6af5f46 100644
--- a/src/test/librbd/image/test_mock_RefreshRequest.cc
+++ b/src/test/librbd/image/test_mock_RefreshRequest.cc
@@ -75,6 +75,7 @@ ACTION_P(TestFeatures, image_ctx) {
 
 ACTION_P(ShutDownExclusiveLock, image_ctx) {
   // shutting down exclusive lock will close object map and journal
+  image_ctx->exclusive_lock = nullptr;
   image_ctx->object_map = nullptr;
   image_ctx->journal = nullptr;
 }
@@ -277,6 +278,17 @@ public:
                           const std::string &snap_name, uint64_t snap_id) {
     EXPECT_CALL(mock_image_ctx, get_snap_id(snap_name)).WillOnce(Return(snap_id));
   }
+
+  void expect_block_writes(MockImageCtx &mock_image_ctx, int r) {
+    EXPECT_CALL(*mock_image_ctx.aio_work_queue, block_writes(_))
+                  .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
+  }
+
+  void expect_unblock_writes(MockImageCtx &mock_image_ctx) {
+    EXPECT_CALL(*mock_image_ctx.aio_work_queue, unblock_writes())
+                  .Times(1);
+  }
+
 };
 
 TEST_F(TestMockImageRefreshRequest, SuccessV1) {
@@ -615,8 +627,10 @@ TEST_F(TestMockImageRefreshRequest, DisableJournal) {
   expect_get_mutable_metadata(mock_image_ctx, 0);
   expect_get_flags(mock_image_ctx, 0);
   expect_refresh_parent_is_required(mock_refresh_parent_request, false);
+  expect_block_writes(mock_image_ctx, 0);
   expect_clear_require_lock_on_read(mock_image_ctx);
   expect_close_journal(mock_image_ctx, *mock_journal, 0);
+  expect_unblock_writes(mock_image_ctx);
 
   C_SaferCond ctx;
   MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, &ctx);
diff --git a/src/test/librbd/journal/test_Replay.cc b/src/test/librbd/journal/test_Replay.cc
index 1f822d6..f7e20f7 100644
--- a/src/test/librbd/journal/test_Replay.cc
+++ b/src/test/librbd/journal/test_Replay.cc
@@ -30,7 +30,14 @@ public:
       RWLock::WLocker owner_locker(ictx->owner_lock);
       ictx->exclusive_lock->request_lock(&lock_ctx);
     }
-    return lock_ctx.wait();
+    int r = lock_ctx.wait();
+    if (r < 0) {
+      return r;
+    }
+
+    C_SaferCond refresh_ctx;
+    ictx->state->refresh(&refresh_ctx);
+    return refresh_ctx.wait();
   }
 
   template<typename T>
@@ -129,6 +136,7 @@ TEST_F(TestJournalReplay, AioDiscardEvent) {
   // inject a discard operation into the journal
   inject_into_journal(ictx,
                       librbd::journal::AioDiscardEvent(0, payload.size()));
+  close_image(ictx);
 
   // re-open the journal so that it replays the new entry
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
@@ -153,6 +161,8 @@ TEST_F(TestJournalReplay, AioDiscardEvent) {
                       librbd::journal::AioDiscardEvent(0, payload.size()));
   inject_into_journal(ictx,
                       librbd::journal::AioDiscardEvent(0, payload.size()));
+  close_image(ictx);
+
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
   ASSERT_EQ(0, when_acquired_lock(ictx));
   get_journal_commit_position(ictx, &current_tag, &current_entry);
@@ -184,6 +194,7 @@ TEST_F(TestJournalReplay, AioWriteEvent) {
   payload_bl.append(payload);
   inject_into_journal(ictx,
       librbd::journal::AioWriteEvent(0, payload.size(), payload_bl));
+  close_image(ictx);
 
   // re-open the journal so that it replays the new entry
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
@@ -209,6 +220,8 @@ TEST_F(TestJournalReplay, AioWriteEvent) {
       librbd::journal::AioWriteEvent(0, payload.size(), payload_bl));
   inject_into_journal(ictx,
       librbd::journal::AioWriteEvent(0, payload.size(), payload_bl));
+  close_image(ictx);
+
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
   ASSERT_EQ(0, when_acquired_lock(ictx));
   get_journal_commit_position(ictx, &current_tag, &current_entry);
@@ -251,6 +264,7 @@ TEST_F(TestJournalReplay, AioFlushEvent) {
                                          payload.c_str(), 0);
   }
   ictx->journal = journal;
+  close_image(ictx);
 
   // re-open the journal so that it replays the new entry
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
@@ -278,6 +292,8 @@ TEST_F(TestJournalReplay, AioFlushEvent) {
   // replay several events and check the commit position
   inject_into_journal(ictx, librbd::journal::AioFlushEvent());
   inject_into_journal(ictx, librbd::journal::AioFlushEvent());
+  close_image(ictx);
+
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
   ASSERT_EQ(0, when_acquired_lock(ictx));
   get_journal_commit_position(ictx, &current_tag, &current_entry);
@@ -307,6 +323,7 @@ TEST_F(TestJournalReplay, SnapCreate) {
   // inject snapshot ops into journal
   inject_into_journal(ictx, librbd::journal::SnapCreateEvent(1, "snap"));
   inject_into_journal(ictx, librbd::journal::OpFinishEvent(1, 0));
+  close_image(ictx);
 
   // replay journal
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
@@ -345,6 +362,7 @@ TEST_F(TestJournalReplay, SnapProtect) {
   // inject snapshot ops into journal
   inject_into_journal(ictx, librbd::journal::SnapProtectEvent(1, "snap"));
   inject_into_journal(ictx, librbd::journal::OpFinishEvent(1, 0));
+  close_image(ictx);
 
   // replay journal
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
@@ -390,6 +408,7 @@ TEST_F(TestJournalReplay, SnapUnprotect) {
   // inject snapshot ops into journal
   inject_into_journal(ictx, librbd::journal::SnapUnprotectEvent(1, "snap"));
   inject_into_journal(ictx, librbd::journal::OpFinishEvent(1, 0));
+  close_image(ictx);
 
   // replay journal
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
@@ -435,6 +454,7 @@ TEST_F(TestJournalReplay, SnapRename) {
   // inject snapshot ops into journal
   inject_into_journal(ictx, librbd::journal::SnapRenameEvent(1, snap_id, "snap2"));
   inject_into_journal(ictx, librbd::journal::OpFinishEvent(1, 0));
+  close_image(ictx);
 
   // replay journal
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
@@ -475,6 +495,7 @@ TEST_F(TestJournalReplay, SnapRollback) {
   // inject snapshot ops into journal
   inject_into_journal(ictx, librbd::journal::SnapRollbackEvent(1, "snap"));
   inject_into_journal(ictx, librbd::journal::OpFinishEvent(1, 0));
+  close_image(ictx);
 
   // replay journal
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
@@ -509,6 +530,7 @@ TEST_F(TestJournalReplay, SnapRemove) {
   // inject snapshot ops into journal
   inject_into_journal(ictx, librbd::journal::SnapRemoveEvent(1, "snap"));
   inject_into_journal(ictx, librbd::journal::OpFinishEvent(1, 0));
+  close_image(ictx);
 
   // replay journal
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
@@ -548,6 +570,7 @@ TEST_F(TestJournalReplay, Rename) {
   std::string new_image_name(get_temp_image_name());
   inject_into_journal(ictx, librbd::journal::RenameEvent(1, new_image_name));
   inject_into_journal(ictx, librbd::journal::OpFinishEvent(1, 0));
+  close_image(ictx);
 
   // replay journal
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
@@ -580,6 +603,7 @@ TEST_F(TestJournalReplay, Resize) {
   // inject snapshot ops into journal
   inject_into_journal(ictx, librbd::journal::ResizeEvent(1, 16));
   inject_into_journal(ictx, librbd::journal::OpFinishEvent(1, 0));
+  close_image(ictx);
 
   // replay journal
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
@@ -621,6 +645,7 @@ TEST_F(TestJournalReplay, Flatten) {
   // inject snapshot ops into journal
   inject_into_journal(ictx2, librbd::journal::FlattenEvent(1));
   inject_into_journal(ictx2, librbd::journal::OpFinishEvent(1, 0));
+  close_image(ictx2);
 
   // replay journal
   ASSERT_EQ(0, open_image(clone_name, &ictx2));
diff --git a/src/test/librgw_file_nfsns.cc b/src/test/librgw_file_nfsns.cc
index 7f52c80..de8aa5f 100644
--- a/src/test/librgw_file_nfsns.cc
+++ b/src/test/librgw_file_nfsns.cc
@@ -262,6 +262,9 @@ TEST(LibRGW, SETUP_DIRS1) {
 	rc = rgw_mkdir(fs, dirs1_b.parent_fh, dirs1_b.name.c_str(), &st,
 		      create_mask, &dirs1_b.fh, RGW_MKDIR_FLAG_NONE);
 	ASSERT_EQ(rc, 0);
+      } else {
+	/* no top-level dir and can't create it--skip remaining tests */
+	return;
       }
     }
     dirs1_b.sync();
@@ -487,6 +490,12 @@ TEST(LibRGW, RGW_CROSSBUCKET_RENAME1) {
 TEST(LibRGW, BAD_DELETES_DIRS1) {
   if (do_dirs1) {
     int rc;
+
+    if (dirs_vec.size() == 0) {
+      /* skip */
+      return;
+    }
+
     if (do_delete) {
       /* try to unlink a non-empty directory (bucket) */
       rc = rgw_unlink(fs, dirs1_b.parent_fh, dirs1_b.name.c_str(),
@@ -494,7 +503,8 @@ TEST(LibRGW, BAD_DELETES_DIRS1) {
       ASSERT_NE(rc, 0);
     }
     /* try to unlink a non-empty directory (non-bucket) */
-    obj_rec& sdir_0 = get<1>(dirs_vec[0])[0];    ASSERT_EQ(sdir_0.name, "sdir_0");
+    obj_rec& sdir_0 = get<1>(dirs_vec[0])[0];
+    ASSERT_EQ(sdir_0.name, "sdir_0");
     ASSERT_TRUE(sdir_0.rgw_fh->is_dir());
     /* XXX we can't enforce this currently */
 #if 0
diff --git a/src/test/mon/misc.sh b/src/test/mon/misc.sh
index 1f8f755..53f7bff 100755
--- a/src/test/mon/misc.sh
+++ b/src/test/mon/misc.sh
@@ -15,7 +15,7 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
diff --git a/src/test/mon/mkfs.sh b/src/test/mon/mkfs.sh
index f165fc6..270736c 100755
--- a/src/test/mon/mkfs.sh
+++ b/src/test/mon/mkfs.sh
@@ -39,11 +39,11 @@ function teardown() {
 function mon_mkfs() {
     local fsid=$(uuidgen)
 
-    ./ceph-mon \
+    ceph-mon \
         --id $MON_ID \
         --fsid $fsid \
-        --erasure-code-dir=.libs \
-        --compression-dir=.libs \
+        --erasure-code-dir=$CEPH_LIB \
+        --compression-dir=$CEPH_LIB \
         --mkfs \
         --mon-data=$MON_DIR \
         --mon-initial-members=$MON_ID \
@@ -52,13 +52,13 @@ function mon_mkfs() {
 }
 
 function mon_run() {
-    ./ceph-mon \
+    ceph-mon \
         --id $MON_ID \
         --chdir= \
         --mon-osd-full-ratio=.99 \
         --mon-data-avail-crit=1 \
-        --erasure-code-dir=.libs \
-        --compression-dir=.libs \
+        --erasure-code-dir=$CEPH_LIB \
+        --compression-dir=$CEPH_LIB \
         --mon-data=$MON_DIR \
         --log-file=$MON_DIR/log \
         --mon-cluster-log-file=$MON_DIR/log \
@@ -81,12 +81,12 @@ function kill_daemons() {
 function auth_none() {
     mon_mkfs --auth-supported=none
 
-    ./ceph-mon \
+    ceph-mon \
         --id $MON_ID \
         --mon-osd-full-ratio=.99 \
         --mon-data-avail-crit=1 \
-        --erasure-code-dir=.libs \
-        --compression-dir=.libs \
+        --erasure-code-dir=$CEPH_LIB \
+        --compression-dir=$CEPH_LIB \
         --mon-data=$MON_DIR \
         --extract-monmap $MON_DIR/monmap
 
@@ -96,7 +96,7 @@ function auth_none() {
 
     mon_run --auth-supported=none
     
-    timeout $TIMEOUT ./ceph --mon-host $CEPH_MON mon stat || return 1
+    timeout $TIMEOUT ceph --mon-host $CEPH_MON mon stat || return 1
 }
 
 function auth_cephx_keyring() {
@@ -112,7 +112,7 @@ EOF
 
     mon_run
 
-    timeout $TIMEOUT ./ceph \
+    timeout $TIMEOUT ceph \
         --name mon. \
         --keyring $MON_DIR/keyring \
         --mon-host $CEPH_MON mon stat || return 1
@@ -124,7 +124,7 @@ function auth_cephx_key() {
 	return 1
     fi  
 
-    local key=$(./ceph-authtool --gen-print-key)
+    local key=$(ceph-authtool --gen-print-key)
 
     if mon_mkfs --key='corrupted key' ; then
         return 1
@@ -139,7 +139,7 @@ function auth_cephx_key() {
 
     mon_run
 
-    timeout $TIMEOUT ./ceph \
+    timeout $TIMEOUT ceph \
         --name mon. \
         --keyring $MON_DIR/keyring \
         --mon-host $CEPH_MON mon stat || return 1
@@ -149,12 +149,12 @@ function makedir() {
     local toodeep=$MON_DIR/toodeep
 
     # fail if recursive directory creation is needed
-    ./ceph-mon \
+    ceph-mon \
         --id $MON_ID \
         --mon-osd-full-ratio=.99 \
         --mon-data-avail-crit=1 \
-        --erasure-code-dir=.libs \
-        --compression-dir=.libs \
+        --compression-dir=$CEPH_LIB \
+        --erasure-code-dir=$CEPH_LIB \
         --mkfs \
         --mon-data=$toodeep 2>&1 | tee $DIR/makedir.log
     grep 'toodeep.*No such file' $DIR/makedir.log > /dev/null
diff --git a/src/test/mon/mon-created-time.sh b/src/test/mon/mon-created-time.sh
index b4f7d1a..0022a23 100755
--- a/src/test/mon/mon-created-time.sh
+++ b/src/test/mon/mon-created-time.sh
@@ -12,7 +12,7 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -36,13 +36,13 @@ function TEST_mon_created_time() {
 
     run_mon $dir a || return 1
 
-    ./ceph mon dump || return 1
+    ceph mon dump || return 1
 
-    if test "$(./ceph mon dump 2>/dev/null | sed -n '/created/p' | awk '{print $NF}')"x = ""x ; then
+    if test "$(ceph mon dump 2>/dev/null | sed -n '/created/p' | awk '{print $NF}')"x = ""x ; then
         return 1
     fi
  
-    if test "$(./ceph mon dump 2>/dev/null | sed -n '/created/p' | awk '{print $NF}')"x = "0.000000"x ; then
+    if test "$(ceph mon dump 2>/dev/null | sed -n '/created/p' | awk '{print $NF}')"x = "0.000000"x ; then
         return 1
     fi
 }
diff --git a/src/test/mon/mon-handle-forward.sh b/src/test/mon/mon-handle-forward.sh
index 18f6db5..f10e6e5 100755
--- a/src/test/mon/mon-handle-forward.sh
+++ b/src/test/mon/mon-handle-forward.sh
@@ -15,7 +15,7 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -33,21 +33,21 @@ function run() {
         run_mon $dir b --public-addr $MONB || return 1
     )
 
-    timeout 360 ./ceph --mon-host $MONA mon stat || return 1
+    timeout 360 ceph --mon-host $MONA mon stat || return 1
     # check that MONB is indeed a peon
-    ./ceph --admin-daemon $dir/ceph-mon.b.asok mon_status |
+    ceph --admin-daemon $dir/ceph-mon.b.asok mon_status |
        grep '"peon"' || return 1
     # when the leader ( MONA ) is used, there is no message forwarding
-    ./ceph --mon-host $MONA osd pool create POOL1 12 
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+    ceph --mon-host $MONA osd pool create POOL1 12 
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
     grep 'mon_command(.*"POOL1"' $dir/a/mon.a.log
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.b.asok log flush || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.b.asok log flush || return 1
     grep 'mon_command(.*"POOL1"' $dir/mon.b.log && return 1
     # when the peon ( MONB ) is used, the message is forwarded to the leader
-    ./ceph --mon-host $MONB osd pool create POOL2 12
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.b.asok log flush || return 1
+    ceph --mon-host $MONB osd pool create POOL2 12
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.b.asok log flush || return 1
     grep 'forward_request.*mon_command(.*"POOL2"' $dir/mon.b.log
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
     grep ' forward(mon_command(.*"POOL2"' $dir/mon.a.log
     # forwarded messages must retain features from the original connection
     features=$(sed -n -e 's|.*127.0.0.1:0.*accept features \([0-9][0-9]*\)|\1|p' < \
diff --git a/src/test/mon/mon-ping.sh b/src/test/mon/mon-ping.sh
index c27dc7b..d3adf5c 100755
--- a/src/test/mon/mon-ping.sh
+++ b/src/test/mon/mon-ping.sh
@@ -12,7 +12,7 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -36,7 +36,7 @@ function TEST_mon_ping() {
 
     run_mon $dir a || return 1
 
-    ./ceph ping mon.a || return 1
+    ceph ping mon.a || return 1
 }
 
 main mon-ping "$@"
diff --git a/src/test/mon/mon-scrub.sh b/src/test/mon/mon-scrub.sh
index b869839..b420539 100755
--- a/src/test/mon/mon-scrub.sh
+++ b/src/test/mon/mon-scrub.sh
@@ -15,7 +15,7 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -39,7 +39,7 @@ function TEST_mon_scrub() {
 
     run_mon $dir a || return 1
 
-    ./ceph mon scrub || return 1
+    ceph mon scrub || return 1
 }
 
 main mon-scrub "$@"
diff --git a/src/test/mon/osd-crush.sh b/src/test/mon/osd-crush.sh
index 4dbbd04..787fefb 100755
--- a/src/test/mon/osd-crush.sh
+++ b/src/test/mon/osd-crush.sh
@@ -15,7 +15,7 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -39,20 +39,20 @@ function TEST_crush_rule_create_simple() {
 
     run_mon $dir a || return 1
 
-    ./ceph --format xml osd crush rule dump replicated_ruleset | \
+    ceph --format xml osd crush rule dump replicated_ruleset | \
         egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
         grep '<op>choose_firstn</op><num>0</num><type>osd</type>' || return 1
     local ruleset=ruleset0
     local root=host1
-    ./ceph osd crush add-bucket $root host
+    ceph osd crush add-bucket $root host
     local failure_domain=osd
-    ./ceph osd crush rule create-simple $ruleset $root $failure_domain || return 1
-    ./ceph osd crush rule create-simple $ruleset $root $failure_domain 2>&1 | \
+    ceph osd crush rule create-simple $ruleset $root $failure_domain || return 1
+    ceph osd crush rule create-simple $ruleset $root $failure_domain 2>&1 | \
         grep "$ruleset already exists" || return 1
-    ./ceph --format xml osd crush rule dump $ruleset | \
+    ceph --format xml osd crush rule dump $ruleset | \
         egrep '<op>take</op><item>[^<]+</item><item_name>'$root'</item_name>' | \
         grep '<op>choose_firstn</op><num>0</num><type>'$failure_domain'</type>' || return 1
-    ./ceph osd crush rule rm $ruleset || return 1
+    ceph osd crush rule rm $ruleset || return 1
 }
 
 function TEST_crush_rule_dump() {
@@ -61,14 +61,14 @@ function TEST_crush_rule_dump() {
     run_mon $dir a || return 1
 
     local ruleset=ruleset1
-    ./ceph osd crush rule create-erasure $ruleset || return 1
+    ceph osd crush rule create-erasure $ruleset || return 1
     local expected
     expected="<rule_name>$ruleset</rule_name>"
-    ./ceph --format xml osd crush rule dump $ruleset | grep $expected || return 1
+    ceph --format xml osd crush rule dump $ruleset | grep $expected || return 1
     expected='"rule_name": "'$ruleset'"'
-    ./ceph osd crush rule dump | grep "$expected" || return 1
-    ! ./ceph osd crush rule dump non_existent_ruleset || return 1
-    ./ceph osd crush rule rm $ruleset || return 1
+    ceph osd crush rule dump | grep "$expected" || return 1
+    ! ceph osd crush rule dump non_existent_ruleset || return 1
+    ceph osd crush rule rm $ruleset || return 1
 }
 
 function TEST_crush_rule_rm() {
@@ -76,10 +76,10 @@ function TEST_crush_rule_rm() {
 
     run_mon $dir a || return 1
 
-    ./ceph osd crush rule create-erasure $ruleset default || return 1
-    ./ceph osd crush rule ls | grep $ruleset || return 1
-    ./ceph osd crush rule rm $ruleset || return 1
-    ! ./ceph osd crush rule ls | grep $ruleset || return 1
+    ceph osd crush rule create-erasure $ruleset default || return 1
+    ceph osd crush rule ls | grep $ruleset || return 1
+    ceph osd crush rule rm $ruleset || return 1
+    ! ceph osd crush rule ls | grep $ruleset || return 1
 }
 
 function TEST_crush_rule_create_erasure() {
@@ -93,32 +93,32 @@ function TEST_crush_rule_create_erasure() {
     #
     # create a new ruleset with the default profile, implicitly
     #
-    ./ceph osd crush rule create-erasure $ruleset || return 1
-    ./ceph osd crush rule create-erasure $ruleset 2>&1 | \
+    ceph osd crush rule create-erasure $ruleset || return 1
+    ceph osd crush rule create-erasure $ruleset 2>&1 | \
         grep "$ruleset already exists" || return 1
-    ./ceph --format xml osd crush rule dump $ruleset | \
+    ceph --format xml osd crush rule dump $ruleset | \
         egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
         grep '<op>chooseleaf_indep</op><num>0</num><type>host</type>' || return 1
-    ./ceph osd crush rule rm $ruleset || return 1
-    ! ./ceph osd crush rule ls | grep $ruleset || return 1
+    ceph osd crush rule rm $ruleset || return 1
+    ! ceph osd crush rule ls | grep $ruleset || return 1
     #
     # create a new ruleset with the default profile, explicitly
     #
-    ./ceph osd crush rule create-erasure $ruleset default || return 1
-    ./ceph osd crush rule ls | grep $ruleset || return 1
-    ./ceph osd crush rule rm $ruleset || return 1
-    ! ./ceph osd crush rule ls | grep $ruleset || return 1
+    ceph osd crush rule create-erasure $ruleset default || return 1
+    ceph osd crush rule ls | grep $ruleset || return 1
+    ceph osd crush rule rm $ruleset || return 1
+    ! ceph osd crush rule ls | grep $ruleset || return 1
     #
     # create a new ruleset and the default profile, implicitly
     #
-    ./ceph osd erasure-code-profile rm default || return 1
-    ! ./ceph osd erasure-code-profile ls | grep default || return 1
-    ./ceph osd crush rule create-erasure $ruleset || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+    ceph osd erasure-code-profile rm default || return 1
+    ! ceph osd erasure-code-profile ls | grep default || return 1
+    ceph osd crush rule create-erasure $ruleset || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
     grep 'profile set default' $dir/mon.a.log || return 1
-    ./ceph osd erasure-code-profile ls | grep default || return 1
-    ./ceph osd crush rule rm $ruleset || return 1
-    ! ./ceph osd crush rule ls | grep $ruleset || return 1
+    ceph osd erasure-code-profile ls | grep default || return 1
+    ceph osd crush rule rm $ruleset || return 1
+    ! ceph osd crush rule ls | grep $ruleset || return 1
     #
     # verify that if the crushmap contains a bogus ruleset,
     # it will prevent the creation of a pool.
@@ -127,32 +127,32 @@ function TEST_crush_rule_create_erasure() {
     ceph tell mon.\* injectargs --crushtool "false"
 
     expect_failure $dir "Error EINVAL" \
-        ./ceph osd pool create mypool 1 1 erasure || return 1
+        ceph osd pool create mypool 1 1 erasure || return 1
 }
 
 function check_ruleset_id_match_rule_id() {
     local rule_name=$1
-    rule_id=`./ceph osd crush rule dump $rule_name | grep "\"rule_id\":" | awk -F ":|," '{print int($2)}'`
-    ruleset_id=`./ceph osd crush rule dump $rule_name | grep "\"ruleset\":"| awk -F ":|," '{print int($2)}'`
+    rule_id=`ceph osd crush rule dump $rule_name | grep "\"rule_id\":" | awk -F ":|," '{print int($2)}'`
+    ruleset_id=`ceph osd crush rule dump $rule_name | grep "\"ruleset\":"| awk -F ":|," '{print int($2)}'`
     test $ruleset_id = $rule_id || return 1
 }
 
 function generate_manipulated_rules() {
     local dir=$1
-    ./ceph osd crush add-bucket $root host
-    ./ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
-    ./ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
-    ./ceph osd getcrushmap -o $dir/original_map
-    ./crushtool -d $dir/original_map -o $dir/decoded_original_map
+    ceph osd crush add-bucket $root host
+    ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
+    ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
+    ceph osd getcrushmap -o $dir/original_map
+    crushtool -d $dir/original_map -o $dir/decoded_original_map
     # manipulate the rulesets to make rule_id != ruleset_id
     sed -i 's/ruleset 0/ruleset 3/' $dir/decoded_original_map
     sed -i 's/ruleset 2/ruleset 0/' $dir/decoded_original_map
     sed -i 's/ruleset 1/ruleset 2/' $dir/decoded_original_map
 
-    ./crushtool -c $dir/decoded_original_map -o $dir/new_map
-    ./ceph osd setcrushmap -i $dir/new_map
+    crushtool -c $dir/decoded_original_map -o $dir/new_map
+    ceph osd setcrushmap -i $dir/new_map
 
-    ./ceph osd crush rule dump
+    ceph osd crush rule dump
 }
 
 function TEST_crush_ruleset_match_rule_when_creating() {
@@ -164,9 +164,9 @@ function TEST_crush_ruleset_match_rule_when_creating() {
 
     generate_manipulated_rules $dir
 
-    ./ceph osd crush rule create-simple special_rule_simple $root osd firstn || return 1
+    ceph osd crush rule create-simple special_rule_simple $root osd firstn || return 1
 
-    ./ceph osd crush rule dump
+    ceph osd crush rule dump
     #show special_rule_simple has same rule_id and ruleset_id
     check_ruleset_id_match_rule_id special_rule_simple || return 1
 }
@@ -178,11 +178,11 @@ function TEST_add_ruleset_failed() {
 
     local root=host1
 
-    ./ceph osd crush add-bucket $root host
-    ./ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
-    ./ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
-    ./ceph osd getcrushmap > $dir/crushmap || return 1
-    ./crushtool --decompile $dir/crushmap > $dir/crushmap.txt || return 1
+    ceph osd crush add-bucket $root host
+    ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
+    ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
+    ceph osd getcrushmap > $dir/crushmap || return 1
+    crushtool --decompile $dir/crushmap > $dir/crushmap.txt || return 1
     for i in $(seq 3 255)
         do
             cat <<EOF
@@ -197,9 +197,9 @@ rule test_rule$i {
 }
 EOF
     done >> $dir/crushmap.txt
-    ./crushtool --compile $dir/crushmap.txt -o $dir/crushmap || return 1
-    ./ceph osd setcrushmap -i $dir/crushmap  || return 1
-    ./ceph osd crush rule create-simple test_rule_nospace $root osd firstn 2>&1 | grep "Error ENOSPC" || return 1
+    crushtool --compile $dir/crushmap.txt -o $dir/crushmap || return 1
+    ceph osd setcrushmap -i $dir/crushmap  || return 1
+    ceph osd crush rule create-simple test_rule_nospace $root osd firstn 2>&1 | grep "Error ENOSPC" || return 1
 
 }
 
@@ -208,12 +208,12 @@ function TEST_crush_rename_bucket() {
 
     run_mon $dir a || return 1
 
-    ./ceph osd crush add-bucket host1 host
-    ! ./ceph osd tree | grep host2 || return 1
-    ./ceph osd crush rename-bucket host1 host2 || return 1
-    ./ceph osd tree | grep host2 || return 1
-    ./ceph osd crush rename-bucket host1 host2 || return 1 # idempotency
-    ./ceph osd crush rename-bucket nonexistent something 2>&1 | grep "Error ENOENT" || return 1
+    ceph osd crush add-bucket host1 host
+    ! ceph osd tree | grep host2 || return 1
+    ceph osd crush rename-bucket host1 host2 || return 1
+    ceph osd tree | grep host2 || return 1
+    ceph osd crush rename-bucket host1 host2 || return 1 # idempotency
+    ceph osd crush rename-bucket nonexistent something 2>&1 | grep "Error ENOENT" || return 1
 }
 
 function TEST_crush_reject_empty() {
@@ -224,17 +224,17 @@ function TEST_crush_reject_empty() {
 
     local empty_map=$dir/empty_map
     :> $empty_map.txt
-    ./crushtool -c $empty_map.txt -o $empty_map.map || return 1
+    crushtool -c $empty_map.txt -o $empty_map.map || return 1
     expect_failure $dir "Error EINVAL" \
-        ./ceph osd setcrushmap -i $empty_map.map || return 1
+        ceph osd setcrushmap -i $empty_map.map || return 1
 }
 
 function TEST_crush_tree() {
     local dir=$1
     run_mon $dir a || return 1
 
-    ./ceph osd crush tree --format=xml | \
-        $XMLSTARLET val -e -r test/mon/osd-crush-tree.rng - || return 1
+    ceph osd crush tree --format=xml | \
+        $XMLSTARLET val -e -r $CEPH_ROOT/src/test/mon/osd-crush-tree.rng - || return 1
 }
 
 # NB: disable me if i am too time consuming
@@ -254,7 +254,7 @@ function TEST_crush_repair_faulty_crushmap() {
 
     local empty_map=$dir/empty_map
     :> $empty_map.txt
-    ./crushtool -c $empty_map.txt -o $empty_map.map || return 1
+    crushtool -c $empty_map.txt -o $empty_map.map || return 1
 
     local crushtool_path_old=`ceph-conf --show-config-value crushtool`
     ceph tell mon.\* injectargs --crushtool "true"
@@ -267,7 +267,7 @@ function TEST_crush_repair_faulty_crushmap() {
     # vain, after mon.a is offline
     kill_daemons $dir || return 1
     # rewrite the monstore with the good crush map,
-    ./tools/ceph-monstore-update-crush.sh --rewrite $dir/a || return 1
+    $CEPH_ROOT/src/tools/ceph-monstore-update-crush.sh --rewrite $dir/a || return 1
 
     run_mon $dir a --public-addr $MONA || return 1
     run_mon $dir b --public-addr $MONB || return 1
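
Note on the hunks above: the pattern is consistent. The test scripts stop invoking build-tree binaries as ./ceph, ./crushtool, ./rados and so on, and instead expect them to be resolved from PATH, while helper scripts and fixtures are located through $CEPH_ROOT (and, later in this commit, $CEPH_BIN for explicit binary paths). As a rough sketch only (the paths below are assumptions for illustration, not taken from this commit), running one of these tests standalone might look like:

    # Hypothetical setup; point the variables at your own checkout and build tree.
    export CEPH_ROOT=$HOME/ceph              # source checkout; provides qa/workunits/ceph-helpers.sh
    export CEPH_BIN=$CEPH_ROOT/build/bin     # built binaries: ceph, crushtool, ceph-monstore-tool, ...
    export PATH=$CEPH_BIN:$PATH              # so bare "ceph"/"crushtool" resolve to the build
    bash $CEPH_ROOT/src/test/mon/osd-crush.sh /tmp/osd-crush-test
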
diff --git a/src/test/mon/osd-erasure-code-profile.sh b/src/test/mon/osd-erasure-code-profile.sh
index 8e9491f..65c410a 100755
--- a/src/test/mon/osd-erasure-code-profile.sh
+++ b/src/test/mon/osd-erasure-code-profile.sh
@@ -15,7 +15,7 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -44,27 +44,27 @@ function TEST_set() {
     #
     # no key=value pairs : use the default configuration
     #
-    ./ceph osd erasure-code-profile set $profile 2>&1 || return 1
-    ./ceph osd erasure-code-profile get $profile | \
+    ceph osd erasure-code-profile set $profile 2>&1 || return 1
+    ceph osd erasure-code-profile get $profile | \
         grep plugin=jerasure || return 1
-    ./ceph osd erasure-code-profile rm $profile
+    ceph osd erasure-code-profile rm $profile
     #
     # key=value pairs override the default
     #
-    ./ceph osd erasure-code-profile set $profile \
+    ceph osd erasure-code-profile set $profile \
         key=value plugin=example || return 1
-    ./ceph osd erasure-code-profile get $profile | \
+    ceph osd erasure-code-profile get $profile | \
         grep -e key=value -e plugin=example || return 1
     #
     # --force is required to override an existing profile
     #
-    ! ./ceph osd erasure-code-profile set $profile > $dir/out 2>&1 || return 1
+    ! ceph osd erasure-code-profile set $profile > $dir/out 2>&1 || return 1
     grep 'will not override' $dir/out || return 1
-    ./ceph osd erasure-code-profile set $profile key=other --force || return 1
-    ./ceph osd erasure-code-profile get $profile | \
+    ceph osd erasure-code-profile set $profile key=other --force || return 1
+    ceph osd erasure-code-profile get $profile | \
         grep key=other || return 1
 
-    ./ceph osd erasure-code-profile rm $profile # cleanup
+    ceph osd erasure-code-profile rm $profile # cleanup
 }
 
 function TEST_ls() {
@@ -74,13 +74,13 @@ function TEST_ls() {
     run_mon $dir a || return 1
 
     local profile=myprofile
-    ! ./ceph osd erasure-code-profile ls | grep $profile || return 1
-    ./ceph osd erasure-code-profile set $profile 2>&1 || return 1
-    ./ceph osd erasure-code-profile ls | grep $profile || return 1
-    ./ceph --format xml osd erasure-code-profile ls | \
+    ! ceph osd erasure-code-profile ls | grep $profile || return 1
+    ceph osd erasure-code-profile set $profile 2>&1 || return 1
+    ceph osd erasure-code-profile ls | grep $profile || return 1
+    ceph --format xml osd erasure-code-profile ls | \
         grep "<profile>$profile</profile>" || return 1
 
-    ./ceph osd erasure-code-profile rm $profile # cleanup
+    ceph osd erasure-code-profile rm $profile # cleanup
 }
 
 function TEST_rm() {
@@ -90,21 +90,21 @@ function TEST_rm() {
     run_mon $dir a || return 1
 
     local profile=myprofile
-    ./ceph osd erasure-code-profile set $profile 2>&1 || return 1
-    ./ceph osd erasure-code-profile ls | grep $profile || return 1
-    ./ceph osd erasure-code-profile rm $profile || return 1
-    ! ./ceph osd erasure-code-profile ls | grep $profile || return 1
-    ./ceph osd erasure-code-profile rm WRONG 2>&1 | \
+    ceph osd erasure-code-profile set $profile 2>&1 || return 1
+    ceph osd erasure-code-profile ls | grep $profile || return 1
+    ceph osd erasure-code-profile rm $profile || return 1
+    ! ceph osd erasure-code-profile ls | grep $profile || return 1
+    ceph osd erasure-code-profile rm WRONG 2>&1 | \
         grep "WRONG does not exist" || return 1
 
-    ./ceph osd erasure-code-profile set $profile || return 1
-    ./ceph osd pool create poolname 12 12 erasure $profile || return 1
-    ! ./ceph osd erasure-code-profile rm $profile > $dir/out 2>&1 || return 1
+    ceph osd erasure-code-profile set $profile || return 1
+    ceph osd pool create poolname 12 12 erasure $profile || return 1
+    ! ceph osd erasure-code-profile rm $profile > $dir/out 2>&1 || return 1
     grep "poolname.*using.*$profile" $dir/out || return 1
-    ./ceph osd pool delete poolname poolname --yes-i-really-really-mean-it || return 1
-    ./ceph osd erasure-code-profile rm $profile || return 1
+    ceph osd pool delete poolname poolname --yes-i-really-really-mean-it || return 1
+    ceph osd erasure-code-profile rm $profile || return 1
 
-    ./ceph osd erasure-code-profile rm $profile # cleanup
+    ceph osd erasure-code-profile rm $profile # cleanup
 }
 
 function TEST_get() {
@@ -114,11 +114,11 @@ function TEST_get() {
     run_mon $dir a || return 1
 
     local default_profile=default
-    ./ceph osd erasure-code-profile get $default_profile | \
+    ceph osd erasure-code-profile get $default_profile | \
         grep plugin=jerasure || return 1
-    ./ceph --format xml osd erasure-code-profile get $default_profile | \
+    ceph --format xml osd erasure-code-profile get $default_profile | \
         grep '<plugin>jerasure</plugin>' || return 1
-    ! ./ceph osd erasure-code-profile get WRONG > $dir/out 2>&1 || return 1
+    ! ceph osd erasure-code-profile get WRONG > $dir/out 2>&1 || return 1
     grep -q "unknown erasure code profile 'WRONG'" $dir/out || return 1
 }
 
@@ -132,7 +132,7 @@ function TEST_set_idempotent() {
     # ceph osd erasure-code-profile set: verify that it is idempotent,
     # as if it was using the same code path.
     #
-    ./ceph osd erasure-code-profile set default k=2 m=1 2>&1 || return 1
+    ceph osd erasure-code-profile set default k=2 m=1 2>&1 || return 1
     local profile
     #
     # Because plugin=jerasure is the default, it uses a slightly
@@ -140,21 +140,21 @@ function TEST_set_idempotent() {
     # implicitly.
     #
     profile=profileidempotent1
-    ! ./ceph osd erasure-code-profile ls | grep $profile || return 1
-    ./ceph osd erasure-code-profile set $profile k=2 ruleset-failure-domain=osd 2>&1 || return 1
-    ./ceph osd erasure-code-profile ls | grep $profile || return 1
-    ./ceph osd erasure-code-profile set $profile k=2 ruleset-failure-domain=osd 2>&1 || return 1
-    ./ceph osd erasure-code-profile rm $profile # cleanup
+    ! ceph osd erasure-code-profile ls | grep $profile || return 1
+    ceph osd erasure-code-profile set $profile k=2 ruleset-failure-domain=osd 2>&1 || return 1
+    ceph osd erasure-code-profile ls | grep $profile || return 1
+    ceph osd erasure-code-profile set $profile k=2 ruleset-failure-domain=osd 2>&1 || return 1
+    ceph osd erasure-code-profile rm $profile # cleanup
 
     #
     # In the general case the profile is exactly what is on
     #
     profile=profileidempotent2
-    ! ./ceph osd erasure-code-profile ls | grep $profile || return 1
-    ./ceph osd erasure-code-profile set $profile plugin=lrc k=4 m=2 l=3 ruleset-failure-domain=osd 2>&1 || return 1
-    ./ceph osd erasure-code-profile ls | grep $profile || return 1
-    ./ceph osd erasure-code-profile set $profile plugin=lrc k=4 m=2 l=3 ruleset-failure-domain=osd 2>&1 || return 1
-    ./ceph osd erasure-code-profile rm $profile # cleanup
+    ! ceph osd erasure-code-profile ls | grep $profile || return 1
+    ceph osd erasure-code-profile set $profile plugin=lrc k=4 m=2 l=3 ruleset-failure-domain=osd 2>&1 || return 1
+    ceph osd erasure-code-profile ls | grep $profile || return 1
+    ceph osd erasure-code-profile set $profile plugin=lrc k=4 m=2 l=3 ruleset-failure-domain=osd 2>&1 || return 1
+    ceph osd erasure-code-profile rm $profile # cleanup
 }
 
 function TEST_format_invalid() {
@@ -165,7 +165,7 @@ function TEST_format_invalid() {
     # valid JSON but not of the expected type
     run_mon $dir a \
         --osd_pool_default_erasure-code-profile 1 || return 1
-    ! ./ceph osd erasure-code-profile set $profile > $dir/out 2>&1 || return 1
+    ! ceph osd erasure-code-profile set $profile > $dir/out 2>&1 || return 1
     cat $dir/out
     grep 'must be a JSON object' $dir/out || return 1
 }
@@ -177,7 +177,7 @@ function TEST_format_json() {
     expected='"plugin":"example"'
     run_mon $dir a \
         --osd_pool_default_erasure-code-profile "{$expected}" || return 1
-    ./ceph --format json osd erasure-code-profile get default | \
+    ceph --format json osd erasure-code-profile get default | \
         grep "$expected" || return 1
 }
 
@@ -188,7 +188,7 @@ function TEST_format_plain() {
     expected='"plugin":"example"'
     run_mon $dir a \
         --osd_pool_default_erasure-code-profile "plugin=example" || return 1
-    ./ceph --format json osd erasure-code-profile get default | \
+    ceph --format json osd erasure-code-profile get default | \
         grep "$expected" || return 1
 }
 
@@ -199,7 +199,7 @@ function TEST_profile_k_sanity() {
     run_mon $dir a || return 1
 
     expect_failure $dir 'k must be a multiple of (k + m) / l' \
-        ./ceph osd erasure-code-profile set $profile \
+        ceph osd erasure-code-profile set $profile \
         plugin=lrc \
         l=1 \
         k=1 \
@@ -207,7 +207,7 @@ function TEST_profile_k_sanity() {
 
     if erasure_code_plugin_exists isa ; then
         expect_failure $dir 'k=1 must be >= 2' \
-            ./ceph osd erasure-code-profile set $profile \
+            ceph osd erasure-code-profile set $profile \
             plugin=isa \
             k=1 \
             m=1 || return 1
@@ -216,7 +216,7 @@ function TEST_profile_k_sanity() {
     fi
 
     expect_failure $dir 'k=1 must be >= 2' \
-        ./ceph osd erasure-code-profile set $profile \
+        ceph osd erasure-code-profile set $profile \
         plugin=jerasure \
         k=1 \
         m=1 || return 1
diff --git a/src/test/mon/osd-pool-create.sh b/src/test/mon/osd-pool-create.sh
index 0d11a23..4a19d13 100755
--- a/src/test/mon/osd-pool-create.sh
+++ b/src/test/mon/osd-pool-create.sh
@@ -15,7 +15,7 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -40,9 +40,9 @@ function TEST_default_deprectated_0() {
     local expected=66
     run_mon $dir a \
         --osd_pool_default_crush_replicated_ruleset $expected || return 1
-    ./ceph osd pool get rbd crush_ruleset | grep 'ruleset: '$expected || return 1
-    ./ceph osd crush rule dump replicated_ruleset | grep '"ruleset": '$expected || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+    ceph osd pool get rbd crush_ruleset | grep 'ruleset: '$expected || return 1
+    ceph osd crush rule dump replicated_ruleset | grep '"ruleset": '$expected || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
     ! grep "osd_pool_default_crush_rule is deprecated " $dir/mon.a.log || return 1
 }
 
@@ -52,9 +52,9 @@ function TEST_default_deprectated_1() {
     local expected=55
     run_mon $dir a \
         --osd_pool_default_crush_rule $expected || return 1
-    ./ceph osd pool get rbd crush_ruleset | grep 'ruleset: '$expected || return 1
-    ./ceph osd crush rule dump replicated_ruleset | grep '"ruleset": '$expected || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+    ceph osd pool get rbd crush_ruleset | grep 'ruleset: '$expected || return 1
+    ceph osd crush rule dump replicated_ruleset | grep '"ruleset": '$expected || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
     grep "osd_pool_default_crush_rule is deprecated " $dir/mon.a.log || return 1
 }
 
@@ -65,10 +65,10 @@ function TEST_default_deprectated_2() {
     run_mon $dir a \
         --osd_pool_default_crush_rule $expected \
         --osd_pool_default_crush_replicated_ruleset $unexpected || return 1
-    ./ceph osd pool get rbd crush_ruleset | grep 'ruleset: '$expected || return 1
-    ! ./ceph --format json osd dump | grep '"crush_ruleset":'$unexpected || return 1
-    ./ceph osd crush rule dump replicated_ruleset | grep '"ruleset": '$expected || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+    ceph osd pool get rbd crush_ruleset | grep 'ruleset: '$expected || return 1
+    ! ceph --format json osd dump | grep '"crush_ruleset":'$unexpected || return 1
+    ceph osd crush rule dump replicated_ruleset | grep '"ruleset": '$expected || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
     grep "osd_pool_default_crush_rule is deprecated " $dir/mon.a.log || return 1
 }
 
@@ -78,8 +78,8 @@ function TEST_erasure_invalid_profile() {
     run_mon $dir a || return 1
     local poolname=pool_erasure
     local notaprofile=not-a-valid-erasure-code-profile
-    ! ./ceph osd pool create $poolname 12 12 erasure $notaprofile || return 1
-    ! ./ceph osd erasure-code-profile ls | grep $notaprofile || return 1
+    ! ceph osd pool create $poolname 12 12 erasure $notaprofile || return 1
+    ! ceph osd erasure-code-profile ls | grep $notaprofile || return 1
 }
 
 function TEST_erasure_crush_rule() {
@@ -89,45 +89,45 @@ function TEST_erasure_crush_rule() {
     # choose the crush ruleset used with an erasure coded pool
     #
     local crush_ruleset=myruleset
-    ! ./ceph osd crush rule ls | grep $crush_ruleset || return 1
-    ./ceph osd crush rule create-erasure $crush_ruleset
-    ./ceph osd crush rule ls | grep $crush_ruleset
+    ! ceph osd crush rule ls | grep $crush_ruleset || return 1
+    ceph osd crush rule create-erasure $crush_ruleset
+    ceph osd crush rule ls | grep $crush_ruleset
     local poolname
     poolname=pool_erasure1
-    ! ./ceph --format json osd dump | grep '"crush_ruleset":1' || return 1
-    ./ceph osd pool create $poolname 12 12 erasure default $crush_ruleset
-    ./ceph --format json osd dump | grep '"crush_ruleset":1' || return 1
+    ! ceph --format json osd dump | grep '"crush_ruleset":1' || return 1
+    ceph osd pool create $poolname 12 12 erasure default $crush_ruleset
+    ceph --format json osd dump | grep '"crush_ruleset":1' || return 1
     #
     # a crush ruleset by the name of the pool is implicitly created
     #
     poolname=pool_erasure2
-    ./ceph osd erasure-code-profile set myprofile
-    ./ceph osd pool create $poolname 12 12 erasure myprofile
-    ./ceph osd crush rule ls | grep $poolname || return 1
+    ceph osd erasure-code-profile set myprofile
+    ceph osd pool create $poolname 12 12 erasure myprofile
+    ceph osd crush rule ls | grep $poolname || return 1
     #
     # a non existent crush ruleset given in argument is an error
     # http://tracker.ceph.com/issues/9304
     #
     poolname=pool_erasure3
-    ! ./ceph osd pool create $poolname 12 12 erasure myprofile INVALIDRULESET || return 1
+    ! ceph osd pool create $poolname 12 12 erasure myprofile INVALIDRULESET || return 1
 }
 
 function TEST_erasure_code_profile_default() {
     local dir=$1
     run_mon $dir a || return 1
-    ./ceph osd erasure-code-profile rm default || return 1
-    ! ./ceph osd erasure-code-profile ls | grep default || return 1
-    ./ceph osd pool create $poolname 12 12 erasure default
-    ./ceph osd erasure-code-profile ls | grep default || return 1
+    ceph osd erasure-code-profile rm default || return 1
+    ! ceph osd erasure-code-profile ls | grep default || return 1
+    ceph osd pool create $poolname 12 12 erasure default
+    ceph osd erasure-code-profile ls | grep default || return 1
 }
 
 function TEST_erasure_crush_stripe_width() {
     local dir=$1
     # the default stripe width is used to initialize the pool
     run_mon $dir a --public-addr $CEPH_MON
-    stripe_width=$(./ceph-conf --show-config-value osd_pool_erasure_code_stripe_width)
-    ./ceph osd pool create pool_erasure 12 12 erasure
-    ./ceph --format json osd dump | tee $dir/osd.json
+    stripe_width=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_width)
+    ceph osd pool create pool_erasure 12 12 erasure
+    ceph --format json osd dump | tee $dir/osd.json
     grep '"stripe_width":'$stripe_width $dir/osd.json > /dev/null || return 1
 }
 
@@ -146,24 +146,24 @@ function TEST_erasure_crush_stripe_width_padded() {
     run_mon $dir a \
         --osd_pool_erasure_code_stripe_width $desired_stripe_width \
         --osd_pool_default_erasure_code_profile "$profile" || return 1
-    ./ceph osd pool create pool_erasure 12 12 erasure
-    ./ceph osd dump | tee $dir/osd.json
+    ceph osd pool create pool_erasure 12 12 erasure
+    ceph osd dump | tee $dir/osd.json
     grep "stripe_width $actual_stripe_width" $dir/osd.json > /dev/null || return 1
 }
 
 function TEST_erasure_code_pool() {
     local dir=$1
     run_mon $dir a || return 1
-    ./ceph --format json osd dump > $dir/osd.json
+    ceph --format json osd dump > $dir/osd.json
     local expected='"erasure_code_profile":"default"'
     ! grep "$expected" $dir/osd.json || return 1
-    ./ceph osd pool create erasurecodes 12 12 erasure
-    ./ceph --format json osd dump | tee $dir/osd.json
+    ceph osd pool create erasurecodes 12 12 erasure
+    ceph --format json osd dump | tee $dir/osd.json
     grep "$expected" $dir/osd.json > /dev/null || return 1
 
-    ./ceph osd pool create erasurecodes 12 12 erasure 2>&1 | \
+    ceph osd pool create erasurecodes 12 12 erasure 2>&1 | \
         grep 'already exists' || return 1
-    ./ceph osd pool create erasurecodes 12 12 2>&1 | \
+    ceph osd pool create erasurecodes 12 12 2>&1 | \
         grep 'cannot change to type replicated' || return 1
 }
 
@@ -172,18 +172,18 @@ function TEST_replicated_pool_with_ruleset() {
     run_mon $dir a
     local ruleset=ruleset0
     local root=host1
-    ./ceph osd crush add-bucket $root host
+    ceph osd crush add-bucket $root host
     local failure_domain=osd
     local poolname=mypool
-    ./ceph osd crush rule create-simple $ruleset $root $failure_domain || return 1
-    ./ceph osd crush rule ls | grep $ruleset
-    ./ceph osd pool create $poolname 12 12 replicated $ruleset 2>&1 | \
+    ceph osd crush rule create-simple $ruleset $root $failure_domain || return 1
+    ceph osd crush rule ls | grep $ruleset
+    ceph osd pool create $poolname 12 12 replicated $ruleset 2>&1 | \
         grep "pool 'mypool' created" || return 1
-    rule_id=`./ceph osd crush rule dump $ruleset | grep "rule_id" | awk -F[' ':,] '{print $4}'`
-    ./ceph osd pool get $poolname crush_ruleset  2>&1 | \
+    rule_id=`ceph osd crush rule dump $ruleset | grep "rule_id" | awk -F[' ':,] '{print $4}'`
+    ceph osd pool get $poolname crush_ruleset  2>&1 | \
         grep "crush_ruleset: $rule_id" || return 1
     #non-existent crush ruleset
-    ./ceph osd pool create newpool 12 12 replicated non-existent 2>&1 | \
+    ceph osd pool create newpool 12 12 replicated non-existent 2>&1 | \
         grep "doesn't exist" || return 1
 }
 
@@ -191,11 +191,11 @@ function TEST_replicated_pool_with_non_existent_default_ruleset_0() {
     local dir=$1
     run_mon $dir a || return 1
     # change the default crush rule
-    ./ceph tell mon.a injectargs -- \
+    ceph tell mon.a injectargs -- \
         --osd_pool_default_crush_replicated_ruleset 66 || return 1
-    ./ceph osd pool create mypool 12 12 replicated 2>&1 | \
+    ceph osd pool create mypool 12 12 replicated 2>&1 | \
         grep "No suitable CRUSH ruleset exists" || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
     ! grep "osd_pool_default_crush_rule is deprecated " $dir/mon.a.log || return 1
 }
 
@@ -203,23 +203,23 @@ function TEST_replicated_pool_with_non_existent_default_ruleset_1() {
     local dir=$1
     run_mon $dir a || return 1
     # change the default crush rule using deprecated option
-    ./ceph tell mon.a injectargs -- \
+    ceph tell mon.a injectargs -- \
         --osd_pool_default_crush_rule 55 || return 1
-    ./ceph osd pool create mypool 12 12 replicated 2>&1 | \
+    ceph osd pool create mypool 12 12 replicated 2>&1 | \
         grep "No suitable CRUSH ruleset exists" || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
     grep "osd_pool_default_crush_rule is deprecated " $dir/mon.a.log || return 1
 }
 
 function TEST_replicated_pool_with_non_existent_default_ruleset_2() {
     local dir=$1
     run_mon $dir a || return 1
-    ./ceph tell mon.a injectargs -- \
+    ceph tell mon.a injectargs -- \
         --osd_pool_default_crush_rule 77 \
         --osd_pool_default_crush_replicated_ruleset 33 || return 1
-    ./ceph osd pool create mypool 12 12 replicated 2>&1 | \
+    ceph osd pool create mypool 12 12 replicated 2>&1 | \
         grep "No suitable CRUSH ruleset exists" || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
     grep "osd_pool_default_crush_rule is deprecated " $dir/mon.a.log || return 1
 }
 
@@ -227,46 +227,46 @@ function TEST_erasure_code_pool_lrc() {
     local dir=$1
     run_mon $dir a || return 1
 
-    ./ceph osd erasure-code-profile set LRCprofile \
+    ceph osd erasure-code-profile set LRCprofile \
              plugin=lrc \
              mapping=DD_ \
              layers='[ [ "DDc", "" ] ]' || return 1
 
-    ./ceph --format json osd dump > $dir/osd.json
+    ceph --format json osd dump > $dir/osd.json
     local expected='"erasure_code_profile":"LRCprofile"'
     local poolname=erasurecodes
     ! grep "$expected" $dir/osd.json || return 1
-    ./ceph osd pool create $poolname 12 12 erasure LRCprofile
-    ./ceph --format json osd dump | tee $dir/osd.json
+    ceph osd pool create $poolname 12 12 erasure LRCprofile
+    ceph --format json osd dump | tee $dir/osd.json
     grep "$expected" $dir/osd.json > /dev/null || return 1
-    ./ceph osd crush rule ls | grep $poolname || return 1
+    ceph osd crush rule ls | grep $poolname || return 1
 }
 
 function TEST_replicated_pool() {
     local dir=$1
     run_mon $dir a || return 1
-    ./ceph osd pool create replicated 12 12 replicated replicated_ruleset 2>&1 | \
+    ceph osd pool create replicated 12 12 replicated replicated_ruleset 2>&1 | \
         grep "pool 'replicated' created" || return 1
-    ./ceph osd pool create replicated 12 12 replicated replicated_ruleset 2>&1 | \
+    ceph osd pool create replicated 12 12 replicated replicated_ruleset 2>&1 | \
         grep 'already exists' || return 1
     # default is replicated
-    ./ceph osd pool create replicated1 12 12 2>&1 | \
+    ceph osd pool create replicated1 12 12 2>&1 | \
         grep "pool 'replicated1' created" || return 1
     # default is replicated, pgp_num = pg_num
-    ./ceph osd pool create replicated2 12 2>&1 | \
+    ceph osd pool create replicated2 12 2>&1 | \
         grep "pool 'replicated2' created" || return 1
-    ./ceph osd pool create replicated 12 12 erasure 2>&1 | \
+    ceph osd pool create replicated 12 12 erasure 2>&1 | \
         grep 'cannot change to type erasure' || return 1
 }
 
 function TEST_no_pool_delete() {
     local dir=$1
     run_mon $dir a || return 1
-    ./ceph osd pool create foo 1 || return 1
-    ./ceph tell mon.a injectargs -- --no-mon-allow-pool-delete || return 1
-    ! ./ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
-    ./ceph tell mon.a injectargs -- --mon-allow-pool-delete || return 1
-    ./ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
+    ceph osd pool create foo 1 || return 1
+    ceph tell mon.a injectargs -- --no-mon-allow-pool-delete || return 1
+    ! ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
+    ceph tell mon.a injectargs -- --mon-allow-pool-delete || return 1
+    ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
 }
 
 function TEST_utf8_cli() {
@@ -276,13 +276,13 @@ function TEST_utf8_cli() {
     # the fix for http://tracker.ceph.com/issues/7387.  If it turns out
     # to not be OK (when is the default encoding *not* UTF-8?), maybe
     # the character '黄' can be replaced with the escape $'\xe9\xbb\x84'
-    ./ceph osd pool create 黄 1024 2>&1 | \
+    ceph osd pool create 黄 1024 2>&1 | \
         grep "pool '黄' created" || return 1
-    ./ceph osd lspools 2>&1 | \
+    ceph osd lspools 2>&1 | \
         grep "黄" || return 1
-    ./ceph -f json-pretty osd dump | \
+    ceph -f json-pretty osd dump | \
         python -c "import json; import sys; json.load(sys.stdin)" || return 1
-    ./ceph osd pool delete 黄 黄 --yes-i-really-really-mean-it
+    ceph osd pool delete 黄 黄 --yes-i-really-really-mean-it
 }
 
 main osd-pool-create "$@"
diff --git a/src/test/opensuse-13.2/ceph.spec.in b/src/test/opensuse-13.2/ceph.spec.in
index 3a5a6f7..26928f7 100644
--- a/src/test/opensuse-13.2/ceph.spec.in
+++ b/src/test/opensuse-13.2/ceph.spec.in
@@ -1040,6 +1040,7 @@ if ! getent passwd ceph >/dev/null ; then
     CEPH_USER_ID_OPTION=""
     getent passwd $CEPH_USER_ID >/dev/null || CEPH_USER_ID_OPTION="-u $CEPH_USER_ID"
     useradd ceph $CEPH_USER_ID_OPTION -r -g ceph -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/ceph 2>/dev/null || :
+fi
 %endif
 exit 0
 
diff --git a/src/test/osd/TestRados.cc b/src/test/osd/TestRados.cc
index 1833828..57bf3e5 100644
--- a/src/test/osd/TestRados.cc
+++ b/src/test/osd/TestRados.cc
@@ -56,10 +56,7 @@ public:
       oid << m_op;
       if (m_op % 2) {
 	// make it a long name
-	oid << " ";
-	for (unsigned i = 0; i < 300; ++i) {
-	  oid << i;
-	}
+	oid << " " << string(300, 'o');
       }
       cout << m_op << ": write initial oid " << oid.str() << std::endl;
       context.oid_not_flushing.insert(oid.str());
diff --git a/src/test/osd/osd-bench.sh b/src/test/osd/osd-bench.sh
index 0fb5ab8..fd1f4a6 100755
--- a/src/test/osd/osd-bench.sh
+++ b/src/test/osd/osd-bench.sh
@@ -16,7 +16,7 @@
 # GNU Library Public License for more details.
 #
 
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -41,20 +41,20 @@ function TEST_bench() {
     run_mon $dir a || return 1
     run_osd $dir 0 || return 1
 
-    local osd_bench_small_size_max_iops=$(CEPH_ARGS='' ./ceph-conf \
+    local osd_bench_small_size_max_iops=$(CEPH_ARGS='' ceph-conf \
         --show-config-value osd_bench_small_size_max_iops)
-    local osd_bench_large_size_max_throughput=$(CEPH_ARGS='' ./ceph-conf \
+    local osd_bench_large_size_max_throughput=$(CEPH_ARGS='' ceph-conf \
         --show-config-value osd_bench_large_size_max_throughput)
-    local osd_bench_max_block_size=$(CEPH_ARGS='' ./ceph-conf \
+    local osd_bench_max_block_size=$(CEPH_ARGS='' ceph-conf \
         --show-config-value osd_bench_max_block_size)
-    local osd_bench_duration=$(CEPH_ARGS='' ./ceph-conf \
+    local osd_bench_duration=$(CEPH_ARGS='' ceph-conf \
         --show-config-value osd_bench_duration)
 
     #
     # block size too high
     #
     expect_failure $dir osd_bench_max_block_size \
-        ./ceph tell osd.0 bench 1024 $((osd_bench_max_block_size + 1)) || return 1
+        ceph tell osd.0 bench 1024 $((osd_bench_max_block_size + 1)) || return 1
 
     #
     # count too high for small (< 1MB) block sizes
@@ -62,7 +62,7 @@ function TEST_bench() {
     local bsize=1024
     local max_count=$(($bsize * $osd_bench_duration * $osd_bench_small_size_max_iops))
     expect_failure $dir bench_small_size_max_iops \
-        ./ceph tell osd.0 bench $(($max_count + 1)) $bsize || return 1
+        ceph tell osd.0 bench $(($max_count + 1)) $bsize || return 1
 
     #
     # count too high for large (>= 1MB) block sizes
@@ -70,12 +70,12 @@ function TEST_bench() {
     local bsize=$((1024 * 1024 + 1))
     local max_count=$(($osd_bench_large_size_max_throughput * $osd_bench_duration))
     expect_failure $dir osd_bench_large_size_max_throughput \
-        ./ceph tell osd.0 bench $(($max_count + 1)) $bsize || return 1
+        ceph tell osd.0 bench $(($max_count + 1)) $bsize || return 1
 
     #
     # default values should work
     #
-    ./ceph tell osd.0 bench || return 1
+    ceph tell osd.0 bench || return 1
 }
 
 main osd-bench "$@"
diff --git a/src/test/osd/osd-config.sh b/src/test/osd/osd-config.sh
index 3cb7fa6..8f2f0db 100755
--- a/src/test/osd/osd-config.sh
+++ b/src/test/osd/osd-config.sh
@@ -16,7 +16,7 @@
 # GNU Library Public License for more details.
 #
 
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -47,7 +47,7 @@ function TEST_config_init() {
         --osd-map-cache-size $cache \
         --osd-pg-epoch-persisted-max-stale $stale \
         || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
     grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
     grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1
 }
@@ -58,30 +58,30 @@ function TEST_config_track() {
     run_mon $dir a || return 1
     run_osd $dir 0 || return 1
 
-    local osd_map_cache_size=$(CEPH_ARGS='' ./ceph-conf \
+    local osd_map_cache_size=$(CEPH_ARGS='' ceph-conf \
         --show-config-value osd_map_cache_size)
-    local osd_map_max_advance=$(CEPH_ARGS='' ./ceph-conf \
+    local osd_map_max_advance=$(CEPH_ARGS='' ceph-conf \
         --show-config-value osd_map_max_advance)
-    local osd_pg_epoch_persisted_max_stale=$(CEPH_ARGS='' ./ceph-conf \
+    local osd_pg_epoch_persisted_max_stale=$(CEPH_ARGS='' ceph-conf \
         --show-config-value osd_pg_epoch_persisted_max_stale)
     #
     # lower cache_size under max_advance to trigger the warning
     #
     ! grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
     local cache=$(($osd_map_max_advance / 2))
-    ./ceph tell osd.0 injectargs "--osd-map-cache-size $cache" || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+    ceph tell osd.0 injectargs "--osd-map-cache-size $cache" || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
     grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
     rm $dir/osd.0.log
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-osd.0.asok log reopen || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log reopen || return 1
 
     #
     # reset cache_size to the default and assert that it does not trigger the warning
     #
     ! grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
     local cache=$osd_map_cache_size
-    ./ceph tell osd.0 injectargs "--osd-map-cache-size $cache" || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+    ceph tell osd.0 injectargs "--osd-map-cache-size $cache" || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
     ! grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
 
     #
@@ -89,8 +89,8 @@ function TEST_config_track() {
     #
     ! grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
     local advance=$(($osd_map_cache_size * 2))
-    ./ceph tell osd.0 injectargs "--osd-map-max-advance $advance" || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+    ceph tell osd.0 injectargs "--osd-map-max-advance $advance" || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
     grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
 
     #
@@ -99,7 +99,7 @@ function TEST_config_track() {
     ! grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1
     local stale=$(($osd_map_cache_size * 2))
     ceph tell osd.0 injectargs "--osd-pg-epoch-persisted-max-stale $stale" || return 1
-    CEPH_ARGS='' ./ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
     grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1
 }
 
diff --git a/src/test/osd/osd-copy-from.sh b/src/test/osd/osd-copy-from.sh
index 375ad44..2fcf2a8 100755
--- a/src/test/osd/osd-copy-from.sh
+++ b/src/test/osd/osd-copy-from.sh
@@ -17,7 +17,7 @@
 # GNU Library Public License for more details.
 #
 
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -44,19 +44,19 @@ function TEST_copy_from() {
     run_osd $dir 1 || return 1
 
     # success
-    ./rados -p rbd put foo rados
-    ./rados -p rbd cp foo foo2
-    ./rados -p rbd stat foo2
+    rados -p rbd put foo rados
+    rados -p rbd cp foo foo2
+    rados -p rbd stat foo2
 
     # failure
-    ./ceph tell osd.\* injectargs -- --osd-debug-inject-copyfrom-error
-    ! ./rados -p rbd cp foo foo3
-    ! ./rados -p rbd stat foo3
+    ceph tell osd.\* injectargs -- --osd-debug-inject-copyfrom-error
+    ! rados -p rbd cp foo foo3
+    ! rados -p rbd stat foo3
 
     # success again
-    ./ceph tell osd.\* injectargs -- --no-osd-debug-inject-copyfrom-error
-    ! ./rados -p rbd cp foo foo3
-    ./rados -p rbd stat foo3
+    ceph tell osd.\* injectargs -- --no-osd-debug-inject-copyfrom-error
+    ! rados -p rbd cp foo foo3
+    rados -p rbd stat foo3
 }
 
 main osd-copy-from "$@"
diff --git a/src/test/osd/osd-markdown.sh b/src/test/osd/osd-markdown.sh
index 7db2cb0..5cd5c6b 100755
--- a/src/test/osd/osd-markdown.sh
+++ b/src/test/osd/osd-markdown.sh
@@ -16,7 +16,7 @@
 # GNU Library Public License for more details.
 #
 
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
@@ -42,10 +42,10 @@ function markdown_N_impl() {
   for i in `seq 1 $markdown_times`
   do
     # check the OSD is UP
-    ./ceph osd tree
-    ./ceph osd tree | grep osd.0 |grep up || return 1
+    ceph osd tree
+    ceph osd tree | grep osd.0 |grep up || return 1
     # mark the OSD down.
-    ./ceph osd down 0
+    ceph osd down 0
     sleep $sleeptime
   done
 }
@@ -66,7 +66,8 @@ function TEST_markdown_exceed_maxdown_count() {
     ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1
 
     markdown_N_impl $(($count+1)) $period $sleeptime
-    ./ceph osd tree | grep down | grep osd.0 || return 1
+    # down N+1 times, osd.0 should die
+    ceph osd tree | grep down | grep osd.0 || return 1
 }
 
 function TEST_markdown_boot() {
@@ -86,7 +87,7 @@ function TEST_markdown_boot() {
 
     markdown_N_impl $count $period $sleeptime
     #down N times, osd.0 should be up
-    ./ceph osd tree | grep up | grep osd.0 || return 1
+    ceph osd tree | grep up | grep osd.0 || return 1
 }
 
 function TEST_markdown_boot_exceed_time() {
@@ -106,7 +107,7 @@ function TEST_markdown_boot_exceed_time() {
     ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1
 
     markdown_N_impl $(($count+1)) $period $sleeptime
-    ./ceph osd tree | grep up | grep osd.0 || return 1
+    ceph osd tree | grep up | grep osd.0 || return 1
 }
 
 main osd-markdown "$@"
diff --git a/src/test/osd/osd-reactivate.sh b/src/test/osd/osd-reactivate.sh
index 9bc2933..e0aff3f 100755
--- a/src/test/osd/osd-reactivate.sh
+++ b/src/test/osd/osd-reactivate.sh
@@ -13,7 +13,7 @@
 # GNU Library Public License for more details.
 #
 
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
diff --git a/src/test/osd/osd-reuse-id.sh b/src/test/osd/osd-reuse-id.sh
index 8efdf5c..5914597 100755
--- a/src/test/osd/osd-reuse-id.sh
+++ b/src/test/osd/osd-reuse-id.sh
@@ -14,7 +14,7 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
diff --git a/src/test/osd/osd-scrub-repair.sh b/src/test/osd/osd-scrub-repair.sh
index 3b9b1d8..03580c3 100755
--- a/src/test/osd/osd-scrub-repair.sh
+++ b/src/test/osd/osd-scrub-repair.sh
@@ -14,7 +14,7 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
diff --git a/src/test/osd/osd-scrub-snaps.sh b/src/test/osd/osd-scrub-snaps.sh
index 71eeb19..d422448 100755
--- a/src/test/osd/osd-scrub-snaps.sh
+++ b/src/test/osd/osd-scrub-snaps.sh
@@ -14,7 +14,7 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
diff --git a/src/test/pybind/test_ceph_argparse.py b/src/test/pybind/test_ceph_argparse.py
index 4bf6c27..4c325f2 100755
--- a/src/test/pybind/test_ceph_argparse.py
+++ b/src/test/pybind/test_ceph_argparse.py
@@ -25,7 +25,8 @@ import re
 import json
 
 def get_command_descriptions(what):
-    return os.popen("./get_command_descriptions " + "--" + what).read()
+    CEPH_BIN = os.environ['CEPH_BIN']
+    return os.popen(CEPH_BIN + "/get_command_descriptions " + "--" + what).read()
 
 def test_parse_json_funcsigs():
     commands = get_command_descriptions("all")
diff --git a/src/test/rbd_mirror/image_replay.cc b/src/test/rbd_mirror/image_replay.cc
index 88c6ac7..e7eab87 100644
--- a/src/test/rbd_mirror/image_replay.cc
+++ b/src/test/rbd_mirror/image_replay.cc
@@ -185,8 +185,9 @@ int main(int argc, const char **argv)
   threads = new rbd::mirror::Threads(reinterpret_cast<CephContext*>(
     local->cct()));
   replayer = new rbd::mirror::ImageReplayer<>(threads, local, remote, client_id,
-					      "", local_pool_id, remote_pool_id,
-					      remote_image_id,
+					      "remote mirror uuid",
+                                              local_pool_id, remote_pool_id,
+                                              remote_image_id,
                                               "global image id");
 
   replayer->start(&start_cond, &bootstap_params);
diff --git a/src/test/test-ceph-helpers.sh b/src/test/test-ceph-helpers.sh
index 20c44ff..ec6be5f 100755
--- a/src/test/test-ceph-helpers.sh
+++ b/src/test/test-ceph-helpers.sh
@@ -17,4 +17,4 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-../qa/workunits/ceph-helpers.sh TESTS
+$CEPH_ROOT/qa/workunits/ceph-helpers.sh TESTS
diff --git a/src/test/test_objectstore_memstore.sh b/src/test/test_objectstore_memstore.sh
index a0f062a..54eff47 100755
--- a/src/test/test_objectstore_memstore.sh
+++ b/src/test/test_objectstore_memstore.sh
@@ -1,6 +1,6 @@
 #!/bin/sh -ex
 
 rm -rf store_test_temp_dir
-./ceph_test_objectstore --gtest_filter=\*/0
+ceph_test_objectstore --gtest_filter=\*/0
 
 echo OK
diff --git a/src/test/test_pidfile.sh b/src/test/test_pidfile.sh
index 19a3b13..5a90444 100755
--- a/src/test/test_pidfile.sh
+++ b/src/test/test_pidfile.sh
@@ -5,7 +5,7 @@
 #
 
 # Includes
-source ../qa/workunits/ceph-helpers.sh
+source $CEPH_ROOT/qa/workunits/ceph-helpers.sh
 
 function run() {
     local dir=$1
diff --git a/src/tools/ceph-monstore-update-crush.sh b/src/tools/ceph-monstore-update-crush.sh
index 7fc41e0..1508f06 100755
--- a/src/tools/ceph-monstore-update-crush.sh
+++ b/src/tools/ceph-monstore-update-crush.sh
@@ -34,10 +34,10 @@ function osdmap_get() {
     local epoch=${3:+-v $3}
     local osdmap=`mktemp`
 
-    ceph-monstore-tool $store_path get osdmap -- \
+    $CEPH_BIN/ceph-monstore-tool $store_path get osdmap -- \
                        $epoch -o $osdmap > /dev/null || return
 
-    echo $(osdmaptool --dump xml $osdmap 2> /dev/null | \
+    echo $($CEPH_BIN/osdmaptool --dump xml $osdmap 2> /dev/null | \
            $XMLSTARLET sel -t -m "$query" -v .)
 
     rm -f $osdmap
@@ -50,11 +50,11 @@ function test_crush() {
     local crush=$4
     local osdmap=`mktemp`
 
-    ceph-monstore-tool $store_path get osdmap -- \
+    $CEPH_BIN/ceph-monstore-tool $store_path get osdmap -- \
                        -v $epoch -o $osdmap > /dev/null
-    osdmaptool --export-crush $crush $osdmap &> /dev/null
+    $CEPH_BIN/osdmaptool --export-crush $crush $osdmap &> /dev/null
 
-    if crushtool --test --check $max_osd -i $crush > /dev/null; then
+    if $CEPH_BIN/crushtool --test --check $max_osd -i $crush > /dev/null; then
         good=true
     else
         good=false
@@ -157,14 +157,14 @@ function main() {
     if test $good_epoch -eq $last_osdmap_epoch; then
         echo "and mon store has no faulty crush maps."
     elif test $output; then
-        crushtool --decompile $good_crush --outfn $output
+        $CEPH_BIN/crushtool --decompile $good_crush --outfn $output
     elif test $rewrite; then
-        ceph-monstore-tool $store_path rewrite-crush --  \
+        $CEPH_BIN/ceph-monstore-tool $store_path rewrite-crush --  \
                            --crush $good_crush      \
                            --good-epoch $good_epoch
     else
         echo
-        crushtool --decompile $good_crush
+        $CEPH_BIN/crushtool --decompile $good_crush
     fi
     rm -f $good_crush
 }
diff --git a/src/tools/rbd/Utils.cc b/src/tools/rbd/Utils.cc
index 1b6df94..f06e857 100644
--- a/src/tools/rbd/Utils.cc
+++ b/src/tools/rbd/Utils.cc
@@ -80,8 +80,32 @@ int read_string(int fd, unsigned max, std::string *out) {
 }
 
 int extract_spec(const std::string &spec, std::string *pool_name,
-                 std::string *image_name, std::string *snap_name) {
-  boost::regex pattern("^(?:([^/@]+)/)?([^/@]+)(?:@([^/@]+))?$");
+                 std::string *image_name, std::string *snap_name,
+                 SpecValidation spec_validation) {
+  if (!g_ceph_context->_conf->rbd_validate_names) {
+    spec_validation = SPEC_VALIDATION_NONE;
+  }
+
+  boost::regex pattern;
+  switch (spec_validation) {
+  case SPEC_VALIDATION_FULL:
+    // disallow "/" and "@" in image and snap name
+    pattern = "^(?:([^/@]+)/)?([^/@]+)(?:@([^/@]+))?$";
+    break;
+  case SPEC_VALIDATION_SNAP:
+    // disallow "/" and "@" in snap name
+    pattern = "^(?:([^/]+)/)?([^@]+)(?:@([^/@]+))?$";
+    break;
+  case SPEC_VALIDATION_NONE:
+    // relaxed pattern assumes pool is before first "/" and snap
+    // name is after first "@"
+    pattern = "^(?:([^/]+)/)?([^@]+)(?:@(.+))?$";
+    break;
+  default:
+    assert(false);
+    break;
+  }
+
   boost::smatch match;
   if (!boost::regex_match(spec, match, pattern)) {
     std::cerr << "rbd: invalid spec '" << spec << "'" << std::endl;
@@ -139,6 +163,7 @@ int get_pool_image_snapshot_names(const po::variables_map &vm,
                                   std::string *image_name,
                                   std::string *snap_name,
                                   SnapshotPresence snapshot_presence,
+                                  SpecValidation spec_validation,
                                   bool image_required) {
   std::string pool_key = (mod == at::ARGUMENT_MODIFIER_DEST ?
     at::DEST_POOL_NAME : at::POOL_NAME);
@@ -156,20 +181,24 @@ int get_pool_image_snapshot_names(const po::variables_map &vm,
   if (vm.count(snap_key) && snap_name != nullptr) {
      *snap_name = vm[snap_key].as<std::string>();
    }
-  
+
+  int r;
   if (image_name != nullptr && !image_name->empty()) {
     // despite the separate pool and snapshot name options,
     // we can also specify them via the image option
     std::string image_name_copy(*image_name);
-    extract_spec(image_name_copy, pool_name, image_name, snap_name);
+    r = extract_spec(image_name_copy, pool_name, image_name, snap_name,
+                     spec_validation);
+    if (r < 0) {
+      return r;
+    }
   }
 
-  int r;
   if (image_name != nullptr && spec_arg_index != nullptr &&
       image_name->empty()) {
     std::string spec = get_positional_argument(vm, (*spec_arg_index)++);
     if (!spec.empty()) {
-      r = extract_spec(spec, pool_name, image_name, snap_name);
+      r = extract_spec(spec, pool_name, image_name, snap_name, spec_validation);
       if (r < 0) {
         return r;
       }
@@ -221,26 +250,35 @@ int get_pool_journal_names(const po::variables_map &vm,
     image_name = vm[image_key].as<std::string>();
   }
 
+  int r;
   if (journal_name != nullptr && !journal_name->empty()) {
     // despite the separate pool option,
     // we can also specify them via the journal option
     std::string journal_name_copy(*journal_name);
-    extract_spec(journal_name_copy, pool_name, journal_name, nullptr);
+    r = extract_spec(journal_name_copy, pool_name, journal_name, nullptr,
+                     SPEC_VALIDATION_FULL);
+    if (r < 0) {
+      return r;
+    }
   }
 
   if (!image_name.empty()) {
     // despite the separate pool option,
     // we can also specify them via the image option
     std::string image_name_copy(image_name);
-    extract_spec(image_name_copy, pool_name, &image_name, nullptr);
+    r = extract_spec(image_name_copy, pool_name, &image_name, nullptr,
+                     SPEC_VALIDATION_NONE);
+    if (r < 0) {
+      return r;
+    }
   }
 
-  int r;
   if (journal_name != nullptr && spec_arg_index != nullptr &&
       journal_name->empty()) {
     std::string spec = get_positional_argument(vm, (*spec_arg_index)++);
     if (!spec.empty()) {
-      r = extract_spec(spec, pool_name, journal_name, nullptr);
+      r = extract_spec(spec, pool_name, journal_name, nullptr,
+                       SPEC_VALIDATION_FULL);
       if (r < 0) {
         return r;
       }
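
Note on the extract_spec() change above: it threads a SpecValidation level through the rbd command-line tool. SPEC_VALIDATION_FULL disallows "/" and "@" in the image and snapshot names, SPEC_VALIDATION_SNAP only restricts the snapshot name, and SPEC_VALIDATION_NONE keeps the old relaxed parsing (pool before the first "/", snapshot after the first "@"); the relaxed mode is also forced whenever rbd_validate_names is disabled. A hedged illustration of the user-visible effect, with example commands that are not taken from this commit (destination specs in Clone/Copy/Create go through SPEC_VALIDATION_FULL):

    # Illustrative only: expected behaviour with rbd_validate_names left at its default.
    rbd create rbd/newimage --size 128       # accepted: pool "rbd", image "newimage"
    rbd create "rbd/bad@name" --size 128     # expected to be rejected: "@" is not allowed in an image name
    rbd copy rbd/src rbd/dst                 # source parsed with relaxed rules, destination fully validated
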
diff --git a/src/tools/rbd/Utils.h b/src/tools/rbd/Utils.h
index 0cd8a41..10ec2d2 100644
--- a/src/tools/rbd/Utils.h
+++ b/src/tools/rbd/Utils.h
@@ -22,6 +22,12 @@ enum SnapshotPresence {
   SNAPSHOT_PRESENCE_REQUIRED
 };
 
+enum SpecValidation {
+  SPEC_VALIDATION_FULL,
+  SPEC_VALIDATION_SNAP,
+  SPEC_VALIDATION_NONE
+};
+
 struct ProgressContext : public librbd::ProgressContext {
   const char *operation;
   bool progress;
@@ -41,7 +47,8 @@ void aio_context_callback(librbd::completion_t completion, void *arg);
 int read_string(int fd, unsigned max, std::string *out);
 
 int extract_spec(const std::string &spec, std::string *pool_name,
-                 std::string *image_name, std::string *snap_name);
+                 std::string *image_name, std::string *snap_name,
+                 SpecValidation spec_validation);
 
 std::string get_positional_argument(
     const boost::program_options::variables_map &vm, size_t index);
@@ -53,7 +60,8 @@ int get_pool_image_snapshot_names(
     const boost::program_options::variables_map &vm,
     argument_types::ArgumentModifier mod, size_t *spec_arg_index,
     std::string *pool_name, std::string *image_name, std::string *snap_name,
-    SnapshotPresence snapshot_presence, bool image_required = true);
+    SnapshotPresence snapshot_presence, SpecValidation spec_validation,
+    bool image_required = true);
 
 int get_pool_journal_names(
     const boost::program_options::variables_map &vm,
diff --git a/src/tools/rbd/action/BenchWrite.cc b/src/tools/rbd/action/BenchWrite.cc
index 849340b..c124f30 100644
--- a/src/tools/rbd/action/BenchWrite.cc
+++ b/src/tools/rbd/action/BenchWrite.cc
@@ -105,7 +105,7 @@ void rbd_bencher_completion(void *vc, void *pc)
   int ret = c->get_return_value();
   if (ret != 0) {
     cout << "write error: " << cpp_strerror(ret) << std::endl;
-    assert(0 == ret);
+    exit(ret < 0 ? -ret : ret);
   }
   b->lock.Lock();
   b->in_flight--;
@@ -251,7 +251,7 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Children.cc b/src/tools/rbd/action/Children.cc
index b930eb9..8e49eab 100644
--- a/src/tools/rbd/action/Children.cc
+++ b/src/tools/rbd/action/Children.cc
@@ -60,7 +60,7 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Clone.cc b/src/tools/rbd/action/Clone.cc
index 0ac991b..f399d58 100644
--- a/src/tools/rbd/action/Clone.cc
+++ b/src/tools/rbd/action/Clone.cc
@@ -44,7 +44,7 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_SOURCE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -54,7 +54,7 @@ int execute(const po::variables_map &vm) {
   std::string dst_snap_name;
   r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_DEST, &arg_index, &dst_pool_name, &dst_image_name,
-    &dst_snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &dst_snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Copy.cc b/src/tools/rbd/action/Copy.cc
index 7ab53ae..e45b008 100644
--- a/src/tools/rbd/action/Copy.cc
+++ b/src/tools/rbd/action/Copy.cc
@@ -45,7 +45,8 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_SOURCE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED,
+    utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -55,7 +56,7 @@ int execute(const po::variables_map &vm) {
   std::string dst_snap_name;
   r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_DEST, &arg_index, &dst_pool_name, &dst_image_name,
-    &dst_snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &dst_snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Create.cc b/src/tools/rbd/action/Create.cc
index 5891939..dc3bb83 100644
--- a/src/tools/rbd/action/Create.cc
+++ b/src/tools/rbd/action/Create.cc
@@ -51,7 +51,7 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Diff.cc b/src/tools/rbd/action/Diff.cc
index cd0aeb2..3db9e30 100644
--- a/src/tools/rbd/action/Diff.cc
+++ b/src/tools/rbd/action/Diff.cc
@@ -93,7 +93,8 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED,
+    utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/DiskUsage.cc b/src/tools/rbd/action/DiskUsage.cc
index 580192f..e694dd7 100644
--- a/src/tools/rbd/action/DiskUsage.cc
+++ b/src/tools/rbd/action/DiskUsage.cc
@@ -231,7 +231,7 @@ int execute(const po::variables_map &vm) {
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
     &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED,
-    false);
+    utils::SPEC_VALIDATION_NONE, false);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Export.cc b/src/tools/rbd/action/Export.cc
index 4fe5957..d6866e5 100644
--- a/src/tools/rbd/action/Export.cc
+++ b/src/tools/rbd/action/Export.cc
@@ -161,7 +161,8 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_SOURCE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED,
+    utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/ExportDiff.cc b/src/tools/rbd/action/ExportDiff.cc
index 245bbf3..13e7346 100644
--- a/src/tools/rbd/action/ExportDiff.cc
+++ b/src/tools/rbd/action/ExportDiff.cc
@@ -213,7 +213,8 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_SOURCE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED,
+    utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Feature.cc b/src/tools/rbd/action/Feature.cc
index 12d4dd8..d1b6d1f 100644
--- a/src/tools/rbd/action/Feature.cc
+++ b/src/tools/rbd/action/Feature.cc
@@ -45,7 +45,7 @@ int execute(const po::variables_map &vm, bool enabled) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Flatten.cc b/src/tools/rbd/action/Flatten.cc
index 5122543..4d231fd 100644
--- a/src/tools/rbd/action/Flatten.cc
+++ b/src/tools/rbd/action/Flatten.cc
@@ -40,7 +40,7 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/ImageMeta.cc b/src/tools/rbd/action/ImageMeta.cc
index 2cf1a25..13d9be7 100644
--- a/src/tools/rbd/action/ImageMeta.cc
+++ b/src/tools/rbd/action/ImageMeta.cc
@@ -130,7 +130,7 @@ int execute_list(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -172,7 +172,7 @@ int execute_get(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -216,7 +216,7 @@ int execute_set(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -264,7 +264,7 @@ int execute_remove(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Import.cc b/src/tools/rbd/action/Import.cc
index e7bf4d1..2eee2e8 100644
--- a/src/tools/rbd/action/Import.cc
+++ b/src/tools/rbd/action/Import.cc
@@ -265,7 +265,8 @@ int execute(const po::variables_map &vm) {
   std::string deprecated_image_name;
   if (vm.count(at::IMAGE_NAME)) {
     utils::extract_spec(vm[at::IMAGE_NAME].as<std::string>(),
-                        &deprecated_pool_name, &deprecated_image_name, nullptr);
+                        &deprecated_pool_name, &deprecated_image_name, nullptr,
+                        utils::SPEC_VALIDATION_FULL);
     std::cerr << "rbd: --image is deprecated for import, use --dest"
               << std::endl;
   } else {
@@ -278,7 +279,8 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_DEST, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, false);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL,
+    false);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/ImportDiff.cc b/src/tools/rbd/action/ImportDiff.cc
index fb69c2e..2efdc65 100644
--- a/src/tools/rbd/action/ImportDiff.cc
+++ b/src/tools/rbd/action/ImportDiff.cc
@@ -203,7 +203,7 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Info.cc b/src/tools/rbd/action/Info.cc
index beb7b08..8e31e45 100644
--- a/src/tools/rbd/action/Info.cc
+++ b/src/tools/rbd/action/Info.cc
@@ -231,7 +231,8 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED,
+    utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Kernel.cc b/src/tools/rbd/action/Kernel.cc
index 3c552d9..74eb6d9 100644
--- a/src/tools/rbd/action/Kernel.cc
+++ b/src/tools/rbd/action/Kernel.cc
@@ -249,7 +249,8 @@ int execute_map(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED,
+    utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -318,7 +319,7 @@ int execute_unmap(const po::variables_map &vm) {
     r = utils::get_pool_image_snapshot_names(
       vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
       &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED,
-      false);
+      utils::SPEC_VALIDATION_NONE, false);
     if (r < 0) {
       return r;
     }
diff --git a/src/tools/rbd/action/Lock.cc b/src/tools/rbd/action/Lock.cc
index f9f4a22..f083caf 100644
--- a/src/tools/rbd/action/Lock.cc
+++ b/src/tools/rbd/action/Lock.cc
@@ -116,7 +116,7 @@ int execute_list(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -159,7 +159,7 @@ int execute_add(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -217,7 +217,7 @@ int execute_remove(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/MirrorImage.cc b/src/tools/rbd/action/MirrorImage.cc
index ecb838f..f61c628 100644
--- a/src/tools/rbd/action/MirrorImage.cc
+++ b/src/tools/rbd/action/MirrorImage.cc
@@ -52,7 +52,7 @@ int execute_enable_disable(const po::variables_map &vm, bool enable,
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
       vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-      &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+      &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -99,7 +99,7 @@ int execute_promote(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
       vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-      &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+      &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -132,7 +132,7 @@ int execute_demote(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
       vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-      &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+      &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -163,7 +163,7 @@ int execute_resync(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
       vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-      &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+      &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Nbd.cc b/src/tools/rbd/action/Nbd.cc
index 81ff183..478e42a 100644
--- a/src/tools/rbd/action/Nbd.cc
+++ b/src/tools/rbd/action/Nbd.cc
@@ -111,7 +111,8 @@ int execute_map(const po::variables_map &vm)
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED,
+    utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/ObjectMap.cc b/src/tools/rbd/action/ObjectMap.cc
index b14bc72..17525c8 100644
--- a/src/tools/rbd/action/ObjectMap.cc
+++ b/src/tools/rbd/action/ObjectMap.cc
@@ -41,7 +41,8 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_PERMITTED,
+    utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Remove.cc b/src/tools/rbd/action/Remove.cc
index 6c2d2c3..01b36cb 100644
--- a/src/tools/rbd/action/Remove.cc
+++ b/src/tools/rbd/action/Remove.cc
@@ -41,7 +41,7 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Rename.cc b/src/tools/rbd/action/Rename.cc
index c076111..925a419 100644
--- a/src/tools/rbd/action/Rename.cc
+++ b/src/tools/rbd/action/Rename.cc
@@ -37,7 +37,7 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_SOURCE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -47,7 +47,7 @@ int execute(const po::variables_map &vm) {
   std::string dst_snap_name;
   r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_DEST, &arg_index, &dst_pool_name, &dst_image_name,
-    &dst_snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &dst_snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Resize.cc b/src/tools/rbd/action/Resize.cc
index aa7a390..9904427 100644
--- a/src/tools/rbd/action/Resize.cc
+++ b/src/tools/rbd/action/Resize.cc
@@ -43,7 +43,7 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Snap.cc b/src/tools/rbd/action/Snap.cc
index 5452ef3..345e747 100644
--- a/src/tools/rbd/action/Snap.cc
+++ b/src/tools/rbd/action/Snap.cc
@@ -161,7 +161,7 @@ int execute_list(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -202,7 +202,7 @@ int execute_create(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED, utils::SPEC_VALIDATION_SNAP);
   if (r < 0) {
     return r;
   }
@@ -237,7 +237,7 @@ int execute_remove(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -278,7 +278,7 @@ int execute_purge(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -316,7 +316,7 @@ int execute_rollback(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -351,7 +351,7 @@ int execute_protect(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -386,7 +386,7 @@ int execute_unprotect(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED);
+    &snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
@@ -422,7 +422,8 @@ int execute_rename(const po::variables_map &vm) {
   std::string src_snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_SOURCE, &arg_index, &pool_name, &image_name,
-    &src_snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED);
+    &src_snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED,
+    utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return -r;
   }
@@ -432,7 +433,8 @@ int execute_rename(const po::variables_map &vm) {
   std::string dest_snap_name;
   r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_DEST, &arg_index, &dest_pool_name,
-    &dest_image_name, &dest_snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED);
+    &dest_image_name, &dest_snap_name, utils::SNAPSHOT_PRESENCE_REQUIRED,
+    utils::SPEC_VALIDATION_SNAP);
   if (r < 0) {
     return -r;
   }
diff --git a/src/tools/rbd/action/Status.cc b/src/tools/rbd/action/Status.cc
index da8fe97..ab37bc8 100644
--- a/src/tools/rbd/action/Status.cc
+++ b/src/tools/rbd/action/Status.cc
@@ -96,7 +96,7 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/tools/rbd/action/Watch.cc b/src/tools/rbd/action/Watch.cc
index 3e53255..65be93d 100644
--- a/src/tools/rbd/action/Watch.cc
+++ b/src/tools/rbd/action/Watch.cc
@@ -107,7 +107,7 @@ int execute(const po::variables_map &vm) {
   std::string snap_name;
   int r = utils::get_pool_image_snapshot_names(
     vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, &pool_name, &image_name,
-    &snap_name, utils::SNAPSHOT_PRESENCE_NONE);
+    &snap_name, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_NONE);
   if (r < 0) {
     return r;
   }
diff --git a/src/unittest_bufferlist.sh b/src/unittest_bufferlist.sh
index 8ddf24f..5c0b62f 100755
--- a/src/unittest_bufferlist.sh
+++ b/src/unittest_bufferlist.sh
@@ -16,4 +16,4 @@
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU Library Public License for more details.
 #
-CEPH_BUFFER_TRACK=true ./unittest_bufferlist
+CEPH_BUFFER_TRACK=true unittest_bufferlist
diff --git a/src/vstart.sh b/src/vstart.sh
index 9d225ef..fd70ccc 100755
--- a/src/vstart.sh
+++ b/src/vstart.sh
@@ -91,6 +91,7 @@ start_rgw=0
 ip=""
 nodaemon=0
 smallmds=0
+short=0
 ec=0
 hitset=""
 overwrite_conf=1
@@ -131,6 +132,7 @@ usage=$usage"\t--rgw_port specify ceph rgw http listen port\n"
 usage=$usage"\t--bluestore use bluestore as the osd objectstore backend\n"
 usage=$usage"\t--memstore use memstore as the osd objectstore backend\n"
 usage=$usage"\t--cache <pool>: enable cache tiering on pool\n"
+usage=$usage"\t--short: short object names only; necessary for ext4 dev\n"
 
 usage_exit() {
 	printf "$usage"
@@ -162,6 +164,9 @@ case $1 in
     --new | -n )
 	    new=1
 	    ;;
+    --short )
+	    short=1
+	    ;;
     --valgrind )
 	    [ -z "$2" ] && usage_exit
 	    valgrind=$2
@@ -465,6 +470,10 @@ cat <<EOF >> $conf_fn
 	auth client required = none
 EOF
 fi
+if [ "$short" -eq 1 ]; then
+    COSDSHORT="        osd max object name len = 460
+        osd max object namespace len = 64"
+fi
 			cat <<EOF >> $conf_fn
 
 [client]
@@ -505,6 +514,7 @@ $DAEMONOPTS
 	bluestore block wal create = true
 $COSDDEBUG
 $COSDMEMSTORE
+$COSDSHORT
 $extra_conf
 [mon]
         mon pg warn min per osd = 3
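Taken together, the vstart.sh hunks add a --short flag: when it is set, $COSDSHORT expands inside the generated OSD configuration so that object names stay within tight filesystem name limits (the usage text mentions ext4). Assuming an otherwise default invocation, the effect on the generated conf is roughly:

    $ ./vstart.sh -n --short
    # the OSD section of the generated ceph.conf then additionally carries:
    #         osd max object name len = 460
    #         osd max object namespace len = 64
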
diff --git a/systemd/ceph-mds@.service b/systemd/ceph-mds@.service
index f13cef4..ae1c29d 100644
--- a/systemd/ceph-mds@.service
+++ b/systemd/ceph-mds@.service
@@ -16,6 +16,9 @@ ProtectHome=true
 ProtectSystem=full
 PrivateTmp=true
 TasksMax=infinity
+Restart=on-failure
+StartLimitInterval=30min
+StartLimitBurst=3
 
 [Install]
 WantedBy=ceph-mds.target
diff --git a/systemd/ceph-mon@.service b/systemd/ceph-mon@.service
index b9501d6..d62c4fd 100644
--- a/systemd/ceph-mon@.service
+++ b/systemd/ceph-mon@.service
@@ -22,6 +22,9 @@ ProtectHome=true
 ProtectSystem=full
 PrivateTmp=true
 TasksMax=infinity
+Restart=on-failure
+StartLimitInterval=30min
+StartLimitBurst=3
 
 [Install]
 WantedBy=ceph-mon.target
diff --git a/systemd/ceph-osd@.service b/systemd/ceph-osd@.service
index 1778db7..df1893e 100644
--- a/systemd/ceph-osd@.service
+++ b/systemd/ceph-osd@.service
@@ -16,6 +16,9 @@ ProtectHome=true
 ProtectSystem=full
 PrivateTmp=true
 TasksMax=infinity
+Restart=on-failure
+StartLimitInterval=30min
+StartLimitBurst=3
 
 [Install]
 WantedBy=ceph-osd.target
diff --git a/systemd/ceph-radosgw@.service b/systemd/ceph-radosgw@.service
index cfa5788..e19ba16 100644
--- a/systemd/ceph-radosgw@.service
+++ b/systemd/ceph-radosgw@.service
@@ -15,6 +15,9 @@ ProtectHome=true
 ProtectSystem=full
 PrivateTmp=true
 TasksMax=infinity
+Restart=on-failure
+StartLimitInterval=30s
+StartLimitBurst=5
 
 [Install]
 WantedBy=ceph-radosgw.target
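All four unit files gain the same failure handling: the daemon is restarted automatically, but systemd stops retrying once the start limit is hit (3 starts per 30 minutes for mds, mon and osd; 5 starts per 30 seconds for radosgw) until the window elapses or the failure state is cleared with systemctl reset-failed. The resulting [Service] fragment for, say, ceph-osd@.service reads:

    [Service]
    ...
    TasksMax=infinity
    Restart=on-failure
    StartLimitInterval=30min
    StartLimitBurst=3
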

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-ceph/ceph.git


